/* xref: /openbmc/linux/net/ceph/osd_client.c (revision eb039161) */

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to a distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
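
/*
 * Usage sketch for the verify_*() helpers above (illustrative only,
 * not a call site in this file).  They are meant to be dropped into
 * paths that assume a particular lock is held:
 *
 *	down_write(&osdc->lock);
 *	verify_osdc_wrlocked(osdc);	// quiet; would WARN without the
 *					// down_write() above
 *	up_write(&osdc->lock);
 *
 * Note that rwsem_is_wrlocked() is a best-effort probe: a successful
 * down_read_trylock() proves no writer holds the rwsem, but a failure
 * does not strictly prove the caller itself is the writer.
 */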

/*
 * Calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  Shorten the extent as necessary if it crosses an
 * object boundary.
 *
 * Fill the osd op in the request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
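
/*
 * Worked example for calc_layout() (hypothetical numbers, assuming a
 * default layout of 4M objects with no striping):
 *
 *	u64 len = 4 * 1024 * 1024;	// file extent 6M~4M
 *	calc_layout(layout, 6 * 1024 * 1024, &len, &objnum, &objoff, &objlen);
 *	// objnum == 1 (object covers [4M, 8M)), objoff == 2M
 *	// objlen == 2M, so len is shortened from 4M to 2M and the
 *	// caller must issue another request for the remaining 2M
 */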

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
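
/*
 * Illustrative expansion of the macro above: for a read op at index 0,
 *
 *	osd_req_op_data(req, 0, extent, osd_data)
 *
 * evaluates to &req->r_ops[0].extent.osd_data after BUG_ON-checking
 * the index.  typ and fld are pasted in as member names, which is why
 * this has to be a macro rather than a function.
 */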

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);
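
/*
 * Reference counting sketch (illustrative only): the allocation
 * reference from ceph_osdc_alloc_request() pairs with a final put,
 * and any extra get must be balanced:
 *
 *	ceph_osdc_get_request(req);	// e.g. for an async completion
 *	...
 *	ceph_osdc_put_request(req);	// drops the async reference
 *	ceph_osdc_put_request(req);	// drops the allocation reference,
 *					// may free req
 */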

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
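
/*
 * Minimal sketch of manual request setup (illustrative, error handling
 * elided; assumes a live @osdc and a filled-in oid/oloc pair):
 *
 *	struct ceph_osd_request *req;
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *	ceph_oid_copy(&req->r_base_oid, &oid);
 *	ceph_oloc_copy(&req->r_base_oloc, &oloc);
 *	if (ceph_osdc_alloc_messages(req, GFP_NOFS))
 *		ceph_osdc_put_request(req);	// -ENOMEM
 *
 * ceph_osdc_alloc_messages() below must not be called before the base
 * oid/oloc are set -- it sizes the request message from them.
 */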

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
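
/*
 * Illustrative preprocessor expansion: __CEPH_FORALL_OSD_OPS() is an
 * X-macro invoking GENERATE_CASE once per known opcode, so the switch
 * above expands to roughly
 *
 *	case CEPH_OSD_OP_READ: return true;
 *	case CEPH_OSD_OP_WRITE: return true;
 *	...
 *
 * and any opcode not on that list falls through to return false.
 */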

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
				unsigned int which, u16 opcode,
				u64 offset, u64 length,
				u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
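
/*
 * Example (illustrative): set up op 0 as a 4K write at offset 0 with
 * no truncation state:
 *
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_WRITE, 0, 4096, 0, 0);
 *	// op->indata_len == 4096 -- a WRITE carries its payload out
 *
 * The same call with CEPH_OSD_OP_READ leaves indata_len at 0, since
 * read data travels in the reply instead.
 */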

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
				unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
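
/*
 * Example (illustrative): invoke method "lock" of object class "lock",
 * roughly what the cls_lock client does:
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "lock");
 *	osd_req_op_cls_request_data_pagelist(req, 0, input_pl);
 *
 * The class and method names land in the request_info pagelist and are
 * counted in indata_len; method input goes in request_data separately.
 */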

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
924 					       struct ceph_file_layout *layout,
925 					       struct ceph_vino vino,
926 					       u64 off, u64 *plen,
927 					       unsigned int which, int num_ops,
928 					       int opcode, int flags,
929 					       struct ceph_snap_context *snapc,
930 					       u32 truncate_seq,
931 					       u64 truncate_size,
932 					       bool use_mempool)
933 {
934 	struct ceph_osd_request *req;
935 	u64 objnum = 0;
936 	u64 objoff = 0;
937 	u64 objlen = 0;
938 	int r;
939 
940 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
941 	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
942 	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
943 
944 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
945 					GFP_NOFS);
946 	if (!req) {
947 		r = -ENOMEM;
948 		goto fail;
949 	}
950 
951 	/* calculate max write size */
952 	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
953 	if (r)
954 		goto fail;
955 
956 	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
957 		osd_req_op_init(req, which, opcode, 0);
958 	} else {
959 		u32 object_size = layout->object_size;
960 		u32 object_base = off - objoff;
961 		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
962 			if (truncate_size <= object_base) {
963 				truncate_size = 0;
964 			} else {
965 				truncate_size -= object_base;
966 				if (truncate_size > object_size)
967 					truncate_size = object_size;
968 			}
969 		}
970 		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
971 				       truncate_size, truncate_seq);
972 	}
973 
974 	req->r_abort_on_full = true;
975 	req->r_flags = flags;
976 	req->r_base_oloc.pool = layout->pool_id;
977 	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
978 	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
979 
980 	req->r_snapid = vino.snap;
981 	if (flags & CEPH_OSD_FLAG_WRITE)
982 		req->r_data_offset = off;
983 
984 	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
985 	if (r)
986 		goto fail;
987 
988 	return req;
989 
990 fail:
991 	ceph_osdc_put_request(req);
992 	return ERR_PTR(r);
993 }
994 EXPORT_SYMBOL(ceph_osdc_new_request);
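
/*
 * Usage sketch (illustrative, error handling elided): read 4K at file
 * offset 0, roughly what ceph_osdc_readpages() does:
 *
 *	u64 len = 4096;
 *	struct ceph_osd_request *req;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, 0, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */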

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
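
/*
 * Illustrative note: DEFINE_RB_FUNCS(request, ...) generates
 * insert_request(), erase_request() and lookup_request(), keyed on
 * r_tid, e.g.
 *
 *	req = lookup_request(&osd->o_requests, tid);
 *
 * The _mc variant hangs the same request off r_mc_node instead, so a
 * request can sit in both a session tree and the map-check tree at
 * the same time.
 */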

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
			!ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	enum calc_target_result ct_res;
	int ret;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
					  &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;

		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend ||
	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
					       RESEND_ON_SPLIT)))
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
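
/*
 * Illustrative summary of the ordering implemented above: hoids sort
 * by (is_max, pool, bitwise hash key, nspace, effective key, oid,
 * snapid), mirroring hobject_t's bitwise ordering on the OSD.  With
 * all other fields equal:
 *
 *	hoid_compare(a, b) < 0	when a->pool < b->pool
 *	hoid_compare(a, b) > 0	when a->is_max && !b->is_max
 */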

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					    const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}
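
/*
 * Worked example (hypothetical names): with a single backoff covering
 * [begin="bar", end="foo") in the tree, looking up hoid "baz" compares
 * greater than "bar" and less than "foo", so that backoff is returned.
 * Looking up "qux" compares greater than "bar" but not less than
 * "foo", walks right off the tree and returns NULL -- no backoff is
 * in effect for it.
 */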

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)

static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}

/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}

static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}

static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}
1832 
1833 static void encode_pgid(void **p, const struct ceph_pg *pgid)
1834 {
1835 	ceph_encode_8(p, 1);
1836 	ceph_encode_64(p, pgid->pool);
1837 	ceph_encode_32(p, pgid->seed);
1838 	ceph_encode_32(p, -1); /* preferred */
1839 }
1840 
1841 static void encode_spgid(void **p, const struct ceph_spg *spgid)
1842 {
1843 	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
1844 	encode_pgid(p, &spgid->pgid);
1845 	ceph_encode_8(p, spgid->shard);
1846 }
1847 
1848 static void encode_oloc(void **p, void *end,
1849 			const struct ceph_object_locator *oloc)
1850 {
1851 	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
1852 	ceph_encode_64(p, oloc->pool);
1853 	ceph_encode_32(p, -1); /* preferred */
1854 	ceph_encode_32(p, 0);  /* key len */
1855 	if (oloc->pool_ns)
1856 		ceph_encode_string(p, end, oloc->pool_ns->str,
1857 				   oloc->pool_ns->len);
1858 	else
1859 		ceph_encode_32(p, 0);
1860 }
1861 
1862 static void encode_request_partial(struct ceph_osd_request *req,
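/*
 * Encode the MOSDOp v8 front up to, but not including, the trailing
 * feature bits -- the only field that depends on the peer.  The
 * message is completed in encode_request_finish() once the actual
 * connection (and thus peer_features) is known.
 */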
1863 				   struct ceph_msg *msg)
1864 {
1865 	void *p = msg->front.iov_base;
1866 	void *const end = p + msg->front_alloc_len;
1867 	u32 data_len = 0;
1868 	int i;
1869 
1870 	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1871 		/* snapshots aren't writeable */
1872 		WARN_ON(req->r_snapid != CEPH_NOSNAP);
1873 	} else {
1874 		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1875 			req->r_data_offset || req->r_snapc);
1876 	}
1877 
1878 	setup_request_data(req, msg);
1879 
1880 	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
1881 	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
1882 	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1883 	ceph_encode_32(&p, req->r_flags);
1884 
1885 	/* reqid */
1886 	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
1887 	memset(p, 0, sizeof(struct ceph_osd_reqid));
1888 	p += sizeof(struct ceph_osd_reqid);
1889 
1890 	/* trace */
1891 	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
1892 	p += sizeof(struct ceph_blkin_trace_info);
1893 
1894 	ceph_encode_32(&p, 0); /* client_inc, always 0 */
1895 	ceph_encode_timespec(p, &req->r_mtime);
1896 	p += sizeof(struct ceph_timespec);
1897 
1898 	encode_oloc(&p, end, &req->r_t.target_oloc);
1899 	ceph_encode_string(&p, end, req->r_t.target_oid.name,
1900 			   req->r_t.target_oid.name_len);
1901 
1902 	/* ops, can imply data */
1903 	ceph_encode_16(&p, req->r_num_ops);
1904 	for (i = 0; i < req->r_num_ops; i++) {
1905 		data_len += osd_req_encode_op(p, &req->r_ops[i]);
1906 		p += sizeof(struct ceph_osd_op);
1907 	}
1908 
1909 	ceph_encode_64(&p, req->r_snapid); /* snapid */
1910 	if (req->r_snapc) {
1911 		ceph_encode_64(&p, req->r_snapc->seq);
1912 		ceph_encode_32(&p, req->r_snapc->num_snaps);
1913 		for (i = 0; i < req->r_snapc->num_snaps; i++)
1914 			ceph_encode_64(&p, req->r_snapc->snaps[i]);
1915 	} else {
1916 		ceph_encode_64(&p, 0); /* snap_seq */
1917 		ceph_encode_32(&p, 0); /* snaps len */
1918 	}
1919 
1920 	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1921 	BUG_ON(p != end - 8); /* space for features */
1922 
1923 	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
1924 	/* front_len is finalized in encode_request_finish() */
1925 	msg->hdr.data_len = cpu_to_le32(data_len);
1926 	/*
1927 	 * The header "data_off" is a hint to the receiver allowing it
1928 	 * to align received data into its buffers such that there's no
1929 	 * need to re-copy it before writing it to disk (direct I/O).
1930 	 */
1931 	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1932 
1933 	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
1934 	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1935 }
1936 
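/*
 * Complete the partially encoded message for the actual peer: append
 * the feature bits for luminous+ OSDs, or rearrange the already
 * encoded blobs into the older MOSDOp v4 layout for everyone else.
 */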
1937 static void encode_request_finish(struct ceph_msg *msg)
1938 {
1939 	void *p = msg->front.iov_base;
1940 	void *const end = p + msg->front_alloc_len;
1941 
1942 	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
1943 		/* luminous OSD -- encode features and be done */
1944 		p = end - 8;
1945 		ceph_encode_64(&p, msg->con->peer_features);
1946 	} else {
1947 		struct {
1948 			char spgid[CEPH_ENCODING_START_BLK_LEN +
1949 				   CEPH_PGID_ENCODING_LEN + 1];
1950 			__le32 hash;
1951 			__le32 epoch;
1952 			__le32 flags;
1953 			char reqid[CEPH_ENCODING_START_BLK_LEN +
1954 				   sizeof(struct ceph_osd_reqid)];
1955 			char trace[sizeof(struct ceph_blkin_trace_info)];
1956 			__le32 client_inc;
1957 			struct ceph_timespec mtime;
1958 		} __packed head;
1959 		struct ceph_pg pgid;
1960 		void *oloc, *oid, *tail;
1961 		int oloc_len, oid_len, tail_len;
1962 		int len;
1963 
1964 		/*
1965 		 * Pre-luminous OSD -- re-encode v8 into v4 using @head
1966 		 * as a temporary buffer.  Encode the raw PG; the rest
1967 		 * is just a matter of moving oloc, oid and tail blobs
1968 		 * around.
1969 		 */
1970 		memcpy(&head, p, sizeof(head));
1971 		p += sizeof(head);
1972 
1973 		oloc = p;
1974 		p += CEPH_ENCODING_START_BLK_LEN;
1975 		pgid.pool = ceph_decode_64(&p);
1976 		p += 4 + 4; /* preferred, key len */
1977 		len = ceph_decode_32(&p);
1978 		p += len;   /* nspace */
1979 		oloc_len = p - oloc;
1980 
1981 		oid = p;
1982 		len = ceph_decode_32(&p);
1983 		p += len;
1984 		oid_len = p - oid;
1985 
1986 		tail = p;
1987 		tail_len = (end - p) - 8;
1988 
1989 		p = msg->front.iov_base;
1990 		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
1991 		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
1992 		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
1993 		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
1994 
1995 		/* reassert_version */
1996 		memset(p, 0, sizeof(struct ceph_eversion));
1997 		p += sizeof(struct ceph_eversion);
1998 
1999 		BUG_ON(p >= oloc);
2000 		memmove(p, oloc, oloc_len);
2001 		p += oloc_len;
2002 
2003 		pgid.seed = le32_to_cpu(head.hash);
2004 		encode_pgid(&p, &pgid); /* raw pg */
2005 
2006 		BUG_ON(p >= oid);
2007 		memmove(p, oid, oid_len);
2008 		p += oid_len;
2009 
2010 		/* tail -- ops, snapid, snapc, retry_attempt */
2011 		BUG_ON(p >= tail);
2012 		memmove(p, tail, tail_len);
2013 		p += tail_len;
2014 
2015 		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2016 	}
2017 
2018 	BUG_ON(p > end);
2019 	msg->front.iov_len = p - msg->front.iov_base;
2020 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2021 
2022 	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2023 	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2024 	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2025 	     le16_to_cpu(msg->hdr.version));
2026 }
2027 
2028 /*
2029  * @req has to be assigned a tid and registered.
2030  */
2031 static void send_request(struct ceph_osd_request *req)
2032 {
2033 	struct ceph_osd *osd = req->r_osd;
2034 
2035 	verify_osd_locked(osd);
2036 	WARN_ON(osd->o_osd != req->r_t.osd);
2037 
2038 	/* backoff? */
2039 	if (should_plug_request(req))
2040 		return;
2041 
2042 	/*
2043 	 * We may have a previously queued request message hanging
2044 	 * around.  Cancel it to avoid corrupting the msgr.
2045 	 */
2046 	if (req->r_sent)
2047 		ceph_msg_revoke(req->r_request);
2048 
2049 	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2050 	if (req->r_attempts)
2051 		req->r_flags |= CEPH_OSD_FLAG_RETRY;
2052 	else
2053 		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2054 
2055 	encode_request_partial(req, req->r_request);
2056 
2057 	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2058 	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2059 	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2060 	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2061 	     req->r_attempts);
2062 
2063 	req->r_t.paused = false;
2064 	req->r_stamp = jiffies;
2065 	req->r_attempts++;
2066 
2067 	req->r_sent = osd->o_incarnation;
2068 	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2069 	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2070 }
2071 
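/*
 * Subscribe to the next osdmap epoch.  If the cluster is in a state
 * we can only get out of via a map change (full or paused), make the
 * subscription continuous instead of one-shot.
 */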
2072 static void maybe_request_map(struct ceph_osd_client *osdc)
2073 {
2074 	bool continuous = false;
2075 
2076 	verify_osdc_locked(osdc);
2077 	WARN_ON(!osdc->osdmap->epoch);
2078 
2079 	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2080 	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2081 	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2082 		dout("%s osdc %p continuous\n", __func__, osdc);
2083 		continuous = true;
2084 	} else {
2085 		dout("%s osdc %p onetime\n", __func__, osdc);
2086 	}
2087 
2088 	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2089 			       osdc->osdmap->epoch + 1, continuous))
2090 		ceph_monc_renew_subs(&osdc->client->monc);
2091 }
2092 
2093 static void complete_request(struct ceph_osd_request *req, int err);
2094 static void send_map_check(struct ceph_osd_request *req);
2095 
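/*
 * Map @req onto an OSD session and send it, unless the map or pool
 * state requires it to be paused or aborted.  Called with osdc->lock
 * held for read; if the pool doesn't exist yet or a new OSD session
 * must be created, the lock is promoted to write and the target is
 * recalculated.
 */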
2096 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2097 {
2098 	struct ceph_osd_client *osdc = req->r_osdc;
2099 	struct ceph_osd *osd;
2100 	enum calc_target_result ct_res;
2101 	bool need_send = false;
2102 	bool promoted = false;
2103 	bool need_abort = false;
2104 
2105 	WARN_ON(req->r_tid);
2106 	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2107 
2108 again:
2109 	ct_res = calc_target(osdc, &req->r_t, NULL, false);
2110 	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2111 		goto promote;
2112 
2113 	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2114 	if (IS_ERR(osd)) {
2115 		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2116 		goto promote;
2117 	}
2118 
2119 	if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2120 		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2121 		     osdc->epoch_barrier);
2122 		req->r_t.paused = true;
2123 		maybe_request_map(osdc);
2124 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2125 		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2126 		dout("req %p pausewr\n", req);
2127 		req->r_t.paused = true;
2128 		maybe_request_map(osdc);
2129 	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2130 		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2131 		dout("req %p pauserd\n", req);
2132 		req->r_t.paused = true;
2133 		maybe_request_map(osdc);
2134 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2135 		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2136 				     CEPH_OSD_FLAG_FULL_FORCE)) &&
2137 		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2138 		    pool_full(osdc, req->r_t.base_oloc.pool))) {
2139 		dout("req %p full/pool_full\n", req);
2140 		pr_warn_ratelimited("FULL or reached pool quota\n");
2141 		req->r_t.paused = true;
2142 		maybe_request_map(osdc);
2143 		if (req->r_abort_on_full)
2144 			need_abort = true;
2145 	} else if (!osd_homeless(osd)) {
2146 		need_send = true;
2147 	} else {
2148 		maybe_request_map(osdc);
2149 	}
2150 
2151 	mutex_lock(&osd->lock);
2152 	/*
2153 	 * Assign the tid atomically with send_request() to protect
2154 	 * multiple writes to the same object from racing with each
2155 	 * other, which would result in out-of-order ops on the OSDs.
2156 	 */
2157 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
2158 	link_request(osd, req);
2159 	if (need_send)
2160 		send_request(req);
2161 	else if (need_abort)
2162 		complete_request(req, -ENOSPC);
2163 	mutex_unlock(&osd->lock);
2164 
2165 	if (ct_res == CALC_TARGET_POOL_DNE)
2166 		send_map_check(req);
2167 
2168 	if (promoted)
2169 		downgrade_write(&osdc->lock);
2170 	return;
2171 
2172 promote:
2173 	up_read(&osdc->lock);
2174 	down_write(&osdc->lock);
2175 	wrlocked = true;
2176 	promoted = true;
2177 	goto again;
2178 }
2179 
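/*
 * All requests commit to stable storage -- flag ONDISK
 * unconditionally and start the clock used by the request timeout
 * check in handle_timeout().
 */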
2180 static void account_request(struct ceph_osd_request *req)
2181 {
2182 	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2183 	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2184 
2185 	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2186 	atomic_inc(&req->r_osdc->num_requests);
2187 
2188 	req->r_start_stamp = jiffies;
2189 }
2190 
2191 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2192 {
2193 	ceph_osdc_get_request(req);
2194 	account_request(req);
2195 	__submit_request(req, wrlocked);
2196 }
2197 
2198 static void finish_request(struct ceph_osd_request *req)
2199 {
2200 	struct ceph_osd_client *osdc = req->r_osdc;
2201 
2202 	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2203 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2204 
2205 	if (req->r_osd)
2206 		unlink_request(req->r_osd, req);
2207 	atomic_dec(&osdc->num_requests);
2208 
2209 	/*
2210 	 * If an OSD has failed or come back and a request has been sent
2211 	 * twice, it's possible to get a reply and end up here while the
2212 	 * request message is still queued for delivery.  We will ignore
2213 	 * the reply, so it's not a big deal, but better to try to catch it.
2214 	 */
2215 	ceph_msg_revoke(req->r_request);
2216 	ceph_msg_revoke_incoming(req->r_reply);
2217 }
2218 
2219 static void __complete_request(struct ceph_osd_request *req)
2220 {
2221 	if (req->r_callback) {
2222 		dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
2223 		     req->r_tid, req->r_callback, req->r_result);
2224 		req->r_callback(req);
2225 	}
2226 }
2227 
2228 /*
2229  * This is open-coded in handle_reply().
2230  */
2231 static void complete_request(struct ceph_osd_request *req, int err)
2232 {
2233 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2234 
2235 	req->r_result = err;
2236 	finish_request(req);
2237 	__complete_request(req);
2238 	complete_all(&req->r_completion);
2239 	ceph_osdc_put_request(req);
2240 }
2241 
2242 static void cancel_map_check(struct ceph_osd_request *req)
2243 {
2244 	struct ceph_osd_client *osdc = req->r_osdc;
2245 	struct ceph_osd_request *lookup_req;
2246 
2247 	verify_osdc_wrlocked(osdc);
2248 
2249 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2250 	if (!lookup_req)
2251 		return;
2252 
2253 	WARN_ON(lookup_req != req);
2254 	erase_request_mc(&osdc->map_checks, req);
2255 	ceph_osdc_put_request(req);
2256 }
2257 
2258 static void cancel_request(struct ceph_osd_request *req)
2259 {
2260 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2261 
2262 	cancel_map_check(req);
2263 	finish_request(req);
2264 	complete_all(&req->r_completion);
2265 	ceph_osdc_put_request(req);
2266 }
2267 
2268 static void abort_request(struct ceph_osd_request *req, int err)
2269 {
2270 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2271 
2272 	cancel_map_check(req);
2273 	complete_request(req, err);
2274 }
2275 
2276 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2277 {
2278 	if (likely(eb > osdc->epoch_barrier)) {
2279 		dout("updating epoch_barrier from %u to %u\n",
2280 				osdc->epoch_barrier, eb);
2281 		osdc->epoch_barrier = eb;
2282 		/* Request map if we're not to the barrier yet */
2283 		if (eb > osdc->osdmap->epoch)
2284 			maybe_request_map(osdc);
2285 	}
2286 }
2287 
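/*
 * Raise the epoch barrier on behalf of an external caller.  The
 * common "barrier already high enough" case is checked under the
 * read lock, so the semaphore is only taken for write when the
 * barrier actually has to move.  A hypothetical caller:
 *
 *	ceph_osdc_update_epoch_barrier(osdc, le32_to_cpu(epoch));
 */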
2288 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2289 {
2290 	down_read(&osdc->lock);
2291 	if (unlikely(eb > osdc->epoch_barrier)) {
2292 		up_read(&osdc->lock);
2293 		down_write(&osdc->lock);
2294 		update_epoch_barrier(osdc, eb);
2295 		up_write(&osdc->lock);
2296 	} else {
2297 		up_read(&osdc->lock);
2298 	}
2299 }
2300 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2301 
2302 /*
2303  * Drop all pending requests that are stalled waiting on a full condition to
2304  * clear, and complete them with -ENOSPC as the return code.  Set the
2305  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2306  * cancelled.
2307  */
2308 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2309 {
2310 	struct rb_node *n;
2311 	bool victims = false;
2312 
2313 	dout("enter abort_on_full\n");
2314 
2315 	if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
2316 		goto out;
2317 
2318 	/* Scan list and see if there is anything to abort */
2319 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2320 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2321 		struct rb_node *m;
2322 
2323 		m = rb_first(&osd->o_requests);
2324 		while (m) {
2325 			struct ceph_osd_request *req = rb_entry(m,
2326 					struct ceph_osd_request, r_node);
2327 			m = rb_next(m);
2328 
2329 			if (req->r_abort_on_full) {
2330 				victims = true;
2331 				break;
2332 			}
2333 		}
2334 		if (victims)
2335 			break;
2336 	}
2337 
2338 	if (!victims)
2339 		goto out;
2340 
2341 	/*
2342 	 * Update the barrier to current epoch if it's behind that point,
2343 	 * since we know we have some calls to be aborted in the tree.
2344 	 */
2345 	update_epoch_barrier(osdc, osdc->osdmap->epoch);
2346 
2347 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2348 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2349 		struct rb_node *m;
2350 
2351 		m = rb_first(&osd->o_requests);
2352 		while (m) {
2353 			struct ceph_osd_request *req = rb_entry(m,
2354 					struct ceph_osd_request, r_node);
2355 			m = rb_next(m);
2356 
2357 			if (req->r_abort_on_full &&
2358 			    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2359 			     pool_full(osdc, req->r_t.target_oloc.pool)))
2360 				abort_request(req, -ENOSPC);
2361 		}
2362 	}
2363 out:
2364 	dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
2365 }
2366 
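/*
 * Handle a request whose pool doesn't exist in the current map: if we
 * already sent it, the pool must have been deleted, so complete the
 * request with -ENOENT once the map is provably new enough; otherwise
 * the pool may simply not have been created yet, so ask the monitor
 * for the newest epoch first.
 */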
2367 static void check_pool_dne(struct ceph_osd_request *req)
2368 {
2369 	struct ceph_osd_client *osdc = req->r_osdc;
2370 	struct ceph_osdmap *map = osdc->osdmap;
2371 
2372 	verify_osdc_wrlocked(osdc);
2373 	WARN_ON(!map->epoch);
2374 
2375 	if (req->r_attempts) {
2376 		/*
2377 		 * We sent a request earlier, which means that
2378 		 * previously the pool existed, and now it does not
2379 		 * (i.e., it was deleted).
2380 		 */
2381 		req->r_map_dne_bound = map->epoch;
2382 		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2383 		     req->r_tid);
2384 	} else {
2385 		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2386 		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
2387 	}
2388 
2389 	if (req->r_map_dne_bound) {
2390 		if (map->epoch >= req->r_map_dne_bound) {
2391 			/* we had a new enough map */
2392 			pr_info_ratelimited("tid %llu pool does not exist\n",
2393 					    req->r_tid);
2394 			complete_request(req, -ENOENT);
2395 		}
2396 	} else {
2397 		send_map_check(req);
2398 	}
2399 }
2400 
2401 static void map_check_cb(struct ceph_mon_generic_request *greq)
2402 {
2403 	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2404 	struct ceph_osd_request *req;
2405 	u64 tid = greq->private_data;
2406 
2407 	WARN_ON(greq->result || !greq->u.newest);
2408 
2409 	down_write(&osdc->lock);
2410 	req = lookup_request_mc(&osdc->map_checks, tid);
2411 	if (!req) {
2412 		dout("%s tid %llu dne\n", __func__, tid);
2413 		goto out_unlock;
2414 	}
2415 
2416 	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2417 	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2418 	if (!req->r_map_dne_bound)
2419 		req->r_map_dne_bound = greq->u.newest;
2420 	erase_request_mc(&osdc->map_checks, req);
2421 	check_pool_dne(req);
2422 
2423 	ceph_osdc_put_request(req);
2424 out_unlock:
2425 	up_write(&osdc->lock);
2426 }
2427 
2428 static void send_map_check(struct ceph_osd_request *req)
2429 {
2430 	struct ceph_osd_client *osdc = req->r_osdc;
2431 	struct ceph_osd_request *lookup_req;
2432 	int ret;
2433 
2434 	verify_osdc_wrlocked(osdc);
2435 
2436 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2437 	if (lookup_req) {
2438 		WARN_ON(lookup_req != req);
2439 		return;
2440 	}
2441 
2442 	ceph_osdc_get_request(req);
2443 	insert_request_mc(&osdc->map_checks, req);
2444 	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2445 					  map_check_cb, req->r_tid);
2446 	WARN_ON(ret);
2447 }
2448 
2449 /*
2450  * lingering requests, watch/notify v2 infrastructure
2451  */
2452 static void linger_release(struct kref *kref)
2453 {
2454 	struct ceph_osd_linger_request *lreq =
2455 	    container_of(kref, struct ceph_osd_linger_request, kref);
2456 
2457 	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2458 	     lreq->reg_req, lreq->ping_req);
2459 	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2460 	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2461 	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2462 	WARN_ON(!list_empty(&lreq->scan_item));
2463 	WARN_ON(!list_empty(&lreq->pending_lworks));
2464 	WARN_ON(lreq->osd);
2465 
2466 	if (lreq->reg_req)
2467 		ceph_osdc_put_request(lreq->reg_req);
2468 	if (lreq->ping_req)
2469 		ceph_osdc_put_request(lreq->ping_req);
2470 	target_destroy(&lreq->t);
2471 	kfree(lreq);
2472 }
2473 
2474 static void linger_put(struct ceph_osd_linger_request *lreq)
2475 {
2476 	if (lreq)
2477 		kref_put(&lreq->kref, linger_release);
2478 }
2479 
2480 static struct ceph_osd_linger_request *
2481 linger_get(struct ceph_osd_linger_request *lreq)
2482 {
2483 	kref_get(&lreq->kref);
2484 	return lreq;
2485 }
2486 
2487 static struct ceph_osd_linger_request *
2488 linger_alloc(struct ceph_osd_client *osdc)
2489 {
2490 	struct ceph_osd_linger_request *lreq;
2491 
2492 	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2493 	if (!lreq)
2494 		return NULL;
2495 
2496 	kref_init(&lreq->kref);
2497 	mutex_init(&lreq->lock);
2498 	RB_CLEAR_NODE(&lreq->node);
2499 	RB_CLEAR_NODE(&lreq->osdc_node);
2500 	RB_CLEAR_NODE(&lreq->mc_node);
2501 	INIT_LIST_HEAD(&lreq->scan_item);
2502 	INIT_LIST_HEAD(&lreq->pending_lworks);
2503 	init_completion(&lreq->reg_commit_wait);
2504 	init_completion(&lreq->notify_finish_wait);
2505 
2506 	lreq->osdc = osdc;
2507 	target_init(&lreq->t);
2508 
2509 	dout("%s lreq %p\n", __func__, lreq);
2510 	return lreq;
2511 }
2512 
2513 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2514 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2515 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2516 
2517 /*
2518  * Create linger request <-> OSD session relation.
2519  *
2520  * @lreq has to be registered, @osd may be homeless.
2521  */
2522 static void link_linger(struct ceph_osd *osd,
2523 			struct ceph_osd_linger_request *lreq)
2524 {
2525 	verify_osd_locked(osd);
2526 	WARN_ON(!lreq->linger_id || lreq->osd);
2527 	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2528 	     osd->o_osd, lreq, lreq->linger_id);
2529 
2530 	if (!osd_homeless(osd))
2531 		__remove_osd_from_lru(osd);
2532 	else
2533 		atomic_inc(&osd->o_osdc->num_homeless);
2534 
2535 	get_osd(osd);
2536 	insert_linger(&osd->o_linger_requests, lreq);
2537 	lreq->osd = osd;
2538 }
2539 
2540 static void unlink_linger(struct ceph_osd *osd,
2541 			  struct ceph_osd_linger_request *lreq)
2542 {
2543 	verify_osd_locked(osd);
2544 	WARN_ON(lreq->osd != osd);
2545 	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2546 	     osd->o_osd, lreq, lreq->linger_id);
2547 
2548 	lreq->osd = NULL;
2549 	erase_linger(&osd->o_linger_requests, lreq);
2550 	put_osd(osd);
2551 
2552 	if (!osd_homeless(osd))
2553 		maybe_move_osd_to_lru(osd);
2554 	else
2555 		atomic_dec(&osd->o_osdc->num_homeless);
2556 }
2557 
2558 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2559 {
2560 	verify_osdc_locked(lreq->osdc);
2561 
2562 	return !RB_EMPTY_NODE(&lreq->osdc_node);
2563 }
2564 
2565 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2566 {
2567 	struct ceph_osd_client *osdc = lreq->osdc;
2568 	bool registered;
2569 
2570 	down_read(&osdc->lock);
2571 	registered = __linger_registered(lreq);
2572 	up_read(&osdc->lock);
2573 
2574 	return registered;
2575 }
2576 
2577 static void linger_register(struct ceph_osd_linger_request *lreq)
2578 {
2579 	struct ceph_osd_client *osdc = lreq->osdc;
2580 
2581 	verify_osdc_wrlocked(osdc);
2582 	WARN_ON(lreq->linger_id);
2583 
2584 	linger_get(lreq);
2585 	lreq->linger_id = ++osdc->last_linger_id;
2586 	insert_linger_osdc(&osdc->linger_requests, lreq);
2587 }
2588 
2589 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2590 {
2591 	struct ceph_osd_client *osdc = lreq->osdc;
2592 
2593 	verify_osdc_wrlocked(osdc);
2594 
2595 	erase_linger_osdc(&osdc->linger_requests, lreq);
2596 	linger_put(lreq);
2597 }
2598 
2599 static void cancel_linger_request(struct ceph_osd_request *req)
2600 {
2601 	struct ceph_osd_linger_request *lreq = req->r_priv;
2602 
2603 	WARN_ON(!req->r_linger);
2604 	cancel_request(req);
2605 	linger_put(lreq);
2606 }
2607 
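/*
 * Deferred delivery of watch notifications and watch errors to the
 * user's callbacks, run from osdc->notify_wq instead of message
 * dispatch context.
 */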
2608 struct linger_work {
2609 	struct work_struct work;
2610 	struct ceph_osd_linger_request *lreq;
2611 	struct list_head pending_item;
2612 	unsigned long queued_stamp;
2613 
2614 	union {
2615 		struct {
2616 			u64 notify_id;
2617 			u64 notifier_id;
2618 			void *payload; /* points into @msg front */
2619 			size_t payload_len;
2620 
2621 			struct ceph_msg *msg; /* for ceph_msg_put() */
2622 		} notify;
2623 		struct {
2624 			int err;
2625 		} error;
2626 	};
2627 };
2628 
2629 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2630 				       work_func_t workfn)
2631 {
2632 	struct linger_work *lwork;
2633 
2634 	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2635 	if (!lwork)
2636 		return NULL;
2637 
2638 	INIT_WORK(&lwork->work, workfn);
2639 	INIT_LIST_HEAD(&lwork->pending_item);
2640 	lwork->lreq = linger_get(lreq);
2641 
2642 	return lwork;
2643 }
2644 
2645 static void lwork_free(struct linger_work *lwork)
2646 {
2647 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2648 
2649 	mutex_lock(&lreq->lock);
2650 	list_del(&lwork->pending_item);
2651 	mutex_unlock(&lreq->lock);
2652 
2653 	linger_put(lreq);
2654 	kfree(lwork);
2655 }
2656 
2657 static void lwork_queue(struct linger_work *lwork)
2658 {
2659 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2660 	struct ceph_osd_client *osdc = lreq->osdc;
2661 
2662 	verify_lreq_locked(lreq);
2663 	WARN_ON(!list_empty(&lwork->pending_item));
2664 
2665 	lwork->queued_stamp = jiffies;
2666 	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2667 	queue_work(osdc->notify_wq, &lwork->work);
2668 }
2669 
2670 static void do_watch_notify(struct work_struct *w)
2671 {
2672 	struct linger_work *lwork = container_of(w, struct linger_work, work);
2673 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2674 
2675 	if (!linger_registered(lreq)) {
2676 		dout("%s lreq %p not registered\n", __func__, lreq);
2677 		goto out;
2678 	}
2679 
2680 	WARN_ON(!lreq->is_watch);
2681 	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2682 	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2683 	     lwork->notify.payload_len);
2684 	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2685 		  lwork->notify.notifier_id, lwork->notify.payload,
2686 		  lwork->notify.payload_len);
2687 
2688 out:
2689 	ceph_msg_put(lwork->notify.msg);
2690 	lwork_free(lwork);
2691 }
2692 
2693 static void do_watch_error(struct work_struct *w)
2694 {
2695 	struct linger_work *lwork = container_of(w, struct linger_work, work);
2696 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2697 
2698 	if (!linger_registered(lreq)) {
2699 		dout("%s lreq %p not registered\n", __func__, lreq);
2700 		goto out;
2701 	}
2702 
2703 	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2704 	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2705 
2706 out:
2707 	lwork_free(lwork);
2708 }
2709 
2710 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2711 {
2712 	struct linger_work *lwork;
2713 
2714 	lwork = lwork_alloc(lreq, do_watch_error);
2715 	if (!lwork) {
2716 		pr_err("failed to allocate error-lwork\n");
2717 		return;
2718 	}
2719 
2720 	lwork->error.err = lreq->last_error;
2721 	lwork_queue(lwork);
2722 }
2723 
2724 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2725 				       int result)
2726 {
2727 	if (!completion_done(&lreq->reg_commit_wait)) {
2728 		lreq->reg_commit_error = (result <= 0 ? result : 0);
2729 		complete_all(&lreq->reg_commit_wait);
2730 	}
2731 }
2732 
2733 static void linger_commit_cb(struct ceph_osd_request *req)
2734 {
2735 	struct ceph_osd_linger_request *lreq = req->r_priv;
2736 
2737 	mutex_lock(&lreq->lock);
2738 	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2739 	     lreq->linger_id, req->r_result);
2740 	linger_reg_commit_complete(lreq, req->r_result);
2741 	lreq->committed = true;
2742 
2743 	if (!lreq->is_watch) {
2744 		struct ceph_osd_data *osd_data =
2745 		    osd_req_op_data(req, 0, notify, response_data);
2746 		void *p = page_address(osd_data->pages[0]);
2747 
2748 		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2749 			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2750 
2751 		/* make note of the notify_id */
2752 		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2753 			lreq->notify_id = ceph_decode_64(&p);
2754 			dout("lreq %p notify_id %llu\n", lreq,
2755 			     lreq->notify_id);
2756 		} else {
2757 			dout("lreq %p no notify_id\n", lreq);
2758 		}
2759 	}
2760 
2761 	mutex_unlock(&lreq->lock);
2762 	linger_put(lreq);
2763 }
2764 
2765 static int normalize_watch_error(int err)
2766 {
2767 	/*
2768 	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2769 	 * notification and a failure to reconnect because we raced with
2770 	 * the delete appear the same to the user.
2771 	 */
2772 	if (err == -ENOENT)
2773 		err = -ENOTCONN;
2774 
2775 	return err;
2776 }
2777 
2778 static void linger_reconnect_cb(struct ceph_osd_request *req)
2779 {
2780 	struct ceph_osd_linger_request *lreq = req->r_priv;
2781 
2782 	mutex_lock(&lreq->lock);
2783 	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2784 	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
2785 	if (req->r_result < 0) {
2786 		if (!lreq->last_error) {
2787 			lreq->last_error = normalize_watch_error(req->r_result);
2788 			queue_watch_error(lreq);
2789 		}
2790 	}
2791 
2792 	mutex_unlock(&lreq->lock);
2793 	linger_put(lreq);
2794 }
2795 
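/*
 * (Re)send the registration request for @lreq: WATCH_OP_RECONNECT
 * with a bumped register_gen if the watch was already committed,
 * a plain WATCH or NOTIFY registration otherwise.
 */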
2796 static void send_linger(struct ceph_osd_linger_request *lreq)
2797 {
2798 	struct ceph_osd_request *req = lreq->reg_req;
2799 	struct ceph_osd_req_op *op = &req->r_ops[0];
2800 
2801 	verify_osdc_wrlocked(req->r_osdc);
2802 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2803 
2804 	if (req->r_osd)
2805 		cancel_linger_request(req);
2806 
2807 	request_reinit(req);
2808 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2809 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2810 	req->r_flags = lreq->t.flags;
2811 	req->r_mtime = lreq->mtime;
2812 
2813 	mutex_lock(&lreq->lock);
2814 	if (lreq->is_watch && lreq->committed) {
2815 		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2816 			op->watch.cookie != lreq->linger_id);
2817 		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2818 		op->watch.gen = ++lreq->register_gen;
2819 		dout("lreq %p reconnect register_gen %u\n", lreq,
2820 		     op->watch.gen);
2821 		req->r_callback = linger_reconnect_cb;
2822 	} else {
2823 		if (!lreq->is_watch)
2824 			lreq->notify_id = 0;
2825 		else
2826 			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2827 		dout("lreq %p register\n", lreq);
2828 		req->r_callback = linger_commit_cb;
2829 	}
2830 	mutex_unlock(&lreq->lock);
2831 
2832 	req->r_priv = linger_get(lreq);
2833 	req->r_linger = true;
2834 
2835 	submit_request(req, true);
2836 }
2837 
2838 static void linger_ping_cb(struct ceph_osd_request *req)
2839 {
2840 	struct ceph_osd_linger_request *lreq = req->r_priv;
2841 
2842 	mutex_lock(&lreq->lock);
2843 	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2844 	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2845 	     lreq->last_error);
2846 	if (lreq->register_gen == req->r_ops[0].watch.gen) {
2847 		if (!req->r_result) {
2848 			lreq->watch_valid_thru = lreq->ping_sent;
2849 		} else if (!lreq->last_error) {
2850 			lreq->last_error = normalize_watch_error(req->r_result);
2851 			queue_watch_error(lreq);
2852 		}
2853 	} else {
2854 		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2855 		     lreq->register_gen, req->r_ops[0].watch.gen);
2856 	}
2857 
2858 	mutex_unlock(&lreq->lock);
2859 	linger_put(lreq);
2860 }
2861 
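/*
 * Ping an established watch (WATCH_OP_PING) so that connection or
 * watch loss is detected.  linger_ping_cb() matches the pong against
 * register_gen to ignore replies to pings sent before a reconnect.
 */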
2862 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2863 {
2864 	struct ceph_osd_client *osdc = lreq->osdc;
2865 	struct ceph_osd_request *req = lreq->ping_req;
2866 	struct ceph_osd_req_op *op = &req->r_ops[0];
2867 
2868 	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2869 		dout("%s PAUSERD\n", __func__);
2870 		return;
2871 	}
2872 
2873 	lreq->ping_sent = jiffies;
2874 	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2875 	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
2876 	     lreq->register_gen);
2877 
2878 	if (req->r_osd)
2879 		cancel_linger_request(req);
2880 
2881 	request_reinit(req);
2882 	target_copy(&req->r_t, &lreq->t);
2883 
2884 	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2885 		op->watch.cookie != lreq->linger_id ||
2886 		op->watch.op != CEPH_OSD_WATCH_OP_PING);
2887 	op->watch.gen = lreq->register_gen;
2888 	req->r_callback = linger_ping_cb;
2889 	req->r_priv = linger_get(lreq);
2890 	req->r_linger = true;
2891 
2892 	ceph_osdc_get_request(req);
2893 	account_request(req);
2894 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
2895 	link_request(lreq->osd, req);
2896 	send_request(req);
2897 }
2898 
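/*
 * Pick an OSD session for @lreq and send the initial registration
 * request.  Called with osdc->lock held for write.
 */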
2899 static void linger_submit(struct ceph_osd_linger_request *lreq)
2900 {
2901 	struct ceph_osd_client *osdc = lreq->osdc;
2902 	struct ceph_osd *osd;
2903 
2904 	calc_target(osdc, &lreq->t, NULL, false);
2905 	osd = lookup_create_osd(osdc, lreq->t.osd, true);
2906 	link_linger(osd, lreq);
2907 
2908 	send_linger(lreq);
2909 }
2910 
2911 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2912 {
2913 	struct ceph_osd_client *osdc = lreq->osdc;
2914 	struct ceph_osd_linger_request *lookup_lreq;
2915 
2916 	verify_osdc_wrlocked(osdc);
2917 
2918 	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2919 				       lreq->linger_id);
2920 	if (!lookup_lreq)
2921 		return;
2922 
2923 	WARN_ON(lookup_lreq != lreq);
2924 	erase_linger_mc(&osdc->linger_map_checks, lreq);
2925 	linger_put(lreq);
2926 }
2927 
2928 /*
2929  * @lreq has to be both registered and linked.
2930  */
2931 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2932 {
2933 	if (lreq->is_watch && lreq->ping_req->r_osd)
2934 		cancel_linger_request(lreq->ping_req);
2935 	if (lreq->reg_req->r_osd)
2936 		cancel_linger_request(lreq->reg_req);
2937 	cancel_linger_map_check(lreq);
2938 	unlink_linger(lreq->osd, lreq);
2939 	linger_unregister(lreq);
2940 }
2941 
2942 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2943 {
2944 	struct ceph_osd_client *osdc = lreq->osdc;
2945 
2946 	down_write(&osdc->lock);
2947 	if (__linger_registered(lreq))
2948 		__linger_cancel(lreq);
2949 	up_write(&osdc->lock);
2950 }
2951 
2952 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2953 
2954 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2955 {
2956 	struct ceph_osd_client *osdc = lreq->osdc;
2957 	struct ceph_osdmap *map = osdc->osdmap;
2958 
2959 	verify_osdc_wrlocked(osdc);
2960 	WARN_ON(!map->epoch);
2961 
2962 	if (lreq->register_gen) {
2963 		lreq->map_dne_bound = map->epoch;
2964 		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2965 		     lreq, lreq->linger_id);
2966 	} else {
2967 		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2968 		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2969 		     map->epoch);
2970 	}
2971 
2972 	if (lreq->map_dne_bound) {
2973 		if (map->epoch >= lreq->map_dne_bound) {
2974 			/* we had a new enough map */
2975 			pr_info("linger_id %llu pool does not exist\n",
2976 				lreq->linger_id);
2977 			linger_reg_commit_complete(lreq, -ENOENT);
2978 			__linger_cancel(lreq);
2979 		}
2980 	} else {
2981 		send_linger_map_check(lreq);
2982 	}
2983 }
2984 
2985 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2986 {
2987 	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2988 	struct ceph_osd_linger_request *lreq;
2989 	u64 linger_id = greq->private_data;
2990 
2991 	WARN_ON(greq->result || !greq->u.newest);
2992 
2993 	down_write(&osdc->lock);
2994 	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2995 	if (!lreq) {
2996 		dout("%s linger_id %llu dne\n", __func__, linger_id);
2997 		goto out_unlock;
2998 	}
2999 
3000 	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3001 	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3002 	     greq->u.newest);
3003 	if (!lreq->map_dne_bound)
3004 		lreq->map_dne_bound = greq->u.newest;
3005 	erase_linger_mc(&osdc->linger_map_checks, lreq);
3006 	check_linger_pool_dne(lreq);
3007 
3008 	linger_put(lreq);
3009 out_unlock:
3010 	up_write(&osdc->lock);
3011 }
3012 
3013 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3014 {
3015 	struct ceph_osd_client *osdc = lreq->osdc;
3016 	struct ceph_osd_linger_request *lookup_lreq;
3017 	int ret;
3018 
3019 	verify_osdc_wrlocked(osdc);
3020 
3021 	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3022 				       lreq->linger_id);
3023 	if (lookup_lreq) {
3024 		WARN_ON(lookup_lreq != lreq);
3025 		return;
3026 	}
3027 
3028 	linger_get(lreq);
3029 	insert_linger_mc(&osdc->linger_map_checks, lreq);
3030 	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3031 					  linger_map_check_cb, lreq->linger_id);
3032 	WARN_ON(ret);
3033 }
3034 
3035 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3036 {
3037 	int ret;
3038 
3039 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3040 	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3041 	return ret ?: lreq->reg_commit_error;
3042 }
3043 
3044 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3045 {
3046 	int ret;
3047 
3048 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3049 	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3050 	return ret ?: lreq->notify_finish_error;
3051 }
3052 
3053 /*
3054  * Timeout callback, called every N seconds.  When one or more OSD
3055  * requests have been active for more than N seconds, we send a
3056  * keepalive (tag + timestamp) to the OSD to ensure that any
3057  * communications channel reset is detected.
3058  */
3059 static void handle_timeout(struct work_struct *work)
3060 {
3061 	struct ceph_osd_client *osdc =
3062 		container_of(work, struct ceph_osd_client, timeout_work.work);
3063 	struct ceph_options *opts = osdc->client->options;
3064 	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3065 	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3066 	LIST_HEAD(slow_osds);
3067 	struct rb_node *n, *p;
3068 
3069 	dout("%s osdc %p\n", __func__, osdc);
3070 	down_write(&osdc->lock);
3071 
3072 	/*
3073 	 * Ping OSDs that are a bit slow.  This ensures that if there
3074 	 * is a break in the TCP connection we will notice and reopen
3075 	 * a connection with that OSD (from the fault callback).
3076 	 */
3077 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3078 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3079 		bool found = false;
3080 
3081 		for (p = rb_first(&osd->o_requests); p; ) {
3082 			struct ceph_osd_request *req =
3083 			    rb_entry(p, struct ceph_osd_request, r_node);
3084 
3085 			p = rb_next(p); /* abort_request() */
3086 
3087 			if (time_before(req->r_stamp, cutoff)) {
3088 				dout(" req %p tid %llu on osd%d is laggy\n",
3089 				     req, req->r_tid, osd->o_osd);
3090 				found = true;
3091 			}
3092 			if (opts->osd_request_timeout &&
3093 			    time_before(req->r_start_stamp, expiry_cutoff)) {
3094 				pr_err_ratelimited("tid %llu on osd%d timeout\n",
3095 				       req->r_tid, osd->o_osd);
3096 				abort_request(req, -ETIMEDOUT);
3097 			}
3098 		}
3099 		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3100 			struct ceph_osd_linger_request *lreq =
3101 			    rb_entry(p, struct ceph_osd_linger_request, node);
3102 
3103 			dout(" lreq %p linger_id %llu is served by osd%d\n",
3104 			     lreq, lreq->linger_id, osd->o_osd);
3105 			found = true;
3106 
3107 			mutex_lock(&lreq->lock);
3108 			if (lreq->is_watch && lreq->committed && !lreq->last_error)
3109 				send_linger_ping(lreq);
3110 			mutex_unlock(&lreq->lock);
3111 		}
3112 
3113 		if (found)
3114 			list_move_tail(&osd->o_keepalive_item, &slow_osds);
3115 	}
3116 
3117 	if (opts->osd_request_timeout) {
3118 		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3119 			struct ceph_osd_request *req =
3120 			    rb_entry(p, struct ceph_osd_request, r_node);
3121 
3122 			p = rb_next(p); /* abort_request() */
3123 
3124 			if (time_before(req->r_start_stamp, expiry_cutoff)) {
3125 				pr_err_ratelimited("tid %llu on osd%d timeout\n",
3126 				       req->r_tid, osdc->homeless_osd.o_osd);
3127 				abort_request(req, -ETIMEDOUT);
3128 			}
3129 		}
3130 	}
3131 
3132 	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3133 		maybe_request_map(osdc);
3134 
3135 	while (!list_empty(&slow_osds)) {
3136 		struct ceph_osd *osd = list_first_entry(&slow_osds,
3137 							struct ceph_osd,
3138 							o_keepalive_item);
3139 		list_del_init(&osd->o_keepalive_item);
3140 		ceph_con_keepalive(&osd->o_con);
3141 	}
3142 
3143 	up_write(&osdc->lock);
3144 	schedule_delayed_work(&osdc->timeout_work,
3145 			      osdc->client->options->osd_keepalive_timeout);
3146 }
3147 
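/*
 * Periodically close OSD sessions that have been idle -- no requests
 * and no linger requests -- for longer than osd_idle_ttl.
 */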
3148 static void handle_osds_timeout(struct work_struct *work)
3149 {
3150 	struct ceph_osd_client *osdc =
3151 		container_of(work, struct ceph_osd_client,
3152 			     osds_timeout_work.work);
3153 	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3154 	struct ceph_osd *osd, *nosd;
3155 
3156 	dout("%s osdc %p\n", __func__, osdc);
3157 	down_write(&osdc->lock);
3158 	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3159 		if (time_before(jiffies, osd->lru_ttl))
3160 			break;
3161 
3162 		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3163 		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3164 		close_osd(osd);
3165 	}
3166 
3167 	up_write(&osdc->lock);
3168 	schedule_delayed_work(&osdc->osds_timeout_work,
3169 			      round_jiffies_relative(delay));
3170 }
3171 
3172 static int ceph_oloc_decode(void **p, void *end,
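/*
 * Decode ceph_object_locator, rejecting encodings this client can't
 * act on: an oloc key, a namespace change in a redirect and an
 * explicit hash would all retarget the request in unsupported ways.
 */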
3173 			    struct ceph_object_locator *oloc)
3174 {
3175 	u8 struct_v, struct_cv;
3176 	u32 len;
3177 	void *struct_end;
3178 	int ret = 0;
3179 
3180 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3181 	struct_v = ceph_decode_8(p);
3182 	struct_cv = ceph_decode_8(p);
3183 	if (struct_v < 3) {
3184 		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3185 			struct_v, struct_cv);
3186 		goto e_inval;
3187 	}
3188 	if (struct_cv > 6) {
3189 		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3190 			struct_v, struct_cv);
3191 		goto e_inval;
3192 	}
3193 	len = ceph_decode_32(p);
3194 	ceph_decode_need(p, end, len, e_inval);
3195 	struct_end = *p + len;
3196 
3197 	oloc->pool = ceph_decode_64(p);
3198 	*p += 4; /* skip preferred */
3199 
3200 	len = ceph_decode_32(p);
3201 	if (len > 0) {
3202 		pr_warn("ceph_object_locator::key is set\n");
3203 		goto e_inval;
3204 	}
3205 
3206 	if (struct_v >= 5) {
3207 		bool changed = false;
3208 
3209 		len = ceph_decode_32(p);
3210 		if (len > 0) {
3211 			ceph_decode_need(p, end, len, e_inval);
3212 			if (!oloc->pool_ns ||
3213 			    ceph_compare_string(oloc->pool_ns, *p, len))
3214 				changed = true;
3215 			*p += len;
3216 		} else {
3217 			if (oloc->pool_ns)
3218 				changed = true;
3219 		}
3220 		if (changed) {
3221 			/* redirect changes namespace */
3222 			pr_warn("ceph_object_locator::nspace is changed\n");
3223 			goto e_inval;
3224 		}
3225 	}
3226 
3227 	if (struct_v >= 6) {
3228 		s64 hash = ceph_decode_64(p);
3229 		if (hash != -1) {
3230 			pr_warn("ceph_object_locator::hash is set\n");
3231 			goto e_inval;
3232 		}
3233 	}
3234 
3235 	/* skip the rest */
3236 	*p = struct_end;
3237 out:
3238 	return ret;
3239 
3240 e_inval:
3241 	ret = -EINVAL;
3242 	goto out;
3243 }
3244 
3245 static int ceph_redirect_decode(void **p, void *end,
3246 				struct ceph_request_redirect *redir)
3247 {
3248 	u8 struct_v, struct_cv;
3249 	u32 len;
3250 	void *struct_end;
3251 	int ret;
3252 
3253 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3254 	struct_v = ceph_decode_8(p);
3255 	struct_cv = ceph_decode_8(p);
3256 	if (struct_cv > 1) {
3257 		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3258 			struct_v, struct_cv);
3259 		goto e_inval;
3260 	}
3261 	len = ceph_decode_32(p);
3262 	ceph_decode_need(p, end, len, e_inval);
3263 	struct_end = *p + len;
3264 
3265 	ret = ceph_oloc_decode(p, end, &redir->oloc);
3266 	if (ret)
3267 		goto out;
3268 
3269 	len = ceph_decode_32(p);
3270 	if (len > 0) {
3271 		pr_warn("ceph_request_redirect::object_name is set\n");
3272 		goto e_inval;
3273 	}
3274 
3275 	len = ceph_decode_32(p);
3276 	*p += len; /* skip osd_instructions */
3277 
3278 	/* skip the rest */
3279 	*p = struct_end;
3280 out:
3281 	return ret;
3282 
3283 e_inval:
3284 	ret = -EINVAL;
3285 	goto out;
3286 }
3287 
3288 struct MOSDOpReply {
3289 	struct ceph_pg pgid;
3290 	u64 flags;
3291 	int result;
3292 	u32 epoch;
3293 	int num_ops;
3294 	u32 outdata_len[CEPH_OSD_MAX_OPS];
3295 	s32 rval[CEPH_OSD_MAX_OPS];
3296 	int retry_attempt;
3297 	struct ceph_eversion replay_version;
3298 	u64 user_version;
3299 	struct ceph_request_redirect redirect;
3300 };
3301 
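/*
 * Decode an MOSDOpReply front into @m, normalizing older encodings:
 * pre-v5 replies carry the replay version up front, and the redirect
 * blob exists only from v6 on (with an explicit presence byte from
 * v7).
 */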
3302 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3303 {
3304 	void *p = msg->front.iov_base;
3305 	void *const end = p + msg->front.iov_len;
3306 	u16 version = le16_to_cpu(msg->hdr.version);
3307 	struct ceph_eversion bad_replay_version;
3308 	u8 decode_redir;
3309 	u32 len;
3310 	int ret;
3311 	int i;
3312 
3313 	ceph_decode_32_safe(&p, end, len, e_inval);
3314 	ceph_decode_need(&p, end, len, e_inval);
3315 	p += len; /* skip oid */
3316 
3317 	ret = ceph_decode_pgid(&p, end, &m->pgid);
3318 	if (ret)
3319 		return ret;
3320 
3321 	ceph_decode_64_safe(&p, end, m->flags, e_inval);
3322 	ceph_decode_32_safe(&p, end, m->result, e_inval);
3323 	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3324 	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3325 	p += sizeof(bad_replay_version);
3326 	ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3327 
3328 	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3329 	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3330 		goto e_inval;
3331 
3332 	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3333 			 e_inval);
3334 	for (i = 0; i < m->num_ops; i++) {
3335 		struct ceph_osd_op *op = p;
3336 
3337 		m->outdata_len[i] = le32_to_cpu(op->payload_len);
3338 		p += sizeof(*op);
3339 	}
3340 
3341 	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3342 	for (i = 0; i < m->num_ops; i++)
3343 		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3344 
3345 	if (version >= 5) {
3346 		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3347 		memcpy(&m->replay_version, p, sizeof(m->replay_version));
3348 		p += sizeof(m->replay_version);
3349 		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3350 	} else {
3351 		m->replay_version = bad_replay_version; /* struct */
3352 		m->user_version = le64_to_cpu(m->replay_version.version);
3353 	}
3354 
3355 	if (version >= 6) {
3356 		if (version >= 7)
3357 			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3358 		else
3359 			decode_redir = 1;
3360 	} else {
3361 		decode_redir = 0;
3362 	}
3363 
3364 	if (decode_redir) {
3365 		ret = ceph_redirect_decode(&p, end, &m->redirect);
3366 		if (ret)
3367 			return ret;
3368 	} else {
3369 		ceph_oloc_init(&m->redirect.oloc);
3370 	}
3371 
3372 	return 0;
3373 
3374 e_inval:
3375 	return -EINVAL;
3376 }
3377 
3378 /*
3379  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3380  * specified.
3381  */
3382 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3383 {
3384 	struct ceph_osd_client *osdc = osd->o_osdc;
3385 	struct ceph_osd_request *req;
3386 	struct MOSDOpReply m;
3387 	u64 tid = le64_to_cpu(msg->hdr.tid);
3388 	u32 data_len = 0;
3389 	int ret;
3390 	int i;
3391 
3392 	dout("%s msg %p tid %llu\n", __func__, msg, tid);
3393 
3394 	down_read(&osdc->lock);
3395 	if (!osd_registered(osd)) {
3396 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
3397 		goto out_unlock_osdc;
3398 	}
3399 	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3400 
3401 	mutex_lock(&osd->lock);
3402 	req = lookup_request(&osd->o_requests, tid);
3403 	if (!req) {
3404 		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3405 		goto out_unlock_session;
3406 	}
3407 
3408 	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3409 	ret = decode_MOSDOpReply(msg, &m);
3410 	m.redirect.oloc.pool_ns = NULL;
3411 	if (ret) {
3412 		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3413 		       req->r_tid, ret);
3414 		ceph_msg_dump(msg);
3415 		goto fail_request;
3416 	}
3417 	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3418 	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3419 	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3420 	     le64_to_cpu(m.replay_version.version), m.user_version);
3421 
3422 	if (m.retry_attempt >= 0) {
3423 		if (m.retry_attempt != req->r_attempts - 1) {
3424 			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3425 			     req, req->r_tid, m.retry_attempt,
3426 			     req->r_attempts - 1);
3427 			goto out_unlock_session;
3428 		}
3429 	} else {
3430 		WARN_ON(1); /* MOSDOpReply v4 is assumed */
3431 	}
3432 
3433 	if (!ceph_oloc_empty(&m.redirect.oloc)) {
3434 		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3435 		     m.redirect.oloc.pool);
3436 		unlink_request(osd, req);
3437 		mutex_unlock(&osd->lock);
3438 
3439 		/*
3440 		 * Not ceph_oloc_copy() - changing pool_ns is not
3441 		 * supported.
3442 		 */
3443 		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3444 		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
3445 		req->r_tid = 0;
3446 		__submit_request(req, false);
3447 		goto out_unlock_osdc;
3448 	}
3449 
3450 	if (m.num_ops != req->r_num_ops) {
3451 		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3452 		       req->r_num_ops, req->r_tid);
3453 		goto fail_request;
3454 	}
3455 	for (i = 0; i < req->r_num_ops; i++) {
3456 		dout(" req %p tid %llu op %d rval %d len %u\n", req,
3457 		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
3458 		req->r_ops[i].rval = m.rval[i];
3459 		req->r_ops[i].outdata_len = m.outdata_len[i];
3460 		data_len += m.outdata_len[i];
3461 	}
3462 	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3463 		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3464 		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
3465 		goto fail_request;
3466 	}
3467 	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3468 	     req, req->r_tid, m.result, data_len);
3469 
3470 	/*
3471 	 * Since we only ever request ONDISK, we should only ever get
3472 	 * one (type of) reply back.
3473 	 */
3474 	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3475 	req->r_result = m.result ?: data_len;
3476 	finish_request(req);
3477 	mutex_unlock(&osd->lock);
3478 	up_read(&osdc->lock);
3479 
3480 	__complete_request(req);
3481 	complete_all(&req->r_completion);
3482 	ceph_osdc_put_request(req);
3483 	return;
3484 
3485 fail_request:
3486 	complete_request(req, -EIO);
3487 out_unlock_session:
3488 	mutex_unlock(&osd->lock);
3489 out_unlock_osdc:
3490 	up_read(&osdc->lock);
3491 }
3492 
3493 static void set_pool_was_full(struct ceph_osd_client *osdc)
3494 {
3495 	struct rb_node *n;
3496 
3497 	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3498 		struct ceph_pg_pool_info *pi =
3499 		    rb_entry(n, struct ceph_pg_pool_info, node);
3500 
3501 		pi->was_full = __pool_full(pi);
3502 	}
3503 }
3504 
3505 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3506 {
3507 	struct ceph_pg_pool_info *pi;
3508 
3509 	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3510 	if (!pi)
3511 		return false;
3512 
3513 	return pi->was_full && !__pool_full(pi);
3514 }
3515 
3516 static enum calc_target_result
3517 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3518 {
3519 	struct ceph_osd_client *osdc = lreq->osdc;
3520 	enum calc_target_result ct_res;
3521 
3522 	ct_res = calc_target(osdc, &lreq->t, NULL, true);
3523 	if (ct_res == CALC_TARGET_NEED_RESEND) {
3524 		struct ceph_osd *osd;
3525 
3526 		osd = lookup_create_osd(osdc, lreq->t.osd, true);
3527 		if (osd != lreq->osd) {
3528 			unlink_linger(lreq->osd, lreq);
3529 			link_linger(osd, lreq);
3530 		}
3531 	}
3532 
3533 	return ct_res;
3534 }
3535 
3536 /*
3537  * Requeue requests whose mapping to an OSD has changed.
3538  */
3539 static void scan_requests(struct ceph_osd *osd,
3540 			  bool force_resend,
3541 			  bool cleared_full,
3542 			  bool check_pool_cleared_full,
3543 			  struct rb_root *need_resend,
3544 			  struct list_head *need_resend_linger)
3545 {
3546 	struct ceph_osd_client *osdc = osd->o_osdc;
3547 	struct rb_node *n;
3548 	bool force_resend_writes;
3549 
3550 	for (n = rb_first(&osd->o_linger_requests); n; ) {
3551 		struct ceph_osd_linger_request *lreq =
3552 		    rb_entry(n, struct ceph_osd_linger_request, node);
3553 		enum calc_target_result ct_res;
3554 
3555 		n = rb_next(n); /* recalc_linger_target() */
3556 
3557 		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3558 		     lreq->linger_id);
3559 		ct_res = recalc_linger_target(lreq);
3560 		switch (ct_res) {
3561 		case CALC_TARGET_NO_ACTION:
3562 			force_resend_writes = cleared_full ||
3563 			    (check_pool_cleared_full &&
3564 			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3565 			if (!force_resend && !force_resend_writes)
3566 				break;
3567 
3568 			/* fall through */
3569 		case CALC_TARGET_NEED_RESEND:
3570 			cancel_linger_map_check(lreq);
3571 			/*
3572 			 * scan_requests() for the previous epoch(s)
3573 			 * may have already added it to the list, since
3574 			 * it's not unlinked here.
3575 			 */
3576 			if (list_empty(&lreq->scan_item))
3577 				list_add_tail(&lreq->scan_item, need_resend_linger);
3578 			break;
3579 		case CALC_TARGET_POOL_DNE:
3580 			list_del_init(&lreq->scan_item);
3581 			check_linger_pool_dne(lreq);
3582 			break;
3583 		}
3584 	}
3585 
3586 	for (n = rb_first(&osd->o_requests); n; ) {
3587 		struct ceph_osd_request *req =
3588 		    rb_entry(n, struct ceph_osd_request, r_node);
3589 		enum calc_target_result ct_res;
3590 
3591 		n = rb_next(n); /* unlink_request(), check_pool_dne() */
3592 
3593 		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3594 		ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
3595 				     false);
3596 		switch (ct_res) {
3597 		case CALC_TARGET_NO_ACTION:
3598 			force_resend_writes = cleared_full ||
3599 			    (check_pool_cleared_full &&
3600 			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3601 			if (!force_resend &&
3602 			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3603 			     !force_resend_writes))
3604 				break;
3605 
3606 			/* fall through */
3607 		case CALC_TARGET_NEED_RESEND:
3608 			cancel_map_check(req);
3609 			unlink_request(osd, req);
3610 			insert_request(need_resend, req);
3611 			break;
3612 		case CALC_TARGET_POOL_DNE:
3613 			check_pool_dne(req);
3614 			break;
3615 		}
3616 	}
3617 }
3618 
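/*
 * Apply one incremental or full map from an MOSDMap message.
 * Per-pool was_full state is carried over so that pools that cleared
 * the full condition can be detected, and all sessions (including the
 * homeless one) are scanned for requests that need to be resent.
 */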
3619 static int handle_one_map(struct ceph_osd_client *osdc,
3620 			  void *p, void *end, bool incremental,
3621 			  struct rb_root *need_resend,
3622 			  struct list_head *need_resend_linger)
3623 {
3624 	struct ceph_osdmap *newmap;
3625 	struct rb_node *n;
3626 	bool skipped_map = false;
3627 	bool was_full;
3628 
3629 	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3630 	set_pool_was_full(osdc);
3631 
3632 	if (incremental)
3633 		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3634 	else
3635 		newmap = ceph_osdmap_decode(&p, end);
3636 	if (IS_ERR(newmap))
3637 		return PTR_ERR(newmap);
3638 
3639 	if (newmap != osdc->osdmap) {
3640 		/*
3641 		 * Preserve ->was_full before destroying the old map.
3642 		 * For pools that weren't in the old map, ->was_full
3643 		 * should be false.
3644 		 */
3645 		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3646 			struct ceph_pg_pool_info *pi =
3647 			    rb_entry(n, struct ceph_pg_pool_info, node);
3648 			struct ceph_pg_pool_info *old_pi;
3649 
3650 			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3651 			if (old_pi)
3652 				pi->was_full = old_pi->was_full;
3653 			else
3654 				WARN_ON(pi->was_full);
3655 		}
3656 
3657 		if (osdc->osdmap->epoch &&
3658 		    osdc->osdmap->epoch + 1 < newmap->epoch) {
3659 			WARN_ON(incremental);
3660 			skipped_map = true;
3661 		}
3662 
3663 		ceph_osdmap_destroy(osdc->osdmap);
3664 		osdc->osdmap = newmap;
3665 	}
3666 
3667 	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3668 	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3669 		      need_resend, need_resend_linger);
3670 
3671 	for (n = rb_first(&osdc->osds); n; ) {
3672 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3673 
3674 		n = rb_next(n); /* close_osd() */
3675 
3676 		scan_requests(osd, skipped_map, was_full, true, need_resend,
3677 			      need_resend_linger);
3678 		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3679 		    memcmp(&osd->o_con.peer_addr,
3680 			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
3681 			   sizeof(struct ceph_entity_addr)))
3682 			close_osd(osd);
3683 	}
3684 
3685 	return 0;
3686 }
3687 
3688 static void kick_requests(struct ceph_osd_client *osdc,
3689 			  struct rb_root *need_resend,
3690 			  struct list_head *need_resend_linger)
3691 {
3692 	struct ceph_osd_linger_request *lreq, *nlreq;
3693 	enum calc_target_result ct_res;
3694 	struct rb_node *n;
3695 
3696 	/* make sure need_resend targets reflect latest map */
3697 	for (n = rb_first(need_resend); n; ) {
3698 		struct ceph_osd_request *req =
3699 		    rb_entry(n, struct ceph_osd_request, r_node);
3700 
3701 		n = rb_next(n);
3702 
3703 		if (req->r_t.epoch < osdc->osdmap->epoch) {
3704 			ct_res = calc_target(osdc, &req->r_t, NULL, false);
3705 			if (ct_res == CALC_TARGET_POOL_DNE) {
3706 				erase_request(need_resend, req);
3707 				check_pool_dne(req);
3708 			}
3709 		}
3710 	}
3711 
3712 	for (n = rb_first(need_resend); n; ) {
3713 		struct ceph_osd_request *req =
3714 		    rb_entry(n, struct ceph_osd_request, r_node);
3715 		struct ceph_osd *osd;
3716 
3717 		n = rb_next(n);
3718 		erase_request(need_resend, req); /* before link_request() */
3719 
3720 		osd = lookup_create_osd(osdc, req->r_t.osd, true);
3721 		link_request(osd, req);
3722 		if (!req->r_linger) {
3723 			if (!osd_homeless(osd) && !req->r_t.paused)
3724 				send_request(req);
3725 		} else {
3726 			cancel_linger_request(req);
3727 		}
3728 	}
3729 
3730 	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3731 		if (!osd_homeless(lreq->osd))
3732 			send_linger(lreq);
3733 
3734 		list_del_init(&lreq->scan_item);
3735 	}
3736 }
3737 
3738 /*
3739  * Process updated osd map.
3740  *
3741  * The message contains any number of incremental and full maps, normally
3742  * indicating some sort of topology change in the cluster.  Kick requests
3743  * off to different OSDs as needed.
3744  */
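/*
 * Wire layout of the front, as decoded below (a sketch for orientation;
 * the decode calls in the function body are authoritative):
 *
 *	struct ceph_fsid fsid;
 *	u32 nr_inc_maps;
 *	nr_inc_maps * { u32 epoch; u32 maplen; u8 data[maplen]; }
 *	u32 nr_full_maps;
 *	nr_full_maps * { u32 epoch; u32 maplen; u8 data[maplen]; }
 */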
3745 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3746 {
3747 	void *p = msg->front.iov_base;
3748 	void *const end = p + msg->front.iov_len;
3749 	u32 nr_maps, maplen;
3750 	u32 epoch;
3751 	struct ceph_fsid fsid;
3752 	struct rb_root need_resend = RB_ROOT;
3753 	LIST_HEAD(need_resend_linger);
3754 	bool handled_incremental = false;
3755 	bool was_pauserd, was_pausewr;
3756 	bool pauserd, pausewr;
3757 	int err;
3758 
3759 	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3760 	down_write(&osdc->lock);
3761 
3762 	/* verify fsid */
3763 	ceph_decode_need(&p, end, sizeof(fsid), bad);
3764 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
3765 	if (ceph_check_fsid(osdc->client, &fsid) < 0)
3766 		goto bad;
3767 
3768 	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3769 	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3770 		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3771 		      have_pool_full(osdc);
3772 
3773 	/* incremental maps */
3774 	ceph_decode_32_safe(&p, end, nr_maps, bad);
3775 	dout(" %d inc maps\n", nr_maps);
3776 	while (nr_maps > 0) {
3777 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3778 		epoch = ceph_decode_32(&p);
3779 		maplen = ceph_decode_32(&p);
3780 		ceph_decode_need(&p, end, maplen, bad);
3781 		if (osdc->osdmap->epoch &&
3782 		    osdc->osdmap->epoch + 1 == epoch) {
3783 			dout("applying incremental map %u len %d\n",
3784 			     epoch, maplen);
3785 			err = handle_one_map(osdc, p, p + maplen, true,
3786 					     &need_resend, &need_resend_linger);
3787 			if (err)
3788 				goto bad;
3789 			handled_incremental = true;
3790 		} else {
3791 			dout("ignoring incremental map %u len %d\n",
3792 			     epoch, maplen);
3793 		}
3794 		p += maplen;
3795 		nr_maps--;
3796 	}
3797 	if (handled_incremental)
3798 		goto done;
3799 
3800 	/* full maps */
3801 	ceph_decode_32_safe(&p, end, nr_maps, bad);
3802 	dout(" %d full maps\n", nr_maps);
3803 	while (nr_maps) {
3804 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3805 		epoch = ceph_decode_32(&p);
3806 		maplen = ceph_decode_32(&p);
3807 		ceph_decode_need(&p, end, maplen, bad);
3808 		if (nr_maps > 1) {
3809 			dout("skipping non-latest full map %u len %d\n",
3810 			     epoch, maplen);
3811 		} else if (osdc->osdmap->epoch >= epoch) {
3812 			dout("skipping full map %u len %d, "
3813 			     "older than our %u\n", epoch, maplen,
3814 			     osdc->osdmap->epoch);
3815 		} else {
3816 			dout("taking full map %u len %d\n", epoch, maplen);
3817 			err = handle_one_map(osdc, p, p + maplen, false,
3818 					     &need_resend, &need_resend_linger);
3819 			if (err)
3820 				goto bad;
3821 		}
3822 		p += maplen;
3823 		nr_maps--;
3824 	}
3825 
3826 done:
3827 	/*
3828 	 * subscribe to subsequent osdmap updates if full to ensure
3829 	 * we find out when we are no longer full and stop returning
3830 	 * ENOSPC.
3831 	 */
3832 	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3833 	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3834 		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3835 		  have_pool_full(osdc);
3836 	if (was_pauserd || was_pausewr || pauserd || pausewr ||
3837 	    osdc->osdmap->epoch < osdc->epoch_barrier)
3838 		maybe_request_map(osdc);
3839 
3840 	kick_requests(osdc, &need_resend, &need_resend_linger);
3841 
3842 	ceph_osdc_abort_on_full(osdc);
3843 	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3844 			  osdc->osdmap->epoch);
3845 	up_write(&osdc->lock);
3846 	wake_up_all(&osdc->client->auth_wq);
3847 	return;
3848 
3849 bad:
3850 	pr_err("osdc handle_map corrupt msg\n");
3851 	ceph_msg_dump(msg);
3852 	up_write(&osdc->lock);
3853 }
3854 
3855 /*
3856  * Resubmit requests pending on the given osd.
3857  */
3858 static void kick_osd_requests(struct ceph_osd *osd)
3859 {
3860 	struct rb_node *n;
3861 
3862 	clear_backoffs(osd);
3863 
3864 	for (n = rb_first(&osd->o_requests); n; ) {
3865 		struct ceph_osd_request *req =
3866 		    rb_entry(n, struct ceph_osd_request, r_node);
3867 
3868 		n = rb_next(n); /* cancel_linger_request() */
3869 
3870 		if (!req->r_linger) {
3871 			if (!req->r_t.paused)
3872 				send_request(req);
3873 		} else {
3874 			cancel_linger_request(req);
3875 		}
3876 	}
3877 	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3878 		struct ceph_osd_linger_request *lreq =
3879 		    rb_entry(n, struct ceph_osd_linger_request, node);
3880 
3881 		send_linger(lreq);
3882 	}
3883 }
3884 
3885 /*
3886  * If the osd connection drops, we need to resubmit all requests.
3887  */
3888 static void osd_fault(struct ceph_connection *con)
3889 {
3890 	struct ceph_osd *osd = con->private;
3891 	struct ceph_osd_client *osdc = osd->o_osdc;
3892 
3893 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3894 
3895 	down_write(&osdc->lock);
3896 	if (!osd_registered(osd)) {
3897 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
3898 		goto out_unlock;
3899 	}
3900 
3901 	if (!reopen_osd(osd))
3902 		kick_osd_requests(osd);
3903 	maybe_request_map(osdc);
3904 
3905 out_unlock:
3906 	up_write(&osdc->lock);
3907 }
3908 
3909 struct MOSDBackoff {
3910 	struct ceph_spg spgid;
3911 	u32 map_epoch;
3912 	u8 op;
3913 	u64 id;
3914 	struct ceph_hobject_id *begin;
3915 	struct ceph_hobject_id *end;
3916 };
3917 
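/*
 * Decode the front of an MOSDBackoff message.  On success, @m->begin and
 * @m->end are allocated and must be released with free_hoid() - see
 * handle_backoff(), which frees them, and handle_backoff_block(), which
 * takes ownership by NULLing the pointers.
 */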
3918 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
3919 {
3920 	void *p = msg->front.iov_base;
3921 	void *const end = p + msg->front.iov_len;
3922 	u8 struct_v;
3923 	u32 struct_len;
3924 	int ret;
3925 
3926 	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
3927 	if (ret)
3928 		return ret;
3929 
3930 	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
3931 	if (ret)
3932 		return ret;
3933 
3934 	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
3935 	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
3936 	ceph_decode_8_safe(&p, end, m->op, e_inval);
3937 	ceph_decode_64_safe(&p, end, m->id, e_inval);
3938 
3939 	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
3940 	if (!m->begin)
3941 		return -ENOMEM;
3942 
3943 	ret = decode_hoid(&p, end, m->begin);
3944 	if (ret) {
3945 		free_hoid(m->begin);
3946 		return ret;
3947 	}
3948 
3949 	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
3950 	if (!m->end) {
3951 		free_hoid(m->begin);
3952 		return -ENOMEM;
3953 	}
3954 
3955 	ret = decode_hoid(&p, end, m->end);
3956 	if (ret) {
3957 		free_hoid(m->begin);
3958 		free_hoid(m->end);
3959 		return ret;
3960 	}
3961 
3962 	return 0;
3963 
3964 e_inval:
3965 	return -EINVAL;
3966 }
3967 
3968 static struct ceph_msg *create_backoff_message(
3969 				const struct ceph_osd_backoff *backoff,
3970 				u32 map_epoch)
3971 {
3972 	struct ceph_msg *msg;
3973 	void *p, *end;
3974 	int msg_size;
3975 
3976 	msg_size = CEPH_ENCODING_START_BLK_LEN +
3977 			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
3978 	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
3979 	msg_size += CEPH_ENCODING_START_BLK_LEN +
3980 			hoid_encoding_size(backoff->begin);
3981 	msg_size += CEPH_ENCODING_START_BLK_LEN +
3982 			hoid_encoding_size(backoff->end);
3983 
3984 	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
3985 	if (!msg)
3986 		return NULL;
3987 
3988 	p = msg->front.iov_base;
3989 	end = p + msg->front_alloc_len;
3990 
3991 	encode_spgid(&p, &backoff->spgid);
3992 	ceph_encode_32(&p, map_epoch);
3993 	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
3994 	ceph_encode_64(&p, backoff->id);
3995 	encode_hoid(&p, end, backoff->begin);
3996 	encode_hoid(&p, end, backoff->end);
3997 	BUG_ON(p != end);
3998 
3999 	msg->front.iov_len = p - msg->front.iov_base;
4000 	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4001 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4002 
4003 	return msg;
4004 }
4005 
4006 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4007 {
4008 	struct ceph_spg_mapping *spg;
4009 	struct ceph_osd_backoff *backoff;
4010 	struct ceph_msg *msg;
4011 
4012 	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4013 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4014 
4015 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4016 	if (!spg) {
4017 		spg = alloc_spg_mapping();
4018 		if (!spg) {
4019 			pr_err("%s failed to allocate spg\n", __func__);
4020 			return;
4021 		}
4022 		spg->spgid = m->spgid; /* struct */
4023 		insert_spg_mapping(&osd->o_backoff_mappings, spg);
4024 	}
4025 
4026 	backoff = alloc_backoff();
4027 	if (!backoff) {
4028 		pr_err("%s failed to allocate backoff\n", __func__);
4029 		return;
4030 	}
4031 	backoff->spgid = m->spgid; /* struct */
4032 	backoff->id = m->id;
4033 	backoff->begin = m->begin;
4034 	m->begin = NULL; /* backoff now owns this */
4035 	backoff->end = m->end;
4036 	m->end = NULL;   /* ditto */
4037 
4038 	insert_backoff(&spg->backoffs, backoff);
4039 	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4040 
4041 	/*
4042 	 * Ack with original backoff's epoch so that the OSD can
4043 	 * discard this if there was a PG split.
4044 	 */
4045 	msg = create_backoff_message(backoff, m->map_epoch);
4046 	if (!msg) {
4047 		pr_err("%s failed to allocate msg\n", __func__);
4048 		return;
4049 	}
4050 	ceph_con_send(&osd->o_con, msg);
4051 }
4052 
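/*
 * Does the target's hobject fall within the half-open range
 * [begin, end) covered by a backoff?
 */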
4053 static bool target_contained_by(const struct ceph_osd_request_target *t,
4054 				const struct ceph_hobject_id *begin,
4055 				const struct ceph_hobject_id *end)
4056 {
4057 	struct ceph_hobject_id hoid;
4058 	int cmp;
4059 
4060 	hoid_fill_from_target(&hoid, t);
4061 	cmp = hoid_compare(&hoid, begin);
4062 	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4063 }
4064 
4065 static void handle_backoff_unblock(struct ceph_osd *osd,
4066 				   const struct MOSDBackoff *m)
4067 {
4068 	struct ceph_spg_mapping *spg;
4069 	struct ceph_osd_backoff *backoff;
4070 	struct rb_node *n;
4071 
4072 	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4073 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4074 
4075 	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4076 	if (!backoff) {
4077 		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4078 		       __func__, osd->o_osd, m->spgid.pgid.pool,
4079 		       m->spgid.pgid.seed, m->spgid.shard, m->id);
4080 		return;
4081 	}
4082 
4083 	if (hoid_compare(backoff->begin, m->begin) &&
4084 	    hoid_compare(backoff->end, m->end)) {
4085 		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4086 		       __func__, osd->o_osd, m->spgid.pgid.pool,
4087 		       m->spgid.pgid.seed, m->spgid.shard, m->id);
4088 		/* unblock it anyway... */
4089 	}
4090 
4091 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4092 	BUG_ON(!spg);
4093 
4094 	erase_backoff(&spg->backoffs, backoff);
4095 	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4096 	free_backoff(backoff);
4097 
4098 	if (RB_EMPTY_ROOT(&spg->backoffs)) {
4099 		erase_spg_mapping(&osd->o_backoff_mappings, spg);
4100 		free_spg_mapping(spg);
4101 	}
4102 
4103 	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4104 		struct ceph_osd_request *req =
4105 		    rb_entry(n, struct ceph_osd_request, r_node);
4106 
4107 		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4108 			/*
4109 			 * Match against @m, not @backoff -- the PG may
4110 			 * have split on the OSD.
4111 			 */
4112 			if (target_contained_by(&req->r_t, m->begin, m->end)) {
4113 				/*
4114 				 * If no other installed backoff applies,
4115 				 * resend.
4116 				 */
4117 				send_request(req);
4118 			}
4119 		}
4120 	}
4121 }
4122 
4123 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4124 {
4125 	struct ceph_osd_client *osdc = osd->o_osdc;
4126 	struct MOSDBackoff m;
4127 	int ret;
4128 
4129 	down_read(&osdc->lock);
4130 	if (!osd_registered(osd)) {
4131 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
4132 		up_read(&osdc->lock);
4133 		return;
4134 	}
4135 	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4136 
4137 	mutex_lock(&osd->lock);
4138 	ret = decode_MOSDBackoff(msg, &m);
4139 	if (ret) {
4140 		pr_err("failed to decode MOSDBackoff: %d\n", ret);
4141 		ceph_msg_dump(msg);
4142 		goto out_unlock;
4143 	}
4144 
4145 	switch (m.op) {
4146 	case CEPH_OSD_BACKOFF_OP_BLOCK:
4147 		handle_backoff_block(osd, &m);
4148 		break;
4149 	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4150 		handle_backoff_unblock(osd, &m);
4151 		break;
4152 	default:
4153 		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4154 	}
4155 
4156 	free_hoid(m.begin);
4157 	free_hoid(m.end);
4158 
4159 out_unlock:
4160 	mutex_unlock(&osd->lock);
4161 	up_read(&osdc->lock);
4162 }
4163 
4164 /*
4165  * Process osd watch notifications
4166  */
4167 static void handle_watch_notify(struct ceph_osd_client *osdc,
4168 				struct ceph_msg *msg)
4169 {
4170 	void *p = msg->front.iov_base;
4171 	void *const end = p + msg->front.iov_len;
4172 	struct ceph_osd_linger_request *lreq;
4173 	struct linger_work *lwork;
4174 	u8 proto_ver, opcode;
4175 	u64 cookie, notify_id;
4176 	u64 notifier_id = 0;
4177 	s32 return_code = 0;
4178 	void *payload = NULL;
4179 	u32 payload_len = 0;
4180 
4181 	ceph_decode_8_safe(&p, end, proto_ver, bad);
4182 	ceph_decode_8_safe(&p, end, opcode, bad);
4183 	ceph_decode_64_safe(&p, end, cookie, bad);
4184 	p += 8; /* skip ver */
4185 	ceph_decode_64_safe(&p, end, notify_id, bad);
4186 
4187 	if (proto_ver >= 1) {
4188 		ceph_decode_32_safe(&p, end, payload_len, bad);
4189 		ceph_decode_need(&p, end, payload_len, bad);
4190 		payload = p;
4191 		p += payload_len;
4192 	}
4193 
4194 	if (le16_to_cpu(msg->hdr.version) >= 2)
4195 		ceph_decode_32_safe(&p, end, return_code, bad);
4196 
4197 	if (le16_to_cpu(msg->hdr.version) >= 3)
4198 		ceph_decode_64_safe(&p, end, notifier_id, bad);
4199 
4200 	down_read(&osdc->lock);
4201 	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4202 	if (!lreq) {
4203 		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4204 		     cookie);
4205 		goto out_unlock_osdc;
4206 	}
4207 
4208 	mutex_lock(&lreq->lock);
4209 	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4210 	     opcode, cookie, lreq, lreq->is_watch);
4211 	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4212 		if (!lreq->last_error) {
4213 			lreq->last_error = -ENOTCONN;
4214 			queue_watch_error(lreq);
4215 		}
4216 	} else if (!lreq->is_watch) {
4217 		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4218 		if (lreq->notify_id && lreq->notify_id != notify_id) {
4219 			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4220 			     lreq->notify_id, notify_id);
4221 		} else if (!completion_done(&lreq->notify_finish_wait)) {
4222 			struct ceph_msg_data *data =
4223 			    list_first_entry_or_null(&msg->data,
4224 						     struct ceph_msg_data,
4225 						     links);
4226 
4227 			if (data) {
4228 				if (lreq->preply_pages) {
4229 					WARN_ON(data->type !=
4230 							CEPH_MSG_DATA_PAGES);
4231 					*lreq->preply_pages = data->pages;
4232 					*lreq->preply_len = data->length;
4233 				} else {
4234 					ceph_release_page_vector(data->pages,
4235 					       calc_pages_for(0, data->length));
4236 				}
4237 			}
4238 			lreq->notify_finish_error = return_code;
4239 			complete_all(&lreq->notify_finish_wait);
4240 		}
4241 	} else {
4242 		/* CEPH_WATCH_EVENT_NOTIFY */
4243 		lwork = lwork_alloc(lreq, do_watch_notify);
4244 		if (!lwork) {
4245 			pr_err("failed to allocate notify-lwork\n");
4246 			goto out_unlock_lreq;
4247 		}
4248 
4249 		lwork->notify.notify_id = notify_id;
4250 		lwork->notify.notifier_id = notifier_id;
4251 		lwork->notify.payload = payload;
4252 		lwork->notify.payload_len = payload_len;
4253 		lwork->notify.msg = ceph_msg_get(msg);
4254 		lwork_queue(lwork);
4255 	}
4256 
4257 out_unlock_lreq:
4258 	mutex_unlock(&lreq->lock);
4259 out_unlock_osdc:
4260 	up_read(&osdc->lock);
4261 	return;
4262 
4263 bad:
4264 	pr_err("osdc handle_watch_notify corrupt msg\n");
4265 }
4266 
4267 /*
4268  * Register request, send initial attempt.  (@nofail is currently unused.)
4269  */
4270 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4271 			    struct ceph_osd_request *req,
4272 			    bool nofail)
4273 {
4274 	down_read(&osdc->lock);
4275 	submit_request(req, false);
4276 	up_read(&osdc->lock);
4277 
4278 	return 0;
4279 }
4280 EXPORT_SYMBOL(ceph_osdc_start_request);
4281 
4282 /*
4283  * Unregister a registered request.  The request is not completed:
4284  * ->r_result isn't set and __complete_request() isn't called.
4285  */
4286 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4287 {
4288 	struct ceph_osd_client *osdc = req->r_osdc;
4289 
4290 	down_write(&osdc->lock);
4291 	if (req->r_osd)
4292 		cancel_request(req);
4293 	up_write(&osdc->lock);
4294 }
4295 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4296 
4297 /*
4298  * @timeout: in jiffies, 0 means "wait forever"
4299  */
4300 static int wait_request_timeout(struct ceph_osd_request *req,
4301 				unsigned long timeout)
4302 {
4303 	long left;
4304 
4305 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4306 	left = wait_for_completion_killable_timeout(&req->r_completion,
4307 						ceph_timeout_jiffies(timeout));
4308 	if (left <= 0) {
4309 		left = left ?: -ETIMEDOUT;
4310 		ceph_osdc_cancel_request(req);
4311 	} else {
4312 		left = req->r_result; /* completed */
4313 	}
4314 
4315 	return left;
4316 }
4317 
4318 /*
4319  * wait for a request to complete
4320  */
4321 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4322 			   struct ceph_osd_request *req)
4323 {
4324 	return wait_request_timeout(req, 0);
4325 }
4326 EXPORT_SYMBOL(ceph_osdc_wait_request);
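
/*
 * Typical synchronous usage (a minimal sketch; ceph_osdc_readpages()
 * below is a complete in-tree example):
 *
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * If the wait is killed or times out, the request is torn down with
 * ceph_osdc_cancel_request() and the wait error is returned instead of
 * req->r_result.
 */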
4327 
4328 /*
4329  * sync - wait for all in-flight requests to flush.  avoid starvation.
4330  */
4331 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4332 {
4333 	struct rb_node *n, *p;
4334 	u64 last_tid = atomic64_read(&osdc->last_tid);
4335 
4336 again:
4337 	down_read(&osdc->lock);
4338 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4339 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4340 
4341 		mutex_lock(&osd->lock);
4342 		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4343 			struct ceph_osd_request *req =
4344 			    rb_entry(p, struct ceph_osd_request, r_node);
4345 
4346 			if (req->r_tid > last_tid)
4347 				break;
4348 
4349 			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4350 				continue;
4351 
4352 			ceph_osdc_get_request(req);
4353 			mutex_unlock(&osd->lock);
4354 			up_read(&osdc->lock);
4355 			dout("%s waiting on req %p tid %llu last_tid %llu\n",
4356 			     __func__, req, req->r_tid, last_tid);
4357 			wait_for_completion(&req->r_completion);
4358 			ceph_osdc_put_request(req);
4359 			goto again;
4360 		}
4361 
4362 		mutex_unlock(&osd->lock);
4363 	}
4364 
4365 	up_read(&osdc->lock);
4366 	dout("%s done last_tid %llu\n", __func__, last_tid);
4367 }
4368 EXPORT_SYMBOL(ceph_osdc_sync);
4369 
4370 static struct ceph_osd_request *
4371 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4372 {
4373 	struct ceph_osd_request *req;
4374 
4375 	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4376 	if (!req)
4377 		return NULL;
4378 
4379 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4380 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4381 
4382 	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4383 		ceph_osdc_put_request(req);
4384 		return NULL;
4385 	}
4386 
4387 	return req;
4388 }
4389 
4390 /*
4391  * Returns a handle, caller owns a ref.
4392  */
4393 struct ceph_osd_linger_request *
4394 ceph_osdc_watch(struct ceph_osd_client *osdc,
4395 		struct ceph_object_id *oid,
4396 		struct ceph_object_locator *oloc,
4397 		rados_watchcb2_t wcb,
4398 		rados_watcherrcb_t errcb,
4399 		void *data)
4400 {
4401 	struct ceph_osd_linger_request *lreq;
4402 	int ret;
4403 
4404 	lreq = linger_alloc(osdc);
4405 	if (!lreq)
4406 		return ERR_PTR(-ENOMEM);
4407 
4408 	lreq->is_watch = true;
4409 	lreq->wcb = wcb;
4410 	lreq->errcb = errcb;
4411 	lreq->data = data;
4412 	lreq->watch_valid_thru = jiffies;
4413 
4414 	ceph_oid_copy(&lreq->t.base_oid, oid);
4415 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4416 	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4417 	ktime_get_real_ts(&lreq->mtime);
4418 
4419 	lreq->reg_req = alloc_linger_request(lreq);
4420 	if (!lreq->reg_req) {
4421 		ret = -ENOMEM;
4422 		goto err_put_lreq;
4423 	}
4424 
4425 	lreq->ping_req = alloc_linger_request(lreq);
4426 	if (!lreq->ping_req) {
4427 		ret = -ENOMEM;
4428 		goto err_put_lreq;
4429 	}
4430 
4431 	down_write(&osdc->lock);
4432 	linger_register(lreq); /* before osd_req_op_* */
4433 	osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
4434 			      CEPH_OSD_WATCH_OP_WATCH);
4435 	osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
4436 			      CEPH_OSD_WATCH_OP_PING);
4437 	linger_submit(lreq);
4438 	up_write(&osdc->lock);
4439 
4440 	ret = linger_reg_commit_wait(lreq);
4441 	if (ret) {
4442 		linger_cancel(lreq);
4443 		goto err_put_lreq;
4444 	}
4445 
4446 	return lreq;
4447 
4448 err_put_lreq:
4449 	linger_put(lreq);
4450 	return ERR_PTR(ret);
4451 }
4452 EXPORT_SYMBOL(ceph_osdc_watch);
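
/*
 * Example usage (an illustrative sketch - my_wcb(), my_errcb() and the
 * private pointer are hypothetical):
 *
 *	struct ceph_osd_linger_request *handle;
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_wcb, my_errcb, priv);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * The returned ref is released by passing @handle to ceph_osdc_unwatch()
 * when the watch is no longer needed.
 */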
4453 
4454 /*
4455  * Releases a ref.
4456  *
4457  * Times out after mount_timeout to preserve rbd unmap behaviour
4458  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4459  * with mount_timeout").
4460  */
4461 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4462 		      struct ceph_osd_linger_request *lreq)
4463 {
4464 	struct ceph_options *opts = osdc->client->options;
4465 	struct ceph_osd_request *req;
4466 	int ret;
4467 
4468 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4469 	if (!req)
4470 		return -ENOMEM;
4471 
4472 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4473 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4474 	req->r_flags = CEPH_OSD_FLAG_WRITE;
4475 	ktime_get_real_ts(&req->r_mtime);
4476 	osd_req_op_watch_init(req, 0, lreq->linger_id,
4477 			      CEPH_OSD_WATCH_OP_UNWATCH);
4478 
4479 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4480 	if (ret)
4481 		goto out_put_req;
4482 
4483 	ceph_osdc_start_request(osdc, req, false);
4484 	linger_cancel(lreq);
4485 	linger_put(lreq);
4486 	ret = wait_request_timeout(req, opts->mount_timeout);
4487 
4488 out_put_req:
4489 	ceph_osdc_put_request(req);
4490 	return ret;
4491 }
4492 EXPORT_SYMBOL(ceph_osdc_unwatch);
4493 
4494 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4495 				      u64 notify_id, u64 cookie, void *payload,
4496 				      size_t payload_len)
4497 {
4498 	struct ceph_osd_req_op *op;
4499 	struct ceph_pagelist *pl;
4500 	int ret;
4501 
4502 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4503 
4504 	pl = kmalloc(sizeof(*pl), GFP_NOIO);
4505 	if (!pl)
4506 		return -ENOMEM;
4507 
4508 	ceph_pagelist_init(pl);
4509 	ret = ceph_pagelist_encode_64(pl, notify_id);
4510 	ret |= ceph_pagelist_encode_64(pl, cookie);
4511 	if (payload) {
4512 		ret |= ceph_pagelist_encode_32(pl, payload_len);
4513 		ret |= ceph_pagelist_append(pl, payload, payload_len);
4514 	} else {
4515 		ret |= ceph_pagelist_encode_32(pl, 0);
4516 	}
4517 	if (ret) {
4518 		ceph_pagelist_release(pl);
4519 		return -ENOMEM;
4520 	}
4521 
4522 	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4523 	op->indata_len = pl->length;
4524 	return 0;
4525 }
4526 
4527 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4528 			 struct ceph_object_id *oid,
4529 			 struct ceph_object_locator *oloc,
4530 			 u64 notify_id,
4531 			 u64 cookie,
4532 			 void *payload,
4533 			 size_t payload_len)
4534 {
4535 	struct ceph_osd_request *req;
4536 	int ret;
4537 
4538 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4539 	if (!req)
4540 		return -ENOMEM;
4541 
4542 	ceph_oid_copy(&req->r_base_oid, oid);
4543 	ceph_oloc_copy(&req->r_base_oloc, oloc);
4544 	req->r_flags = CEPH_OSD_FLAG_READ;
4545 
4546 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4547 	if (ret)
4548 		goto out_put_req;
4549 
4550 	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4551 					 payload_len);
4552 	if (ret)
4553 		goto out_put_req;
4554 
4555 	ceph_osdc_start_request(osdc, req, false);
4556 	ret = ceph_osdc_wait_request(osdc, req);
4557 
4558 out_put_req:
4559 	ceph_osdc_put_request(req);
4560 	return ret;
4561 }
4562 EXPORT_SYMBOL(ceph_osdc_notify_ack);
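
/*
 * A watch callback would typically acknowledge the notify it was handed
 * (a sketch; the parameters follow rados_watchcb2_t, and my_wcb() and
 * struct my_ctx are hypothetical):
 *
 *	static void my_wcb(void *arg, u64 notify_id, u64 cookie,
 *			   u64 notifier_id, void *data, size_t data_len)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		ceph_osdc_notify_ack(ctx->osdc, &ctx->oid, &ctx->oloc,
 *				     notify_id, cookie, NULL, 0);
 *	}
 */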
4563 
4564 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4565 				  u64 cookie, u32 prot_ver, u32 timeout,
4566 				  void *payload, size_t payload_len)
4567 {
4568 	struct ceph_osd_req_op *op;
4569 	struct ceph_pagelist *pl;
4570 	int ret;
4571 
4572 	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4573 	op->notify.cookie = cookie;
4574 
4575 	pl = kmalloc(sizeof(*pl), GFP_NOIO);
4576 	if (!pl)
4577 		return -ENOMEM;
4578 
4579 	ceph_pagelist_init(pl);
4580 	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4581 	ret |= ceph_pagelist_encode_32(pl, timeout);
4582 	ret |= ceph_pagelist_encode_32(pl, payload_len);
4583 	ret |= ceph_pagelist_append(pl, payload, payload_len);
4584 	if (ret) {
4585 		ceph_pagelist_release(pl);
4586 		return -ENOMEM;
4587 	}
4588 
4589 	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4590 	op->indata_len = pl->length;
4591 	return 0;
4592 }
4593 
4594 /*
4595  * @timeout: in seconds
4596  *
4597  * @preply_{pages,len} are initialized both on success and error.
4598  * The caller is responsible for:
4599  *
4600  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4601  */
4602 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4603 		     struct ceph_object_id *oid,
4604 		     struct ceph_object_locator *oloc,
4605 		     void *payload,
4606 		     size_t payload_len,
4607 		     u32 timeout,
4608 		     struct page ***preply_pages,
4609 		     size_t *preply_len)
4610 {
4611 	struct ceph_osd_linger_request *lreq;
4612 	struct page **pages;
4613 	int ret;
4614 
4615 	WARN_ON(!timeout);
4616 	if (preply_pages) {
4617 		*preply_pages = NULL;
4618 		*preply_len = 0;
4619 	}
4620 
4621 	lreq = linger_alloc(osdc);
4622 	if (!lreq)
4623 		return -ENOMEM;
4624 
4625 	lreq->preply_pages = preply_pages;
4626 	lreq->preply_len = preply_len;
4627 
4628 	ceph_oid_copy(&lreq->t.base_oid, oid);
4629 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4630 	lreq->t.flags = CEPH_OSD_FLAG_READ;
4631 
4632 	lreq->reg_req = alloc_linger_request(lreq);
4633 	if (!lreq->reg_req) {
4634 		ret = -ENOMEM;
4635 		goto out_put_lreq;
4636 	}
4637 
4638 	/* for notify_id */
4639 	pages = ceph_alloc_page_vector(1, GFP_NOIO);
4640 	if (IS_ERR(pages)) {
4641 		ret = PTR_ERR(pages);
4642 		goto out_put_lreq;
4643 	}
4644 
4645 	down_write(&osdc->lock);
4646 	linger_register(lreq); /* before osd_req_op_* */
4647 	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
4648 				     timeout, payload, payload_len);
4649 	if (ret) {
4650 		linger_unregister(lreq);
4651 		up_write(&osdc->lock);
4652 		ceph_release_page_vector(pages, 1);
4653 		goto out_put_lreq;
4654 	}
4655 	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4656 						 response_data),
4657 				 pages, PAGE_SIZE, 0, false, true);
4658 	linger_submit(lreq);
4659 	up_write(&osdc->lock);
4660 
4661 	ret = linger_reg_commit_wait(lreq);
4662 	if (!ret)
4663 		ret = linger_notify_finish_wait(lreq);
4664 	else
4665 		dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4666 
4667 	linger_cancel(lreq);
4668 out_put_lreq:
4669 	linger_put(lreq);
4670 	return ret;
4671 }
4672 EXPORT_SYMBOL(ceph_osdc_notify);
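
/*
 * Example (a sketch; as noted above, the reply page vector is owned by
 * the caller and must be released even if the notify itself failed):
 *
 *	struct page **reply_pages;
 *	size_t reply_len;
 *
 *	ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, payload_len,
 *			       10, &reply_pages, &reply_len);
 *	...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */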
4673 
4674 /*
4675  * Return the number of milliseconds since the watch was last
4676  * confirmed, or an error.  If there is an error, the watch is no
4677  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4678  */
4679 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4680 			  struct ceph_osd_linger_request *lreq)
4681 {
4682 	unsigned long stamp, age;
4683 	int ret;
4684 
4685 	down_read(&osdc->lock);
4686 	mutex_lock(&lreq->lock);
4687 	stamp = lreq->watch_valid_thru;
4688 	if (!list_empty(&lreq->pending_lworks)) {
4689 		struct linger_work *lwork =
4690 		    list_first_entry(&lreq->pending_lworks,
4691 				     struct linger_work,
4692 				     pending_item);
4693 
4694 		if (time_before(lwork->queued_stamp, stamp))
4695 			stamp = lwork->queued_stamp;
4696 	}
4697 	age = jiffies - stamp;
4698 	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4699 	     lreq, lreq->linger_id, age, lreq->last_error);
4700 	/* we are truncating to msecs, so return a safe upper bound */
4701 	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4702 
4703 	mutex_unlock(&lreq->lock);
4704 	up_read(&osdc->lock);
4705 	return ret;
4706 }
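
/*
 * Example keepalive check (a sketch; a watcher such as rbd would poll
 * this periodically - a negative return means the watch is dead and
 * must be destroyed):
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0) {
 *		ceph_osdc_unwatch(osdc, handle);
 *		... reestablish the watch or bail out ...
 *	}
 */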
4707 
4708 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
4709 {
4710 	u8 struct_v;
4711 	u32 struct_len;
4712 	int ret;
4713 
4714 	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
4715 				  &struct_v, &struct_len);
4716 	if (ret)
4717 		return ret;
4718 
4719 	ceph_decode_copy(p, &item->name, sizeof(item->name));
4720 	item->cookie = ceph_decode_64(p);
4721 	*p += 4; /* skip timeout_seconds */
4722 	if (struct_v >= 2) {
4723 		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
4724 		ceph_decode_addr(&item->addr);
4725 	}
4726 
4727 	dout("%s %s%llu cookie %llu addr %s\n", __func__,
4728 	     ENTITY_NAME(item->name), item->cookie,
4729 	     ceph_pr_addr(&item->addr.in_addr));
4730 	return 0;
4731 }
4732 
4733 static int decode_watchers(void **p, void *end,
4734 			   struct ceph_watch_item **watchers,
4735 			   u32 *num_watchers)
4736 {
4737 	u8 struct_v;
4738 	u32 struct_len;
4739 	int i;
4740 	int ret;
4741 
4742 	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4743 				  &struct_v, &struct_len);
4744 	if (ret)
4745 		return ret;
4746 
4747 	*num_watchers = ceph_decode_32(p);
4748 	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4749 	if (!*watchers)
4750 		return -ENOMEM;
4751 
4752 	for (i = 0; i < *num_watchers; i++) {
4753 		ret = decode_watcher(p, end, *watchers + i);
4754 		if (ret) {
4755 			kfree(*watchers);
4756 			return ret;
4757 		}
4758 	}
4759 
4760 	return 0;
4761 }
4762 
4763 /*
4764  * On success, the caller is responsible for:
4765  *
4766  *     kfree(watchers);
4767  */
4768 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4769 			    struct ceph_object_id *oid,
4770 			    struct ceph_object_locator *oloc,
4771 			    struct ceph_watch_item **watchers,
4772 			    u32 *num_watchers)
4773 {
4774 	struct ceph_osd_request *req;
4775 	struct page **pages;
4776 	int ret;
4777 
4778 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4779 	if (!req)
4780 		return -ENOMEM;
4781 
4782 	ceph_oid_copy(&req->r_base_oid, oid);
4783 	ceph_oloc_copy(&req->r_base_oloc, oloc);
4784 	req->r_flags = CEPH_OSD_FLAG_READ;
4785 
4786 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4787 	if (ret)
4788 		goto out_put_req;
4789 
4790 	pages = ceph_alloc_page_vector(1, GFP_NOIO);
4791 	if (IS_ERR(pages)) {
4792 		ret = PTR_ERR(pages);
4793 		goto out_put_req;
4794 	}
4795 
4796 	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4797 	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4798 						 response_data),
4799 				 pages, PAGE_SIZE, 0, false, true);
4800 
4801 	ceph_osdc_start_request(osdc, req, false);
4802 	ret = ceph_osdc_wait_request(osdc, req);
4803 	if (ret >= 0) {
4804 		void *p = page_address(pages[0]);
4805 		void *const end = p + req->r_ops[0].outdata_len;
4806 
4807 		ret = decode_watchers(&p, end, watchers, num_watchers);
4808 	}
4809 
4810 out_put_req:
4811 	ceph_osdc_put_request(req);
4812 	return ret;
4813 }
4814 EXPORT_SYMBOL(ceph_osdc_list_watchers);
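
/*
 * Example (a sketch; per the comment above, @watchers is owned by the
 * caller on success):
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers;
 *	int ret;
 *
 *	ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *				      &num_watchers);
 *	if (!ret) {
 *		... iterate over watchers[0..num_watchers-1] ...
 *		kfree(watchers);
 *	}
 */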
4815 
4816 /*
4817  * Call all pending notify callbacks - for use after a watch is
4818  * unregistered, to make sure no more callbacks for it will be invoked.
4819  */
4820 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4821 {
4822 	dout("%s osdc %p\n", __func__, osdc);
4823 	flush_workqueue(osdc->notify_wq);
4824 }
4825 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4826 
4827 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4828 {
4829 	down_read(&osdc->lock);
4830 	maybe_request_map(osdc);
4831 	up_read(&osdc->lock);
4832 }
4833 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4834 
4835 /*
4836  * Execute an OSD class method on an object.
4837  *
4838  * @flags: CEPH_OSD_FLAG_*
4839  * @resp_len: in/out param for reply length
4840  */
4841 int ceph_osdc_call(struct ceph_osd_client *osdc,
4842 		   struct ceph_object_id *oid,
4843 		   struct ceph_object_locator *oloc,
4844 		   const char *class, const char *method,
4845 		   unsigned int flags,
4846 		   struct page *req_page, size_t req_len,
4847 		   struct page *resp_page, size_t *resp_len)
4848 {
4849 	struct ceph_osd_request *req;
4850 	int ret;
4851 
4852 	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4853 		return -E2BIG;
4854 
4855 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4856 	if (!req)
4857 		return -ENOMEM;
4858 
4859 	ceph_oid_copy(&req->r_base_oid, oid);
4860 	ceph_oloc_copy(&req->r_base_oloc, oloc);
4861 	req->r_flags = flags;
4862 
4863 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4864 	if (ret)
4865 		goto out_put_req;
4866 
4867 	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4868 	if (req_page)
4869 		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4870 						  0, false, false);
4871 	if (resp_page)
4872 		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4873 						   *resp_len, 0, false, false);
4874 
4875 	ceph_osdc_start_request(osdc, req, false);
4876 	ret = ceph_osdc_wait_request(osdc, req);
4877 	if (ret >= 0) {
4878 		ret = req->r_ops[0].rval;
4879 		if (resp_page)
4880 			*resp_len = req->r_ops[0].outdata_len;
4881 	}
4882 
4883 out_put_req:
4884 	ceph_osdc_put_request(req);
4885 	return ret;
4886 }
4887 EXPORT_SYMBOL(ceph_osdc_call);
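
/*
 * Example invocation (a sketch - the class and method names are
 * hypothetical; note that the request and the reply must each fit in a
 * single page):
 *
 *	size_t resp_len = PAGE_SIZE;
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "myclass", "mymethod",
 *			     CEPH_OSD_FLAG_READ, req_page, req_len,
 *			     resp_page, &resp_len);
 *
 * On success @ret is the method's rval and @resp_len is updated to the
 * actual reply length.
 */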
4888 
4889 /*
4890  * init, shutdown
4891  */
4892 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4893 {
4894 	int err;
4895 
4896 	dout("init\n");
4897 	osdc->client = client;
4898 	init_rwsem(&osdc->lock);
4899 	osdc->osds = RB_ROOT;
4900 	INIT_LIST_HEAD(&osdc->osd_lru);
4901 	spin_lock_init(&osdc->osd_lru_lock);
4902 	osd_init(&osdc->homeless_osd);
4903 	osdc->homeless_osd.o_osdc = osdc;
4904 	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4905 	osdc->last_linger_id = CEPH_LINGER_ID_START;
4906 	osdc->linger_requests = RB_ROOT;
4907 	osdc->map_checks = RB_ROOT;
4908 	osdc->linger_map_checks = RB_ROOT;
4909 	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4910 	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4911 
4912 	err = -ENOMEM;
4913 	osdc->osdmap = ceph_osdmap_alloc();
4914 	if (!osdc->osdmap)
4915 		goto out;
4916 
4917 	osdc->req_mempool = mempool_create_slab_pool(10,
4918 						     ceph_osd_request_cache);
4919 	if (!osdc->req_mempool)
4920 		goto out_map;
4921 
4922 	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4923 				PAGE_SIZE, 10, true, "osd_op");
4924 	if (err < 0)
4925 		goto out_mempool;
4926 	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4927 				PAGE_SIZE, 10, true, "osd_op_reply");
4928 	if (err < 0)
4929 		goto out_msgpool;
4930 
4931 	err = -ENOMEM;
4932 	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4933 	if (!osdc->notify_wq)
4934 		goto out_msgpool_reply;
4935 
4936 	schedule_delayed_work(&osdc->timeout_work,
4937 			      osdc->client->options->osd_keepalive_timeout);
4938 	schedule_delayed_work(&osdc->osds_timeout_work,
4939 	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4940 
4941 	return 0;
4942 
4943 out_msgpool_reply:
4944 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4945 out_msgpool:
4946 	ceph_msgpool_destroy(&osdc->msgpool_op);
4947 out_mempool:
4948 	mempool_destroy(osdc->req_mempool);
4949 out_map:
4950 	ceph_osdmap_destroy(osdc->osdmap);
4951 out:
4952 	return err;
4953 }
4954 
4955 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4956 {
4957 	flush_workqueue(osdc->notify_wq);
4958 	destroy_workqueue(osdc->notify_wq);
4959 	cancel_delayed_work_sync(&osdc->timeout_work);
4960 	cancel_delayed_work_sync(&osdc->osds_timeout_work);
4961 
4962 	down_write(&osdc->lock);
4963 	while (!RB_EMPTY_ROOT(&osdc->osds)) {
4964 		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4965 						struct ceph_osd, o_node);
4966 		close_osd(osd);
4967 	}
4968 	up_write(&osdc->lock);
4969 	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
4970 	osd_cleanup(&osdc->homeless_osd);
4971 
4972 	WARN_ON(!list_empty(&osdc->osd_lru));
4973 	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4974 	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4975 	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4976 	WARN_ON(atomic_read(&osdc->num_requests));
4977 	WARN_ON(atomic_read(&osdc->num_homeless));
4978 
4979 	ceph_osdmap_destroy(osdc->osdmap);
4980 	mempool_destroy(osdc->req_mempool);
4981 	ceph_msgpool_destroy(&osdc->msgpool_op);
4982 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4983 }
4984 
4985 /*
4986  * Read some contiguous pages.  If we cross a stripe boundary, shorten
4987  * *plen.  Return number of bytes read, or error.
4988  */
4989 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4990 			struct ceph_vino vino, struct ceph_file_layout *layout,
4991 			u64 off, u64 *plen,
4992 			u32 truncate_seq, u64 truncate_size,
4993 			struct page **pages, int num_pages, int page_align)
4994 {
4995 	struct ceph_osd_request *req;
4996 	int rc = 0;
4997 
4998 	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
4999 	     vino.snap, off, *plen);
5000 	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
5001 				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
5002 				    NULL, truncate_seq, truncate_size,
5003 				    false);
5004 	if (IS_ERR(req))
5005 		return PTR_ERR(req);
5006 
5007 	/* it may be a short read due to an object boundary */
5008 	osd_req_op_extent_osd_data_pages(req, 0,
5009 				pages, *plen, page_align, false, false);
5010 
5011 	dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
5012 	     off, *plen, *plen, page_align);
5013 
5014 	rc = ceph_osdc_start_request(osdc, req, false);
5015 	if (!rc)
5016 		rc = ceph_osdc_wait_request(osdc, req);
5017 
5018 	ceph_osdc_put_request(req);
5019 	dout("readpages result %d\n", rc);
5020 	return rc;
5021 }
5022 EXPORT_SYMBOL(ceph_osdc_readpages);
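
/*
 * Example (a sketch; because *plen may be shortened at an object
 * boundary, fewer than num_pages pages may end up being filled):
 *
 *	u64 len = count;
 *	int num_pages = calc_pages_for(0, len);
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *				 truncate_seq, truncate_size,
 *				 pages, num_pages, 0);
 */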
5023 
5024 /*
5025  * do a synchronous write on N pages
5026  */
5027 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
5028 			 struct ceph_file_layout *layout,
5029 			 struct ceph_snap_context *snapc,
5030 			 u64 off, u64 len,
5031 			 u32 truncate_seq, u64 truncate_size,
5032 			 struct timespec *mtime,
5033 			 struct page **pages, int num_pages)
5034 {
5035 	struct ceph_osd_request *req;
5036 	int rc = 0;
5037 	int page_align = off & ~PAGE_MASK;
5038 
5039 	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
5040 				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
5041 				    snapc, truncate_seq, truncate_size,
5042 				    true);
5043 	if (IS_ERR(req))
5044 		return PTR_ERR(req);
5045 
5046 	/* it may be a short write due to an object boundary */
5047 	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
5048 				false, false);
5049 	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
5050 
5051 	req->r_mtime = *mtime;
5052 	rc = ceph_osdc_start_request(osdc, req, true);
5053 	if (!rc)
5054 		rc = ceph_osdc_wait_request(osdc, req);
5055 
5056 	ceph_osdc_put_request(req);
5057 	if (rc == 0)
5058 		rc = len;
5059 	dout("writepages result %d\n", rc);
5060 	return rc;
5061 }
5062 EXPORT_SYMBOL(ceph_osdc_writepages);
5063 
5064 int ceph_osdc_setup(void)
5065 {
5066 	size_t size = sizeof(struct ceph_osd_request) +
5067 	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5068 
5069 	BUG_ON(ceph_osd_request_cache);
5070 	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5071 						   0, 0, NULL);
5072 
5073 	return ceph_osd_request_cache ? 0 : -ENOMEM;
5074 }
5075 EXPORT_SYMBOL(ceph_osdc_setup);
5076 
5077 void ceph_osdc_cleanup(void)
5078 {
5079 	BUG_ON(!ceph_osd_request_cache);
5080 	kmem_cache_destroy(ceph_osd_request_cache);
5081 	ceph_osd_request_cache = NULL;
5082 }
5083 EXPORT_SYMBOL(ceph_osdc_cleanup);
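
/*
 * ceph_osdc_setup() and ceph_osdc_cleanup() manage the global request
 * slab and are intended to be paired from module init/exit (a sketch;
 * my_init()/my_exit() are hypothetical):
 *
 *	static int __init my_init(void)
 *	{
 *		return ceph_osdc_setup();
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		ceph_osdc_cleanup();
 *	}
 */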
5084 
5085 /*
5086  * handle incoming message
5087  */
5088 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5089 {
5090 	struct ceph_osd *osd = con->private;
5091 	struct ceph_osd_client *osdc = osd->o_osdc;
5092 	int type = le16_to_cpu(msg->hdr.type);
5093 
5094 	switch (type) {
5095 	case CEPH_MSG_OSD_MAP:
5096 		ceph_osdc_handle_map(osdc, msg);
5097 		break;
5098 	case CEPH_MSG_OSD_OPREPLY:
5099 		handle_reply(osd, msg);
5100 		break;
5101 	case CEPH_MSG_OSD_BACKOFF:
5102 		handle_backoff(osd, msg);
5103 		break;
5104 	case CEPH_MSG_WATCH_NOTIFY:
5105 		handle_watch_notify(osdc, msg);
5106 		break;
5107 
5108 	default:
5109 		pr_err("received unknown message type %d %s\n", type,
5110 		       ceph_msg_type_name(type));
5111 	}
5112 
5113 	ceph_msg_put(msg);
5114 }
5115 
5116 /*
5117  * Look up and return the message for an incoming reply.  Don't try
5118  * to handle a data portion larger than the preallocated buffer at
5119  * the moment - for now, just skip the message.
5120  */
5121 static struct ceph_msg *get_reply(struct ceph_connection *con,
5122 				  struct ceph_msg_header *hdr,
5123 				  int *skip)
5124 {
5125 	struct ceph_osd *osd = con->private;
5126 	struct ceph_osd_client *osdc = osd->o_osdc;
5127 	struct ceph_msg *m = NULL;
5128 	struct ceph_osd_request *req;
5129 	int front_len = le32_to_cpu(hdr->front_len);
5130 	int data_len = le32_to_cpu(hdr->data_len);
5131 	u64 tid = le64_to_cpu(hdr->tid);
5132 
5133 	down_read(&osdc->lock);
5134 	if (!osd_registered(osd)) {
5135 		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5136 		*skip = 1;
5137 		goto out_unlock_osdc;
5138 	}
5139 	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5140 
5141 	mutex_lock(&osd->lock);
5142 	req = lookup_request(&osd->o_requests, tid);
5143 	if (!req) {
5144 		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5145 		     osd->o_osd, tid);
5146 		*skip = 1;
5147 		goto out_unlock_session;
5148 	}
5149 
5150 	ceph_msg_revoke_incoming(req->r_reply);
5151 
5152 	if (front_len > req->r_reply->front_alloc_len) {
5153 		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5154 			__func__, osd->o_osd, req->r_tid, front_len,
5155 			req->r_reply->front_alloc_len);
5156 		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5157 				 false);
5158 		if (!m)
5159 			goto out_unlock_session;
5160 		ceph_msg_put(req->r_reply);
5161 		req->r_reply = m;
5162 	}
5163 
5164 	if (data_len > req->r_reply->data_length) {
5165 		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5166 			__func__, osd->o_osd, req->r_tid, data_len,
5167 			req->r_reply->data_length);
5168 		m = NULL;
5169 		*skip = 1;
5170 		goto out_unlock_session;
5171 	}
5172 
5173 	m = ceph_msg_get(req->r_reply);
5174 	dout("get_reply tid %lld %p\n", tid, m);
5175 
5176 out_unlock_session:
5177 	mutex_unlock(&osd->lock);
5178 out_unlock_osdc:
5179 	up_read(&osdc->lock);
5180 	return m;
5181 }
5182 
5183 /*
5184  * TODO: switch to a msg-owned pagelist
5185  */
5186 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5187 {
5188 	struct ceph_msg *m;
5189 	int type = le16_to_cpu(hdr->type);
5190 	u32 front_len = le32_to_cpu(hdr->front_len);
5191 	u32 data_len = le32_to_cpu(hdr->data_len);
5192 
5193 	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
5194 	if (!m)
5195 		return NULL;
5196 
5197 	if (data_len) {
5198 		struct page **pages;
5199 		struct ceph_osd_data osd_data;
5200 
5201 		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5202 					       GFP_NOIO);
5203 		if (IS_ERR(pages)) {
5204 			ceph_msg_put(m);
5205 			return NULL;
5206 		}
5207 
5208 		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
5209 					 false);
5210 		ceph_osdc_msg_data_add(m, &osd_data);
5211 	}
5212 
5213 	return m;
5214 }
5215 
5216 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5217 				  struct ceph_msg_header *hdr,
5218 				  int *skip)
5219 {
5220 	struct ceph_osd *osd = con->private;
5221 	int type = le16_to_cpu(hdr->type);
5222 
5223 	*skip = 0;
5224 	switch (type) {
5225 	case CEPH_MSG_OSD_MAP:
5226 	case CEPH_MSG_OSD_BACKOFF:
5227 	case CEPH_MSG_WATCH_NOTIFY:
5228 		return alloc_msg_with_page_vector(hdr);
5229 	case CEPH_MSG_OSD_OPREPLY:
5230 		return get_reply(con, hdr, skip);
5231 	default:
5232 		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5233 			osd->o_osd, type);
5234 		*skip = 1;
5235 		return NULL;
5236 	}
5237 }
5238 
5239 /*
5240  * Wrappers to refcount containing ceph_osd struct
5241  */
5242 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5243 {
5244 	struct ceph_osd *osd = con->private;
5245 	if (get_osd(osd))
5246 		return con;
5247 	return NULL;
5248 }
5249 
5250 static void put_osd_con(struct ceph_connection *con)
5251 {
5252 	struct ceph_osd *osd = con->private;
5253 	put_osd(osd);
5254 }
5255 
5256 /*
5257  * authentication
5258  */
5259 /*
5260  * Note: returned pointer is the address of a structure that's
5261  * managed separately.  Caller must *not* attempt to free it.
5262  */
5263 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5264 					int *proto, int force_new)
5265 {
5266 	struct ceph_osd *o = con->private;
5267 	struct ceph_osd_client *osdc = o->o_osdc;
5268 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5269 	struct ceph_auth_handshake *auth = &o->o_auth;
5270 
5271 	if (force_new && auth->authorizer) {
5272 		ceph_auth_destroy_authorizer(auth->authorizer);
5273 		auth->authorizer = NULL;
5274 	}
5275 	if (!auth->authorizer) {
5276 		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5277 						      auth);
5278 		if (ret)
5279 			return ERR_PTR(ret);
5280 	} else {
5281 		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5282 						     auth);
5283 		if (ret)
5284 			return ERR_PTR(ret);
5285 	}
5286 	*proto = ac->protocol;
5287 
5288 	return auth;
5289 }
5290 
5291 
5292 static int verify_authorizer_reply(struct ceph_connection *con)
5293 {
5294 	struct ceph_osd *o = con->private;
5295 	struct ceph_osd_client *osdc = o->o_osdc;
5296 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5297 
5298 	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5299 }
5300 
5301 static int invalidate_authorizer(struct ceph_connection *con)
5302 {
5303 	struct ceph_osd *o = con->private;
5304 	struct ceph_osd_client *osdc = o->o_osdc;
5305 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5306 
5307 	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5308 	return ceph_monc_validate_auth(&osdc->client->monc);
5309 }
5310 
5311 static void osd_reencode_message(struct ceph_msg *msg)
5312 {
5313 	int type = le16_to_cpu(msg->hdr.type);
5314 
5315 	if (type == CEPH_MSG_OSD_OP)
5316 		encode_request_finish(msg);
5317 }
5318 
5319 static int osd_sign_message(struct ceph_msg *msg)
5320 {
5321 	struct ceph_osd *o = msg->con->private;
5322 	struct ceph_auth_handshake *auth = &o->o_auth;
5323 
5324 	return ceph_auth_sign_message(auth, msg);
5325 }
5326 
5327 static int osd_check_message_signature(struct ceph_msg *msg)
5328 {
5329 	struct ceph_osd *o = msg->con->private;
5330 	struct ceph_auth_handshake *auth = &o->o_auth;
5331 
5332 	return ceph_auth_check_message_signature(auth, msg);
5333 }
5334 
5335 static const struct ceph_connection_operations osd_con_ops = {
5336 	.get = get_osd_con,
5337 	.put = put_osd_con,
5338 	.dispatch = dispatch,
5339 	.get_authorizer = get_authorizer,
5340 	.verify_authorizer_reply = verify_authorizer_reply,
5341 	.invalidate_authorizer = invalidate_authorizer,
5342 	.alloc_msg = alloc_msg,
5343 	.reencode_message = osd_reencode_message,
5344 	.sign_message = osd_sign_message,
5345 	.check_message_signature = osd_check_message_signature,
5346 	.fault = osd_fault,
5347 };
5348