xref: /openbmc/linux/net/ceph/osd_client.c (revision dee0c5f8)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/ceph/ceph_debug.h>
4 
5 #include <linux/module.h>
6 #include <linux/err.h>
7 #include <linux/highmem.h>
8 #include <linux/mm.h>
9 #include <linux/pagemap.h>
10 #include <linux/slab.h>
11 #include <linux/uaccess.h>
12 #ifdef CONFIG_BLOCK
13 #include <linux/bio.h>
14 #endif
15 
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/libceph.h>
18 #include <linux/ceph/osd_client.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/auth.h>
22 #include <linux/ceph/pagelist.h>
23 #include <linux/ceph/striper.h>
24 
25 #define OSD_OPREPLY_FRONT_LEN	512
26 
27 static struct kmem_cache	*ceph_osd_request_cache;
28 
29 static const struct ceph_connection_operations osd_con_ops;
30 
31 /*
32  * Implement client access to distributed object storage cluster.
33  *
34  * All data objects are stored within a cluster/cloud of OSDs, or
35  * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
36  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
37  * remote daemons serving up and coordinating consistent and safe
38  * access to storage.
39  *
40  * Cluster membership and the mapping of data objects onto storage devices
41  * are described by the osd map.
42  *
43  * We keep track of pending OSD requests (read, write), resubmit
44  * requests to different OSDs when the cluster topology or data layout
45  * changes, or retry the affected requests when the communications
46  * channel with an OSD is reset.
47  */
48 
49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
51 static void link_linger(struct ceph_osd *osd,
52 			struct ceph_osd_linger_request *lreq);
53 static void unlink_linger(struct ceph_osd *osd,
54 			  struct ceph_osd_linger_request *lreq);
55 static void clear_backoffs(struct ceph_osd *osd);
56 
57 #if 1
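/*
 * Returns true if @sem appears to be held for write: if a read trylock
 * succeeds, no writer can be holding it.  A failed trylock is treated as
 * "write-locked", which can be a false positive (e.g. due to waiting
 * writers), but that only makes the checks below less strict, never
 * noisier.
 */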
58 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
59 {
60 	bool wrlocked = true;
61 
62 	if (unlikely(down_read_trylock(sem))) {
63 		wrlocked = false;
64 		up_read(sem);
65 	}
66 
67 	return wrlocked;
68 }
69 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
70 {
71 	WARN_ON(!rwsem_is_locked(&osdc->lock));
72 }
73 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
74 {
75 	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
76 }
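/*
 * An osd is considered locked if its mutex is held together with (at
 * least) a read lock on osdc->lock, or if osdc->lock is held for write.
 */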
77 static inline void verify_osd_locked(struct ceph_osd *osd)
78 {
79 	struct ceph_osd_client *osdc = osd->o_osdc;
80 
81 	WARN_ON(!(mutex_is_locked(&osd->lock) &&
82 		  rwsem_is_locked(&osdc->lock)) &&
83 		!rwsem_is_wrlocked(&osdc->lock));
84 }
85 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
86 {
87 	WARN_ON(!mutex_is_locked(&lreq->lock));
88 }
89 #else
90 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
91 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
92 static inline void verify_osd_locked(struct ceph_osd *osd) { }
93 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
94 #endif
95 
96 /*
97  * Calculate the mapping of a file extent onto an object and fill out the
98  * request accordingly, shortening the extent as necessary if it crosses an
99  * object boundary.
100  *
101  * Fill in the osd op in the request message.
102  */
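/*
 * Worked example (illustrative numbers, simple striping with 4M objects):
 * a request for 0x500000~0x400000 lands 0x100000 bytes into object 1, so
 * it is clipped to objlen 0x300000 and *plen is shortened to match; the
 * caller issues the remainder as a separate request.
 */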
103 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
104 			u64 *objnum, u64 *objoff, u64 *objlen)
105 {
106 	u64 orig_len = *plen;
107 	u32 xlen;
108 
109 	/* object extent? */
110 	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
111 					  objoff, &xlen);
112 	*objlen = xlen;
113 	if (*objlen < orig_len) {
114 		*plen = *objlen;
115 		dout(" skipping last %llu, final file extent %llu~%llu\n",
116 		     orig_len - *plen, off, *plen);
117 	}
118 
119 	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
120 	return 0;
121 }
122 
123 static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
124 {
125 	memset(osd_data, 0, sizeof (*osd_data));
126 	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
127 }
128 
129 /*
130  * Consumes @pages if @own_pages is true.
131  */
132 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
133 			struct page **pages, u64 length, u32 alignment,
134 			bool pages_from_pool, bool own_pages)
135 {
136 	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
137 	osd_data->pages = pages;
138 	osd_data->length = length;
139 	osd_data->alignment = alignment;
140 	osd_data->pages_from_pool = pages_from_pool;
141 	osd_data->own_pages = own_pages;
142 }
143 
144 /*
145  * Consumes a ref on @pagelist.
146  */
147 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
148 			struct ceph_pagelist *pagelist)
149 {
150 	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
151 	osd_data->pagelist = pagelist;
152 }
153 
154 #ifdef CONFIG_BLOCK
155 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
156 				   struct ceph_bio_iter *bio_pos,
157 				   u32 bio_length)
158 {
159 	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
160 	osd_data->bio_pos = *bio_pos;
161 	osd_data->bio_length = bio_length;
162 }
163 #endif /* CONFIG_BLOCK */
164 
165 static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
166 				     struct ceph_bvec_iter *bvec_pos,
167 				     u32 num_bvecs)
168 {
169 	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
170 	osd_data->bvec_pos = *bvec_pos;
171 	osd_data->num_bvecs = num_bvecs;
172 }
173 
174 static void ceph_osd_iter_init(struct ceph_osd_data *osd_data,
175 			       struct iov_iter *iter)
176 {
177 	osd_data->type = CEPH_OSD_DATA_TYPE_ITER;
178 	osd_data->iter = *iter;
179 }
180 
181 static struct ceph_osd_data *
182 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
183 {
184 	BUG_ON(which >= osd_req->r_num_ops);
185 
186 	return &osd_req->r_ops[which].raw_data_in;
187 }
188 
189 struct ceph_osd_data *
190 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
191 			unsigned int which)
192 {
193 	return osd_req_op_data(osd_req, which, extent, osd_data);
194 }
195 EXPORT_SYMBOL(osd_req_op_extent_osd_data);
196 
197 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
198 			unsigned int which, struct page **pages,
199 			u64 length, u32 alignment,
200 			bool pages_from_pool, bool own_pages)
201 {
202 	struct ceph_osd_data *osd_data;
203 
204 	osd_data = osd_req_op_raw_data_in(osd_req, which);
205 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
206 				pages_from_pool, own_pages);
207 }
208 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
209 
210 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
211 			unsigned int which, struct page **pages,
212 			u64 length, u32 alignment,
213 			bool pages_from_pool, bool own_pages)
214 {
215 	struct ceph_osd_data *osd_data;
216 
217 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
218 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
219 				pages_from_pool, own_pages);
220 }
221 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
222 
223 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
224 			unsigned int which, struct ceph_pagelist *pagelist)
225 {
226 	struct ceph_osd_data *osd_data;
227 
228 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
229 	ceph_osd_data_pagelist_init(osd_data, pagelist);
230 }
231 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
232 
233 #ifdef CONFIG_BLOCK
234 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
235 				    unsigned int which,
236 				    struct ceph_bio_iter *bio_pos,
237 				    u32 bio_length)
238 {
239 	struct ceph_osd_data *osd_data;
240 
241 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
242 	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
243 }
244 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
245 #endif /* CONFIG_BLOCK */
246 
247 void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
248 				      unsigned int which,
249 				      struct bio_vec *bvecs, u32 num_bvecs,
250 				      u32 bytes)
251 {
252 	struct ceph_osd_data *osd_data;
253 	struct ceph_bvec_iter it = {
254 		.bvecs = bvecs,
255 		.iter = { .bi_size = bytes },
256 	};
257 
258 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
259 	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
260 }
261 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);
262 
263 void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
264 					 unsigned int which,
265 					 struct ceph_bvec_iter *bvec_pos)
266 {
267 	struct ceph_osd_data *osd_data;
268 
269 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
270 	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
271 }
272 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
273 
274 /**
275  * osd_req_op_extent_osd_iter - Set up an operation with an iterator buffer
276  * @osd_req: The request to set up
277  * @which: Index of the operation in which to set the iter
278  * @iter: The buffer iterator
279  */
280 void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
281 				unsigned int which, struct iov_iter *iter)
282 {
283 	struct ceph_osd_data *osd_data;
284 
285 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
286 	ceph_osd_iter_init(osd_data, iter);
287 }
288 EXPORT_SYMBOL(osd_req_op_extent_osd_iter);
289 
290 static void osd_req_op_cls_request_info_pagelist(
291 			struct ceph_osd_request *osd_req,
292 			unsigned int which, struct ceph_pagelist *pagelist)
293 {
294 	struct ceph_osd_data *osd_data;
295 
296 	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
297 	ceph_osd_data_pagelist_init(osd_data, pagelist);
298 }
299 
300 void osd_req_op_cls_request_data_pagelist(
301 			struct ceph_osd_request *osd_req,
302 			unsigned int which, struct ceph_pagelist *pagelist)
303 {
304 	struct ceph_osd_data *osd_data;
305 
306 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
307 	ceph_osd_data_pagelist_init(osd_data, pagelist);
308 	osd_req->r_ops[which].cls.indata_len += pagelist->length;
309 	osd_req->r_ops[which].indata_len += pagelist->length;
310 }
311 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
312 
313 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
314 			unsigned int which, struct page **pages, u64 length,
315 			u32 alignment, bool pages_from_pool, bool own_pages)
316 {
317 	struct ceph_osd_data *osd_data;
318 
319 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
320 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
321 				pages_from_pool, own_pages);
322 	osd_req->r_ops[which].cls.indata_len += length;
323 	osd_req->r_ops[which].indata_len += length;
324 }
325 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
326 
327 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
328 				       unsigned int which,
329 				       struct bio_vec *bvecs, u32 num_bvecs,
330 				       u32 bytes)
331 {
332 	struct ceph_osd_data *osd_data;
333 	struct ceph_bvec_iter it = {
334 		.bvecs = bvecs,
335 		.iter = { .bi_size = bytes },
336 	};
337 
338 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
339 	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
340 	osd_req->r_ops[which].cls.indata_len += bytes;
341 	osd_req->r_ops[which].indata_len += bytes;
342 }
343 EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
344 
345 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
346 			unsigned int which, struct page **pages, u64 length,
347 			u32 alignment, bool pages_from_pool, bool own_pages)
348 {
349 	struct ceph_osd_data *osd_data;
350 
351 	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
352 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
353 				pages_from_pool, own_pages);
354 }
355 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
356 
357 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
358 {
359 	switch (osd_data->type) {
360 	case CEPH_OSD_DATA_TYPE_NONE:
361 		return 0;
362 	case CEPH_OSD_DATA_TYPE_PAGES:
363 		return osd_data->length;
364 	case CEPH_OSD_DATA_TYPE_PAGELIST:
365 		return (u64)osd_data->pagelist->length;
366 #ifdef CONFIG_BLOCK
367 	case CEPH_OSD_DATA_TYPE_BIO:
368 		return (u64)osd_data->bio_length;
369 #endif /* CONFIG_BLOCK */
370 	case CEPH_OSD_DATA_TYPE_BVECS:
371 		return osd_data->bvec_pos.iter.bi_size;
372 	case CEPH_OSD_DATA_TYPE_ITER:
373 		return iov_iter_count(&osd_data->iter);
374 	default:
375 		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
376 		return 0;
377 	}
378 }
379 
380 static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
381 {
382 	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
383 		int num_pages;
384 
385 		num_pages = calc_pages_for((u64)osd_data->alignment,
386 						(u64)osd_data->length);
387 		ceph_release_page_vector(osd_data->pages, num_pages);
388 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
389 		ceph_pagelist_release(osd_data->pagelist);
390 	}
391 	ceph_osd_data_init(osd_data);
392 }
393 
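/*
 * Release the data buffers attached to op @which.  Which fields carry
 * data depends on the opcode, hence the switch below.
 */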
394 static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
395 			unsigned int which)
396 {
397 	struct ceph_osd_req_op *op;
398 
399 	BUG_ON(which >= osd_req->r_num_ops);
400 	op = &osd_req->r_ops[which];
401 
402 	switch (op->op) {
403 	case CEPH_OSD_OP_READ:
404 	case CEPH_OSD_OP_SPARSE_READ:
405 	case CEPH_OSD_OP_WRITE:
406 	case CEPH_OSD_OP_WRITEFULL:
407 		kfree(op->extent.sparse_ext);
408 		ceph_osd_data_release(&op->extent.osd_data);
409 		break;
410 	case CEPH_OSD_OP_CALL:
411 		ceph_osd_data_release(&op->cls.request_info);
412 		ceph_osd_data_release(&op->cls.request_data);
413 		ceph_osd_data_release(&op->cls.response_data);
414 		break;
415 	case CEPH_OSD_OP_SETXATTR:
416 	case CEPH_OSD_OP_CMPXATTR:
417 		ceph_osd_data_release(&op->xattr.osd_data);
418 		break;
419 	case CEPH_OSD_OP_STAT:
420 		ceph_osd_data_release(&op->raw_data_in);
421 		break;
422 	case CEPH_OSD_OP_NOTIFY_ACK:
423 		ceph_osd_data_release(&op->notify_ack.request_data);
424 		break;
425 	case CEPH_OSD_OP_NOTIFY:
426 		ceph_osd_data_release(&op->notify.request_data);
427 		ceph_osd_data_release(&op->notify.response_data);
428 		break;
429 	case CEPH_OSD_OP_LIST_WATCHERS:
430 		ceph_osd_data_release(&op->list_watchers.response_data);
431 		break;
432 	case CEPH_OSD_OP_COPY_FROM2:
433 		ceph_osd_data_release(&op->copy_from.osd_data);
434 		break;
435 	default:
436 		break;
437 	}
438 }
439 
440 /*
441  * Assumes @t is zero-initialized.
442  */
443 static void target_init(struct ceph_osd_request_target *t)
444 {
445 	ceph_oid_init(&t->base_oid);
446 	ceph_oloc_init(&t->base_oloc);
447 	ceph_oid_init(&t->target_oid);
448 	ceph_oloc_init(&t->target_oloc);
449 
450 	ceph_osds_init(&t->acting);
451 	ceph_osds_init(&t->up);
452 	t->size = -1;
453 	t->min_size = -1;
454 
455 	t->osd = CEPH_HOMELESS_OSD;
456 }
457 
458 static void target_copy(struct ceph_osd_request_target *dest,
459 			const struct ceph_osd_request_target *src)
460 {
461 	ceph_oid_copy(&dest->base_oid, &src->base_oid);
462 	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
463 	ceph_oid_copy(&dest->target_oid, &src->target_oid);
464 	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
465 
466 	dest->pgid = src->pgid; /* struct */
467 	dest->spgid = src->spgid; /* struct */
468 	dest->pg_num = src->pg_num;
469 	dest->pg_num_mask = src->pg_num_mask;
470 	ceph_osds_copy(&dest->acting, &src->acting);
471 	ceph_osds_copy(&dest->up, &src->up);
472 	dest->size = src->size;
473 	dest->min_size = src->min_size;
474 	dest->sort_bitwise = src->sort_bitwise;
475 	dest->recovery_deletes = src->recovery_deletes;
476 
477 	dest->flags = src->flags;
478 	dest->used_replica = src->used_replica;
479 	dest->paused = src->paused;
480 
481 	dest->epoch = src->epoch;
482 	dest->last_force_resend = src->last_force_resend;
483 
484 	dest->osd = src->osd;
485 }
486 
487 static void target_destroy(struct ceph_osd_request_target *t)
488 {
489 	ceph_oid_destroy(&t->base_oid);
490 	ceph_oloc_destroy(&t->base_oloc);
491 	ceph_oid_destroy(&t->target_oid);
492 	ceph_oloc_destroy(&t->target_oloc);
493 }
494 
495 /*
496  * requests
497  */
498 static void request_release_checks(struct ceph_osd_request *req)
499 {
500 	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
501 	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
502 	WARN_ON(!list_empty(&req->r_private_item));
503 	WARN_ON(req->r_osd);
504 }
505 
506 static void ceph_osdc_release_request(struct kref *kref)
507 {
508 	struct ceph_osd_request *req = container_of(kref,
509 					    struct ceph_osd_request, r_kref);
510 	unsigned int which;
511 
512 	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
513 	     req->r_request, req->r_reply);
514 	request_release_checks(req);
515 
516 	if (req->r_request)
517 		ceph_msg_put(req->r_request);
518 	if (req->r_reply)
519 		ceph_msg_put(req->r_reply);
520 
521 	for (which = 0; which < req->r_num_ops; which++)
522 		osd_req_op_data_release(req, which);
523 
524 	target_destroy(&req->r_t);
525 	ceph_put_snap_context(req->r_snapc);
526 
527 	if (req->r_mempool)
528 		mempool_free(req, req->r_osdc->req_mempool);
529 	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
530 		kmem_cache_free(ceph_osd_request_cache, req);
531 	else
532 		kfree(req);
533 }
534 
535 void ceph_osdc_get_request(struct ceph_osd_request *req)
536 {
537 	dout("%s %p (was %d)\n", __func__, req,
538 	     kref_read(&req->r_kref));
539 	kref_get(&req->r_kref);
540 }
541 EXPORT_SYMBOL(ceph_osdc_get_request);
542 
543 void ceph_osdc_put_request(struct ceph_osd_request *req)
544 {
545 	if (req) {
546 		dout("%s %p (was %d)\n", __func__, req,
547 		     kref_read(&req->r_kref));
548 		kref_put(&req->r_kref, ceph_osdc_release_request);
549 	}
550 }
551 EXPORT_SYMBOL(ceph_osdc_put_request);
552 
553 static void request_init(struct ceph_osd_request *req)
554 {
555 	/* req only, each op is zeroed in osd_req_op_init() */
556 	memset(req, 0, sizeof(*req));
557 
558 	kref_init(&req->r_kref);
559 	init_completion(&req->r_completion);
560 	RB_CLEAR_NODE(&req->r_node);
561 	RB_CLEAR_NODE(&req->r_mc_node);
562 	INIT_LIST_HEAD(&req->r_private_item);
563 
564 	target_init(&req->r_t);
565 }
566 
567 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
568 					       struct ceph_snap_context *snapc,
569 					       unsigned int num_ops,
570 					       bool use_mempool,
571 					       gfp_t gfp_flags)
572 {
573 	struct ceph_osd_request *req;
574 
575 	if (use_mempool) {
576 		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
577 		req = mempool_alloc(osdc->req_mempool, gfp_flags);
578 	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
579 		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
580 	} else {
581 		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
582 		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
583 	}
584 	if (unlikely(!req))
585 		return NULL;
586 
587 	request_init(req);
588 	req->r_osdc = osdc;
589 	req->r_mempool = use_mempool;
590 	req->r_num_ops = num_ops;
591 	req->r_snapid = CEPH_NOSNAP;
592 	req->r_snapc = ceph_get_snap_context(snapc);
593 
594 	dout("%s req %p\n", __func__, req);
595 	return req;
596 }
597 EXPORT_SYMBOL(ceph_osdc_alloc_request);
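/*
 * Typical caller flow for ceph_osdc_alloc_request(), sketched for
 * illustration only (ino, objnum, pool_id, off, len and pages stand in
 * for the caller's values; error handling omitted):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", ino, objnum);
 *	req->r_base_oloc.pool = pool_id;
 *	ceph_osdc_alloc_messages(req, GFP_NOFS);
 *	ceph_osdc_start_request(osdc, req);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */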
598 
599 static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
600 {
601 	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
602 }
603 
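/*
 * Allocate the request and reply messages for @req, with the request
 * front sized to fit the encoding produced later.  Data payloads are
 * attached to the messages separately, when the request is sent.
 */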
604 static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
605 				      int num_request_data_items,
606 				      int num_reply_data_items)
607 {
608 	struct ceph_osd_client *osdc = req->r_osdc;
609 	struct ceph_msg *msg;
610 	int msg_size;
611 
612 	WARN_ON(req->r_request || req->r_reply);
613 	WARN_ON(ceph_oid_empty(&req->r_base_oid));
614 	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
615 
616 	/* create request message */
617 	msg_size = CEPH_ENCODING_START_BLK_LEN +
618 			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
619 	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
620 	msg_size += CEPH_ENCODING_START_BLK_LEN +
621 			sizeof(struct ceph_osd_reqid); /* reqid */
622 	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
623 	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
624 	msg_size += CEPH_ENCODING_START_BLK_LEN +
625 			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
626 	msg_size += 4 + req->r_base_oid.name_len; /* oid */
627 	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
628 	msg_size += 8; /* snapid */
629 	msg_size += 8; /* snap_seq */
630 	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
631 	msg_size += 4 + 8; /* retry_attempt, features */
632 
633 	if (req->r_mempool)
634 		msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
635 				       num_request_data_items);
636 	else
637 		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
638 				    num_request_data_items, gfp, true);
639 	if (!msg)
640 		return -ENOMEM;
641 
642 	memset(msg->front.iov_base, 0, msg->front.iov_len);
643 	req->r_request = msg;
644 
645 	/* create reply message */
646 	msg_size = OSD_OPREPLY_FRONT_LEN;
647 	msg_size += req->r_base_oid.name_len;
648 	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
649 
650 	if (req->r_mempool)
651 		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
652 				       num_reply_data_items);
653 	else
654 		msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
655 				    num_reply_data_items, gfp, true);
656 	if (!msg)
657 		return -ENOMEM;
658 
659 	req->r_reply = msg;
660 
661 	return 0;
662 }
663 
664 static bool osd_req_opcode_valid(u16 opcode)
665 {
666 	switch (opcode) {
667 #define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
668 __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
669 #undef GENERATE_CASE
670 	default:
671 		return false;
672 	}
673 }
674 
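/*
 * Work out how many data items the request and reply messages need,
 * based on which ops carry outgoing and/or incoming data.
 */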
675 static void get_num_data_items(struct ceph_osd_request *req,
676 			       int *num_request_data_items,
677 			       int *num_reply_data_items)
678 {
679 	struct ceph_osd_req_op *op;
680 
681 	*num_request_data_items = 0;
682 	*num_reply_data_items = 0;
683 
684 	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
685 		switch (op->op) {
686 		/* request */
687 		case CEPH_OSD_OP_WRITE:
688 		case CEPH_OSD_OP_WRITEFULL:
689 		case CEPH_OSD_OP_SETXATTR:
690 		case CEPH_OSD_OP_CMPXATTR:
691 		case CEPH_OSD_OP_NOTIFY_ACK:
692 		case CEPH_OSD_OP_COPY_FROM2:
693 			*num_request_data_items += 1;
694 			break;
695 
696 		/* reply */
697 		case CEPH_OSD_OP_STAT:
698 		case CEPH_OSD_OP_READ:
699 		case CEPH_OSD_OP_SPARSE_READ:
700 		case CEPH_OSD_OP_LIST_WATCHERS:
701 			*num_reply_data_items += 1;
702 			break;
703 
704 		/* both */
705 		case CEPH_OSD_OP_NOTIFY:
706 			*num_request_data_items += 1;
707 			*num_reply_data_items += 1;
708 			break;
709 		case CEPH_OSD_OP_CALL:
710 			*num_request_data_items += 2;
711 			*num_reply_data_items += 1;
712 			break;
713 
714 		default:
715 			WARN_ON(!osd_req_opcode_valid(op->op));
716 			break;
717 		}
718 	}
719 }
720 
721 /*
722  * oid, oloc and OSD op opcode(s) must be filled in before this function
723  * is called.
724  */
725 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
726 {
727 	int num_request_data_items, num_reply_data_items;
728 
729 	get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
730 	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
731 					  num_reply_data_items);
732 }
733 EXPORT_SYMBOL(ceph_osdc_alloc_messages);
734 
735 /*
736  * This is an osd op init function for opcodes that have no data or
737  * other information associated with them.  It also serves as a
738  * common init routine for all the other init functions, below.
739  */
740 struct ceph_osd_req_op *
741 osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
742 		 u16 opcode, u32 flags)
743 {
744 	struct ceph_osd_req_op *op;
745 
746 	BUG_ON(which >= osd_req->r_num_ops);
747 	BUG_ON(!osd_req_opcode_valid(opcode));
748 
749 	op = &osd_req->r_ops[which];
750 	memset(op, 0, sizeof (*op));
751 	op->op = opcode;
752 	op->flags = flags;
753 
754 	return op;
755 }
756 EXPORT_SYMBOL(osd_req_op_init);
757 
758 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
759 				unsigned int which, u16 opcode,
760 				u64 offset, u64 length,
761 				u64 truncate_size, u32 truncate_seq)
762 {
763 	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
764 						     opcode, 0);
765 	size_t payload_len = 0;
766 
767 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
768 	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
769 	       opcode != CEPH_OSD_OP_TRUNCATE && opcode != CEPH_OSD_OP_SPARSE_READ);
770 
771 	op->extent.offset = offset;
772 	op->extent.length = length;
773 	op->extent.truncate_size = truncate_size;
774 	op->extent.truncate_seq = truncate_seq;
775 	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
776 		payload_len += length;
777 
778 	op->indata_len = payload_len;
779 }
780 EXPORT_SYMBOL(osd_req_op_extent_init);
781 
782 void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
783 				unsigned int which, u64 length)
784 {
785 	struct ceph_osd_req_op *op;
786 	u64 previous;
787 
788 	BUG_ON(which >= osd_req->r_num_ops);
789 	op = &osd_req->r_ops[which];
790 	previous = op->extent.length;
791 
792 	if (length == previous)
793 		return;		/* Nothing to do */
794 	BUG_ON(length > previous);
795 
796 	op->extent.length = length;
797 	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
798 		op->indata_len -= previous - length;
799 }
800 EXPORT_SYMBOL(osd_req_op_extent_update);
801 
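/*
 * Duplicate extent op @which into slot @which + 1, advancing the extent
 * (and, for writes, the payload length) by @offset_inc bytes.
 */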
802 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
803 				unsigned int which, u64 offset_inc)
804 {
805 	struct ceph_osd_req_op *op, *prev_op;
806 
807 	BUG_ON(which + 1 >= osd_req->r_num_ops);
808 
809 	prev_op = &osd_req->r_ops[which];
810 	op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
811 	/* dup previous one */
812 	op->indata_len = prev_op->indata_len;
813 	op->outdata_len = prev_op->outdata_len;
814 	op->extent = prev_op->extent;
815 	/* adjust offset */
816 	op->extent.offset += offset_inc;
817 	op->extent.length -= offset_inc;
818 
819 	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
820 		op->indata_len -= offset_inc;
821 }
822 EXPORT_SYMBOL(osd_req_op_extent_dup_last);
823 
824 int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
825 			const char *class, const char *method)
826 {
827 	struct ceph_osd_req_op *op;
828 	struct ceph_pagelist *pagelist;
829 	size_t payload_len = 0;
830 	size_t size;
831 	int ret;
832 
833 	op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
834 
835 	pagelist = ceph_pagelist_alloc(GFP_NOFS);
836 	if (!pagelist)
837 		return -ENOMEM;
838 
839 	op->cls.class_name = class;
840 	size = strlen(class);
841 	BUG_ON(size > (size_t) U8_MAX);
842 	op->cls.class_len = size;
843 	ret = ceph_pagelist_append(pagelist, class, size);
844 	if (ret)
845 		goto err_pagelist_free;
846 	payload_len += size;
847 
848 	op->cls.method_name = method;
849 	size = strlen(method);
850 	BUG_ON(size > (size_t) U8_MAX);
851 	op->cls.method_len = size;
852 	ret = ceph_pagelist_append(pagelist, method, size);
853 	if (ret)
854 		goto err_pagelist_free;
855 	payload_len += size;
856 
857 	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
858 	op->indata_len = payload_len;
859 	return 0;
860 
861 err_pagelist_free:
862 	ceph_pagelist_release(pagelist);
863 	return ret;
864 }
865 EXPORT_SYMBOL(osd_req_op_cls_init);
866 
867 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
868 			  u16 opcode, const char *name, const void *value,
869 			  size_t size, u8 cmp_op, u8 cmp_mode)
870 {
871 	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
872 						     opcode, 0);
873 	struct ceph_pagelist *pagelist;
874 	size_t payload_len;
875 	int ret;
876 
877 	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
878 
879 	pagelist = ceph_pagelist_alloc(GFP_NOFS);
880 	if (!pagelist)
881 		return -ENOMEM;
882 
883 	payload_len = strlen(name);
884 	op->xattr.name_len = payload_len;
885 	ret = ceph_pagelist_append(pagelist, name, payload_len);
886 	if (ret)
887 		goto err_pagelist_free;
888 
889 	op->xattr.value_len = size;
890 	ret = ceph_pagelist_append(pagelist, value, size);
891 	if (ret)
892 		goto err_pagelist_free;
893 	payload_len += size;
894 
895 	op->xattr.cmp_op = cmp_op;
896 	op->xattr.cmp_mode = cmp_mode;
897 
898 	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
899 	op->indata_len = payload_len;
900 	return 0;
901 
902 err_pagelist_free:
903 	ceph_pagelist_release(pagelist);
904 	return ret;
905 }
906 EXPORT_SYMBOL(osd_req_op_xattr_init);
907 
908 /*
909  * @watch_opcode: CEPH_OSD_WATCH_OP_*
910  */
911 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
912 				  u8 watch_opcode, u64 cookie, u32 gen)
913 {
914 	struct ceph_osd_req_op *op;
915 
916 	op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
917 	op->watch.cookie = cookie;
918 	op->watch.op = watch_opcode;
919 	op->watch.gen = gen;
920 }
921 
922 /*
923  * prot_ver, timeout and notify payload (may be empty) should already be
924  * encoded in @request_pl
925  */
926 static void osd_req_op_notify_init(struct ceph_osd_request *req, int which,
927 				   u64 cookie, struct ceph_pagelist *request_pl)
928 {
929 	struct ceph_osd_req_op *op;
930 
931 	op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
932 	op->notify.cookie = cookie;
933 
934 	ceph_osd_data_pagelist_init(&op->notify.request_data, request_pl);
935 	op->indata_len = request_pl->length;
936 }
937 
938 /*
939  * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
940  */
941 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
942 				unsigned int which,
943 				u64 expected_object_size,
944 				u64 expected_write_size,
945 				u32 flags)
946 {
947 	struct ceph_osd_req_op *op;
948 
949 	op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0);
950 	op->alloc_hint.expected_object_size = expected_object_size;
951 	op->alloc_hint.expected_write_size = expected_write_size;
952 	op->alloc_hint.flags = flags;
953 
954 	/*
955 	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
956 	 * not worth a feature bit.  Set FAILOK per-op flag to make
957 	 * sure older osds don't trip over an unsupported opcode.
958 	 */
959 	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
960 }
961 EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
962 
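/*
 * Attach @osd_data to @msg as a data item, in whatever form (pages,
 * pagelist, bio, bvecs or iterator) it was set up with.
 */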
963 static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
964 				struct ceph_osd_data *osd_data)
965 {
966 	u64 length = ceph_osd_data_length(osd_data);
967 
968 	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
969 		BUG_ON(length > (u64) SIZE_MAX);
970 		if (length)
971 			ceph_msg_data_add_pages(msg, osd_data->pages,
972 					length, osd_data->alignment, false);
973 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
974 		BUG_ON(!length);
975 		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
976 #ifdef CONFIG_BLOCK
977 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
978 		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
979 #endif
980 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
981 		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
982 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_ITER) {
983 		ceph_msg_data_add_iter(msg, &osd_data->iter);
984 	} else {
985 		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
986 	}
987 }
988 
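/*
 * Encode a single op into wire format.  Returns the op's request data
 * length (indata_len) so the caller can accumulate the total payload.
 */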
989 static u32 osd_req_encode_op(struct ceph_osd_op *dst,
990 			     const struct ceph_osd_req_op *src)
991 {
992 	switch (src->op) {
993 	case CEPH_OSD_OP_STAT:
994 		break;
995 	case CEPH_OSD_OP_READ:
996 	case CEPH_OSD_OP_SPARSE_READ:
997 	case CEPH_OSD_OP_WRITE:
998 	case CEPH_OSD_OP_WRITEFULL:
999 	case CEPH_OSD_OP_ZERO:
1000 	case CEPH_OSD_OP_TRUNCATE:
1001 		dst->extent.offset = cpu_to_le64(src->extent.offset);
1002 		dst->extent.length = cpu_to_le64(src->extent.length);
1003 		dst->extent.truncate_size =
1004 			cpu_to_le64(src->extent.truncate_size);
1005 		dst->extent.truncate_seq =
1006 			cpu_to_le32(src->extent.truncate_seq);
1007 		break;
1008 	case CEPH_OSD_OP_CALL:
1009 		dst->cls.class_len = src->cls.class_len;
1010 		dst->cls.method_len = src->cls.method_len;
1011 		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
1012 		break;
1013 	case CEPH_OSD_OP_WATCH:
1014 		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
1015 		dst->watch.ver = cpu_to_le64(0);
1016 		dst->watch.op = src->watch.op;
1017 		dst->watch.gen = cpu_to_le32(src->watch.gen);
1018 		break;
1019 	case CEPH_OSD_OP_NOTIFY_ACK:
1020 		break;
1021 	case CEPH_OSD_OP_NOTIFY:
1022 		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
1023 		break;
1024 	case CEPH_OSD_OP_LIST_WATCHERS:
1025 		break;
1026 	case CEPH_OSD_OP_SETALLOCHINT:
1027 		dst->alloc_hint.expected_object_size =
1028 		    cpu_to_le64(src->alloc_hint.expected_object_size);
1029 		dst->alloc_hint.expected_write_size =
1030 		    cpu_to_le64(src->alloc_hint.expected_write_size);
1031 		dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
1032 		break;
1033 	case CEPH_OSD_OP_SETXATTR:
1034 	case CEPH_OSD_OP_CMPXATTR:
1035 		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
1036 		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
1037 		dst->xattr.cmp_op = src->xattr.cmp_op;
1038 		dst->xattr.cmp_mode = src->xattr.cmp_mode;
1039 		break;
1040 	case CEPH_OSD_OP_CREATE:
1041 	case CEPH_OSD_OP_DELETE:
1042 		break;
1043 	case CEPH_OSD_OP_COPY_FROM2:
1044 		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
1045 		dst->copy_from.src_version =
1046 			cpu_to_le64(src->copy_from.src_version);
1047 		dst->copy_from.flags = src->copy_from.flags;
1048 		dst->copy_from.src_fadvise_flags =
1049 			cpu_to_le32(src->copy_from.src_fadvise_flags);
1050 		break;
1051 	default:
1052 		pr_err("unsupported osd opcode %s\n",
1053 			ceph_osd_op_name(src->op));
1054 		WARN_ON(1);
1055 
1056 		return 0;
1057 	}
1058 
1059 	dst->op = cpu_to_le16(src->op);
1060 	dst->flags = cpu_to_le32(src->flags);
1061 	dst->payload_len = cpu_to_le32(src->indata_len);
1062 
1063 	return src->indata_len;
1064 }
1065 
1066 /*
1067  * build new request AND message, calculate layout, and adjust file
1068  * extent as needed.
1069  *
1070  * if the file was recently truncated, we include information about its
1071  * old and new size so that the object can be updated appropriately.  (we
1072  * avoid synchronously deleting truncated objects because it's slow.)
1073  */
1074 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
1075 					       struct ceph_file_layout *layout,
1076 					       struct ceph_vino vino,
1077 					       u64 off, u64 *plen,
1078 					       unsigned int which, int num_ops,
1079 					       int opcode, int flags,
1080 					       struct ceph_snap_context *snapc,
1081 					       u32 truncate_seq,
1082 					       u64 truncate_size,
1083 					       bool use_mempool)
1084 {
1085 	struct ceph_osd_request *req;
1086 	u64 objnum = 0;
1087 	u64 objoff = 0;
1088 	u64 objlen = 0;
1089 	int r;
1090 
1091 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
1092 	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
1093 	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE &&
1094 	       opcode != CEPH_OSD_OP_SPARSE_READ);
1095 
1096 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
1097 					GFP_NOFS);
1098 	if (!req) {
1099 		r = -ENOMEM;
1100 		goto fail;
1101 	}
1102 
1103 	/* calculate max write size */
1104 	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
1105 	if (r)
1106 		goto fail;
1107 
1108 	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
1109 		osd_req_op_init(req, which, opcode, 0);
1110 	} else {
1111 		u32 object_size = layout->object_size;
1112 		u32 object_base = off - objoff;
1113 		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
1114 			if (truncate_size <= object_base) {
1115 				truncate_size = 0;
1116 			} else {
1117 				truncate_size -= object_base;
1118 				if (truncate_size > object_size)
1119 					truncate_size = object_size;
1120 			}
1121 		}
1122 		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
1123 				       truncate_size, truncate_seq);
1124 	}
1125 
1126 	req->r_base_oloc.pool = layout->pool_id;
1127 	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
1128 	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
1129 	req->r_flags = flags | osdc->client->options->read_from_replica;
1130 
1131 	req->r_snapid = vino.snap;
1132 	if (flags & CEPH_OSD_FLAG_WRITE)
1133 		req->r_data_offset = off;
1134 
1135 	if (num_ops > 1)
1136 		/*
1137 		 * This is a special case for ceph_writepages_start(), but it
1138 		 * also covers ceph_uninline_data().  If more multi-op request
1139 		 * use cases emerge, we will need a separate helper.
1140 		 */
1141 		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
1142 	else
1143 		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
1144 	if (r)
1145 		goto fail;
1146 
1147 	return req;
1148 
1149 fail:
1150 	ceph_osdc_put_request(req);
1151 	return ERR_PTR(r);
1152 }
1153 EXPORT_SYMBOL(ceph_osdc_new_request);
1154 
1155 int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
1156 {
1157 	op->extent.sparse_ext_cnt = cnt;
1158 	op->extent.sparse_ext = kmalloc_array(cnt,
1159 					      sizeof(*op->extent.sparse_ext),
1160 					      GFP_NOFS);
1161 	if (!op->extent.sparse_ext)
1162 		return -ENOMEM;
1163 	return 0;
1164 }
1165 EXPORT_SYMBOL(__ceph_alloc_sparse_ext_map);
1166 
1167 /*
1168  * We keep osd requests in an rbtree, sorted by ->r_tid.
1169  */
1170 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
1171 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
1172 
1173 /*
1174  * Call @fn on each OSD request as long as @fn returns 0.
1175  */
1176 static void for_each_request(struct ceph_osd_client *osdc,
1177 			int (*fn)(struct ceph_osd_request *req, void *arg),
1178 			void *arg)
1179 {
1180 	struct rb_node *n, *p;
1181 
1182 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1183 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
1184 
1185 		for (p = rb_first(&osd->o_requests); p; ) {
1186 			struct ceph_osd_request *req =
1187 			    rb_entry(p, struct ceph_osd_request, r_node);
1188 
1189 			p = rb_next(p);
1190 			if (fn(req, arg))
1191 				return;
1192 		}
1193 	}
1194 
1195 	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
1196 		struct ceph_osd_request *req =
1197 		    rb_entry(p, struct ceph_osd_request, r_node);
1198 
1199 		p = rb_next(p);
1200 		if (fn(req, arg))
1201 			return;
1202 	}
1203 }
1204 
1205 static bool osd_homeless(struct ceph_osd *osd)
1206 {
1207 	return osd->o_osd == CEPH_HOMELESS_OSD;
1208 }
1209 
1210 static bool osd_registered(struct ceph_osd *osd)
1211 {
1212 	verify_osdc_locked(osd->o_osdc);
1213 
1214 	return !RB_EMPTY_NODE(&osd->o_node);
1215 }
1216 
1217 /*
1218  * Assumes @osd is zero-initialized.
1219  */
1220 static void osd_init(struct ceph_osd *osd)
1221 {
1222 	refcount_set(&osd->o_ref, 1);
1223 	RB_CLEAR_NODE(&osd->o_node);
1224 	spin_lock_init(&osd->o_requests_lock);
1225 	osd->o_requests = RB_ROOT;
1226 	osd->o_linger_requests = RB_ROOT;
1227 	osd->o_backoff_mappings = RB_ROOT;
1228 	osd->o_backoffs_by_id = RB_ROOT;
1229 	INIT_LIST_HEAD(&osd->o_osd_lru);
1230 	INIT_LIST_HEAD(&osd->o_keepalive_item);
1231 	osd->o_incarnation = 1;
1232 	mutex_init(&osd->lock);
1233 }
1234 
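/*
 * Reset sparse read state, dropping any extent map left over from a
 * previous read.
 */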
1235 static void ceph_init_sparse_read(struct ceph_sparse_read *sr)
1236 {
1237 	kfree(sr->sr_extent);
1238 	memset(sr, '\0', sizeof(*sr));
1239 	sr->sr_state = CEPH_SPARSE_READ_HDR;
1240 }
1241 
1242 static void osd_cleanup(struct ceph_osd *osd)
1243 {
1244 	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
1245 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
1246 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
1247 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
1248 	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
1249 	WARN_ON(!list_empty(&osd->o_osd_lru));
1250 	WARN_ON(!list_empty(&osd->o_keepalive_item));
1251 
1252 	ceph_init_sparse_read(&osd->o_sparse_read);
1253 
1254 	if (osd->o_auth.authorizer) {
1255 		WARN_ON(osd_homeless(osd));
1256 		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1257 	}
1258 }
1259 
1260 /*
1261  * Track open sessions with osds.
1262  */
1263 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1264 {
1265 	struct ceph_osd *osd;
1266 
1267 	WARN_ON(onum == CEPH_HOMELESS_OSD);
1268 
1269 	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
1270 	osd_init(osd);
1271 	osd->o_osdc = osdc;
1272 	osd->o_osd = onum;
1273 	osd->o_sparse_op_idx = -1;
1274 
1275 	ceph_init_sparse_read(&osd->o_sparse_read);
1276 
1277 	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1278 
1279 	return osd;
1280 }
1281 
1282 static struct ceph_osd *get_osd(struct ceph_osd *osd)
1283 {
1284 	if (refcount_inc_not_zero(&osd->o_ref)) {
1285 		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
1286 		     refcount_read(&osd->o_ref));
1287 		return osd;
1288 	} else {
1289 		dout("get_osd %p FAIL\n", osd);
1290 		return NULL;
1291 	}
1292 }
1293 
1294 static void put_osd(struct ceph_osd *osd)
1295 {
1296 	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
1297 	     refcount_read(&osd->o_ref) - 1);
1298 	if (refcount_dec_and_test(&osd->o_ref)) {
1299 		osd_cleanup(osd);
1300 		kfree(osd);
1301 	}
1302 }
1303 
1304 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
1305 
1306 static void __move_osd_to_lru(struct ceph_osd *osd)
1307 {
1308 	struct ceph_osd_client *osdc = osd->o_osdc;
1309 
1310 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1311 	BUG_ON(!list_empty(&osd->o_osd_lru));
1312 
1313 	spin_lock(&osdc->osd_lru_lock);
1314 	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1315 	spin_unlock(&osdc->osd_lru_lock);
1316 
1317 	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1318 }
1319 
1320 static void maybe_move_osd_to_lru(struct ceph_osd *osd)
1321 {
1322 	if (RB_EMPTY_ROOT(&osd->o_requests) &&
1323 	    RB_EMPTY_ROOT(&osd->o_linger_requests))
1324 		__move_osd_to_lru(osd);
1325 }
1326 
1327 static void __remove_osd_from_lru(struct ceph_osd *osd)
1328 {
1329 	struct ceph_osd_client *osdc = osd->o_osdc;
1330 
1331 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1332 
1333 	spin_lock(&osdc->osd_lru_lock);
1334 	if (!list_empty(&osd->o_osd_lru))
1335 		list_del_init(&osd->o_osd_lru);
1336 	spin_unlock(&osdc->osd_lru_lock);
1337 }
1338 
1339 /*
1340  * Close the connection and assign any leftover requests to the
1341  * homeless session.
1342  */
1343 static void close_osd(struct ceph_osd *osd)
1344 {
1345 	struct ceph_osd_client *osdc = osd->o_osdc;
1346 	struct rb_node *n;
1347 
1348 	verify_osdc_wrlocked(osdc);
1349 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1350 
1351 	ceph_con_close(&osd->o_con);
1352 
1353 	for (n = rb_first(&osd->o_requests); n; ) {
1354 		struct ceph_osd_request *req =
1355 		    rb_entry(n, struct ceph_osd_request, r_node);
1356 
1357 		n = rb_next(n); /* unlink_request() */
1358 
1359 		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
1360 		unlink_request(osd, req);
1361 		link_request(&osdc->homeless_osd, req);
1362 	}
1363 	for (n = rb_first(&osd->o_linger_requests); n; ) {
1364 		struct ceph_osd_linger_request *lreq =
1365 		    rb_entry(n, struct ceph_osd_linger_request, node);
1366 
1367 		n = rb_next(n); /* unlink_linger() */
1368 
1369 		dout(" reassigning lreq %p linger_id %llu\n", lreq,
1370 		     lreq->linger_id);
1371 		unlink_linger(osd, lreq);
1372 		link_linger(&osdc->homeless_osd, lreq);
1373 	}
1374 	clear_backoffs(osd);
1375 
1376 	__remove_osd_from_lru(osd);
1377 	erase_osd(&osdc->osds, osd);
1378 	put_osd(osd);
1379 }
1380 
1381 /*
1382  * Reset the osd connection, closing the session instead if it is idle.
1383  */
1384 static int reopen_osd(struct ceph_osd *osd)
1385 {
1386 	struct ceph_entity_addr *peer_addr;
1387 
1388 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1389 
1390 	if (RB_EMPTY_ROOT(&osd->o_requests) &&
1391 	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
1392 		close_osd(osd);
1393 		return -ENODEV;
1394 	}
1395 
1396 	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
1397 	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
1398 			!ceph_con_opened(&osd->o_con)) {
1399 		struct rb_node *n;
1400 
1401 		dout("osd addr hasn't changed and connection never opened, "
1402 		     "letting msgr retry\n");
1403 		/* touch each r_stamp for handle_timeout()'s benefit */
1404 		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1405 			struct ceph_osd_request *req =
1406 			    rb_entry(n, struct ceph_osd_request, r_node);
1407 			req->r_stamp = jiffies;
1408 		}
1409 
1410 		return -EAGAIN;
1411 	}
1412 
1413 	ceph_con_close(&osd->o_con);
1414 	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1415 	osd->o_incarnation++;
1416 
1417 	return 0;
1418 }
1419 
1420 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1421 					  bool wrlocked)
1422 {
1423 	struct ceph_osd *osd;
1424 
1425 	if (wrlocked)
1426 		verify_osdc_wrlocked(osdc);
1427 	else
1428 		verify_osdc_locked(osdc);
1429 
1430 	if (o != CEPH_HOMELESS_OSD)
1431 		osd = lookup_osd(&osdc->osds, o);
1432 	else
1433 		osd = &osdc->homeless_osd;
1434 	if (!osd) {
1435 		if (!wrlocked)
1436 			return ERR_PTR(-EAGAIN);
1437 
1438 		osd = create_osd(osdc, o);
1439 		insert_osd(&osdc->osds, osd);
1440 		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1441 			      &osdc->osdmap->osd_addr[osd->o_osd]);
1442 	}
1443 
1444 	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1445 	return osd;
1446 }
1447 
1448 /*
1449  * Create request <-> OSD session relation.
1450  *
1451  * @req has to be assigned a tid, @osd may be homeless.
1452  */
1453 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1454 {
1455 	verify_osd_locked(osd);
1456 	WARN_ON(!req->r_tid || req->r_osd);
1457 	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1458 	     req, req->r_tid);
1459 
1460 	if (!osd_homeless(osd))
1461 		__remove_osd_from_lru(osd);
1462 	else
1463 		atomic_inc(&osd->o_osdc->num_homeless);
1464 
1465 	get_osd(osd);
1466 	spin_lock(&osd->o_requests_lock);
1467 	insert_request(&osd->o_requests, req);
1468 	spin_unlock(&osd->o_requests_lock);
1469 	req->r_osd = osd;
1470 }
1471 
1472 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1473 {
1474 	verify_osd_locked(osd);
1475 	WARN_ON(req->r_osd != osd);
1476 	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1477 	     req, req->r_tid);
1478 
1479 	req->r_osd = NULL;
1480 	spin_lock(&osd->o_requests_lock);
1481 	erase_request(&osd->o_requests, req);
1482 	spin_unlock(&osd->o_requests_lock);
1483 	put_osd(osd);
1484 
1485 	if (!osd_homeless(osd))
1486 		maybe_move_osd_to_lru(osd);
1487 	else
1488 		atomic_dec(&osd->o_osdc->num_homeless);
1489 }
1490 
1491 static bool __pool_full(struct ceph_pg_pool_info *pi)
1492 {
1493 	return pi->flags & CEPH_POOL_FLAG_FULL;
1494 }
1495 
1496 static bool have_pool_full(struct ceph_osd_client *osdc)
1497 {
1498 	struct rb_node *n;
1499 
1500 	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1501 		struct ceph_pg_pool_info *pi =
1502 		    rb_entry(n, struct ceph_pg_pool_info, node);
1503 
1504 		if (__pool_full(pi))
1505 			return true;
1506 	}
1507 
1508 	return false;
1509 }
1510 
1511 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1512 {
1513 	struct ceph_pg_pool_info *pi;
1514 
1515 	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1516 	if (!pi)
1517 		return false;
1518 
1519 	return __pool_full(pi);
1520 }
1521 
1522 /*
1523  * Returns whether a request should be blocked from being sent
1524  * based on the current osdmap and osd_client settings.
1525  */
1526 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1527 				    const struct ceph_osd_request_target *t,
1528 				    struct ceph_pg_pool_info *pi)
1529 {
1530 	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1531 	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1532 		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1533 		       __pool_full(pi);
1534 
1535 	WARN_ON(pi->id != t->target_oloc.pool);
1536 	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1537 	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1538 	       (osdc->osdmap->epoch < osdc->epoch_barrier);
1539 }
1540 
1541 static int pick_random_replica(const struct ceph_osds *acting)
1542 {
1543 	int i = get_random_u32_below(acting->size);
1544 
1545 	dout("%s picked osd%d, primary osd%d\n", __func__,
1546 	     acting->osds[i], acting->primary);
1547 	return i;
1548 }
1549 
1550 /*
1551  * Picks the closest replica based on client's location given by
1552  * crush_location option.  Prefers the primary if the locality is
1553  * the same.
1554  */
1555 static int pick_closest_replica(struct ceph_osd_client *osdc,
1556 				const struct ceph_osds *acting)
1557 {
1558 	struct ceph_options *opt = osdc->client->options;
1559 	int best_i, best_locality;
1560 	int i = 0, locality;
1561 
1562 	do {
1563 		locality = ceph_get_crush_locality(osdc->osdmap,
1564 						   acting->osds[i],
1565 						   &opt->crush_locs);
1566 		if (i == 0 ||
1567 		    (locality >= 0 && best_locality < 0) ||
1568 		    (locality >= 0 && best_locality >= 0 &&
1569 		     locality < best_locality)) {
1570 			best_i = i;
1571 			best_locality = locality;
1572 		}
1573 	} while (++i < acting->size);
1574 
1575 	dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
1576 	     acting->osds[best_i], best_locality, acting->primary);
1577 	return best_i;
1578 }
1579 
1580 enum calc_target_result {
1581 	CALC_TARGET_NO_ACTION = 0,
1582 	CALC_TARGET_NEED_RESEND,
1583 	CALC_TARGET_POOL_DNE,
1584 };
1585 
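/*
 * Map the request target to a PG and pick the OSD to send it to, taking
 * cache tiering, pauses, splits and (for reads) replica balancing into
 * account.  Tells the caller whether the request needs to be (re)sent,
 * can be left alone, or whether its pool no longer exists.
 */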
1586 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1587 					   struct ceph_osd_request_target *t,
1588 					   bool any_change)
1589 {
1590 	struct ceph_pg_pool_info *pi;
1591 	struct ceph_pg pgid, last_pgid;
1592 	struct ceph_osds up, acting;
1593 	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
1594 	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
1595 	bool force_resend = false;
1596 	bool unpaused = false;
1597 	bool legacy_change = false;
1598 	bool split = false;
1599 	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1600 	bool recovery_deletes = ceph_osdmap_flag(osdc,
1601 						 CEPH_OSDMAP_RECOVERY_DELETES);
1602 	enum calc_target_result ct_res;
1603 
1604 	t->epoch = osdc->osdmap->epoch;
1605 	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1606 	if (!pi) {
1607 		t->osd = CEPH_HOMELESS_OSD;
1608 		ct_res = CALC_TARGET_POOL_DNE;
1609 		goto out;
1610 	}
1611 
1612 	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1613 		if (t->last_force_resend < pi->last_force_request_resend) {
1614 			t->last_force_resend = pi->last_force_request_resend;
1615 			force_resend = true;
1616 		} else if (t->last_force_resend == 0) {
1617 			force_resend = true;
1618 		}
1619 	}
1620 
1621 	/* apply tiering */
1622 	ceph_oid_copy(&t->target_oid, &t->base_oid);
1623 	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1624 	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1625 		if (is_read && pi->read_tier >= 0)
1626 			t->target_oloc.pool = pi->read_tier;
1627 		if (is_write && pi->write_tier >= 0)
1628 			t->target_oloc.pool = pi->write_tier;
1629 
1630 		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1631 		if (!pi) {
1632 			t->osd = CEPH_HOMELESS_OSD;
1633 			ct_res = CALC_TARGET_POOL_DNE;
1634 			goto out;
1635 		}
1636 	}
1637 
1638 	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
1639 	last_pgid.pool = pgid.pool;
1640 	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1641 
1642 	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1643 	if (any_change &&
1644 	    ceph_is_new_interval(&t->acting,
1645 				 &acting,
1646 				 &t->up,
1647 				 &up,
1648 				 t->size,
1649 				 pi->size,
1650 				 t->min_size,
1651 				 pi->min_size,
1652 				 t->pg_num,
1653 				 pi->pg_num,
1654 				 t->sort_bitwise,
1655 				 sort_bitwise,
1656 				 t->recovery_deletes,
1657 				 recovery_deletes,
1658 				 &last_pgid))
1659 		force_resend = true;
1660 
1661 	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1662 		t->paused = false;
1663 		unpaused = true;
1664 	}
1665 	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1666 			ceph_osds_changed(&t->acting, &acting,
1667 					  t->used_replica || any_change);
1668 	if (t->pg_num)
1669 		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1670 
1671 	if (legacy_change || force_resend || split) {
1672 		t->pgid = pgid; /* struct */
1673 		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1674 		ceph_osds_copy(&t->acting, &acting);
1675 		ceph_osds_copy(&t->up, &up);
1676 		t->size = pi->size;
1677 		t->min_size = pi->min_size;
1678 		t->pg_num = pi->pg_num;
1679 		t->pg_num_mask = pi->pg_num_mask;
1680 		t->sort_bitwise = sort_bitwise;
1681 		t->recovery_deletes = recovery_deletes;
1682 
1683 		if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
1684 				 CEPH_OSD_FLAG_LOCALIZE_READS)) &&
1685 		    !is_write && pi->type == CEPH_POOL_TYPE_REP &&
1686 		    acting.size > 1) {
1687 			int pos;
1688 
1689 			WARN_ON(!is_read || acting.osds[0] != acting.primary);
1690 			if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
1691 				pos = pick_random_replica(&acting);
1692 			} else {
1693 				pos = pick_closest_replica(osdc, &acting);
1694 			}
1695 			t->osd = acting.osds[pos];
1696 			t->used_replica = pos > 0;
1697 		} else {
1698 			t->osd = acting.primary;
1699 			t->used_replica = false;
1700 		}
1701 	}
1702 
1703 	if (unpaused || legacy_change || force_resend || split)
1704 		ct_res = CALC_TARGET_NEED_RESEND;
1705 	else
1706 		ct_res = CALC_TARGET_NO_ACTION;
1707 
1708 out:
1709 	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1710 	     legacy_change, force_resend, split, ct_res, t->osd);
1711 	return ct_res;
1712 }
1713 
1714 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1715 {
1716 	struct ceph_spg_mapping *spg;
1717 
1718 	spg = kmalloc(sizeof(*spg), GFP_NOIO);
1719 	if (!spg)
1720 		return NULL;
1721 
1722 	RB_CLEAR_NODE(&spg->node);
1723 	spg->backoffs = RB_ROOT;
1724 	return spg;
1725 }
1726 
1727 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1728 {
1729 	WARN_ON(!RB_EMPTY_NODE(&spg->node));
1730 	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1731 
1732 	kfree(spg);
1733 }
1734 
1735 /*
1736  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1737  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1738  * defined only within a specific spgid; it does not pass anything to
1739  * children on split, or to another primary.
1740  */
1741 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1742 		 RB_BYPTR, const struct ceph_spg *, node)
1743 
1744 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1745 {
1746 	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1747 }
1748 
1749 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1750 				   void **pkey, size_t *pkey_len)
1751 {
1752 	if (hoid->key_len) {
1753 		*pkey = hoid->key;
1754 		*pkey_len = hoid->key_len;
1755 	} else {
1756 		*pkey = hoid->oid;
1757 		*pkey_len = hoid->oid_len;
1758 	}
1759 }
1760 
1761 static int compare_names(const void *name1, size_t name1_len,
1762 			 const void *name2, size_t name2_len)
1763 {
1764 	int ret;
1765 
1766 	ret = memcmp(name1, name2, min(name1_len, name2_len));
1767 	if (!ret) {
1768 		if (name1_len < name2_len)
1769 			ret = -1;
1770 		else if (name1_len > name2_len)
1771 			ret = 1;
1772 	}
1773 	return ret;
1774 }
1775 
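/*
 * Total order on hobject_ts matching the OSD's bitwise sort: max flag,
 * then pool, reversed hash, namespace, effective key, oid and snapid.
 */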
1776 static int hoid_compare(const struct ceph_hobject_id *lhs,
1777 			const struct ceph_hobject_id *rhs)
1778 {
1779 	void *effective_key1, *effective_key2;
1780 	size_t effective_key1_len, effective_key2_len;
1781 	int ret;
1782 
1783 	if (lhs->is_max < rhs->is_max)
1784 		return -1;
1785 	if (lhs->is_max > rhs->is_max)
1786 		return 1;
1787 
1788 	if (lhs->pool < rhs->pool)
1789 		return -1;
1790 	if (lhs->pool > rhs->pool)
1791 		return 1;
1792 
1793 	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1794 		return -1;
1795 	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1796 		return 1;
1797 
1798 	ret = compare_names(lhs->nspace, lhs->nspace_len,
1799 			    rhs->nspace, rhs->nspace_len);
1800 	if (ret)
1801 		return ret;
1802 
1803 	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1804 	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1805 	ret = compare_names(effective_key1, effective_key1_len,
1806 			    effective_key2, effective_key2_len);
1807 	if (ret)
1808 		return ret;
1809 
1810 	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1811 	if (ret)
1812 		return ret;
1813 
1814 	if (lhs->snapid < rhs->snapid)
1815 		return -1;
1816 	if (lhs->snapid > rhs->snapid)
1817 		return 1;
1818 
1819 	return 0;
1820 }
1821 
1822 /*
1823  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1824  * compat stuff here.
1825  *
1826  * Assumes @hoid is zero-initialized.
1827  */
1828 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1829 {
1830 	u8 struct_v;
1831 	u32 struct_len;
1832 	int ret;
1833 
1834 	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1835 				  &struct_len);
1836 	if (ret)
1837 		return ret;
1838 
1839 	if (struct_v < 4) {
1840 		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1841 		goto e_inval;
1842 	}
1843 
1844 	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1845 						GFP_NOIO);
1846 	if (IS_ERR(hoid->key)) {
1847 		ret = PTR_ERR(hoid->key);
1848 		hoid->key = NULL;
1849 		return ret;
1850 	}
1851 
1852 	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1853 						GFP_NOIO);
1854 	if (IS_ERR(hoid->oid)) {
1855 		ret = PTR_ERR(hoid->oid);
1856 		hoid->oid = NULL;
1857 		return ret;
1858 	}
1859 
1860 	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1861 	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1862 	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1863 
1864 	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1865 						   GFP_NOIO);
1866 	if (IS_ERR(hoid->nspace)) {
1867 		ret = PTR_ERR(hoid->nspace);
1868 		hoid->nspace = NULL;
1869 		return ret;
1870 	}
1871 
1872 	ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1873 
1874 	ceph_hoid_build_hash_cache(hoid);
1875 	return 0;
1876 
1877 e_inval:
1878 	return -EINVAL;
1879 }
1880 
1881 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1882 {
1883 	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1884 	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1885 }
1886 
1887 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1888 {
1889 	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1890 	ceph_encode_string(p, end, hoid->key, hoid->key_len);
1891 	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1892 	ceph_encode_64(p, hoid->snapid);
1893 	ceph_encode_32(p, hoid->hash);
1894 	ceph_encode_8(p, hoid->is_max);
1895 	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1896 	ceph_encode_64(p, hoid->pool);
1897 }
1898 
1899 static void free_hoid(struct ceph_hobject_id *hoid)
1900 {
1901 	if (hoid) {
1902 		kfree(hoid->key);
1903 		kfree(hoid->oid);
1904 		kfree(hoid->nspace);
1905 		kfree(hoid);
1906 	}
1907 }
1908 
1909 static struct ceph_osd_backoff *alloc_backoff(void)
1910 {
1911 	struct ceph_osd_backoff *backoff;
1912 
1913 	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1914 	if (!backoff)
1915 		return NULL;
1916 
1917 	RB_CLEAR_NODE(&backoff->spg_node);
1918 	RB_CLEAR_NODE(&backoff->id_node);
1919 	return backoff;
1920 }
1921 
1922 static void free_backoff(struct ceph_osd_backoff *backoff)
1923 {
1924 	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1925 	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1926 
1927 	free_hoid(backoff->begin);
1928 	free_hoid(backoff->end);
1929 	kfree(backoff);
1930 }
1931 
1932 /*
1933  * Within a specific spgid, backoffs are managed by ->begin hoid.
1934  */
1935 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1936 			RB_BYVAL, spg_node);
1937 
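/*
 * Find the backoff whose [begin, end) range contains @hoid, or NULL
 * if @hoid isn't covered by any backoff in @root.
 */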
1938 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1939 					    const struct ceph_hobject_id *hoid)
1940 {
1941 	struct rb_node *n = root->rb_node;
1942 
1943 	while (n) {
1944 		struct ceph_osd_backoff *cur =
1945 		    rb_entry(n, struct ceph_osd_backoff, spg_node);
1946 		int cmp;
1947 
1948 		cmp = hoid_compare(hoid, cur->begin);
1949 		if (cmp < 0) {
1950 			n = n->rb_left;
1951 		} else if (cmp > 0) {
1952 			if (hoid_compare(hoid, cur->end) < 0)
1953 				return cur;
1954 
1955 			n = n->rb_right;
1956 		} else {
1957 			return cur;
1958 		}
1959 	}
1960 
1961 	return NULL;
1962 }
1963 
1964 /*
1965  * Each backoff has a unique id within its OSD session.
1966  */
1967 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1968 
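/*
 * Release all backoffs (and their spg mappings) tracked for this OSD
 * session.
 */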
1969 static void clear_backoffs(struct ceph_osd *osd)
1970 {
1971 	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1972 		struct ceph_spg_mapping *spg =
1973 		    rb_entry(rb_first(&osd->o_backoff_mappings),
1974 			     struct ceph_spg_mapping, node);
1975 
1976 		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1977 			struct ceph_osd_backoff *backoff =
1978 			    rb_entry(rb_first(&spg->backoffs),
1979 				     struct ceph_osd_backoff, spg_node);
1980 
1981 			erase_backoff(&spg->backoffs, backoff);
1982 			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1983 			free_backoff(backoff);
1984 		}
1985 		erase_spg_mapping(&osd->o_backoff_mappings, spg);
1986 		free_spg_mapping(spg);
1987 	}
1988 }
1989 
1990 /*
1991  * Set up a temporary, non-owning view into @t.
1992  */
1993 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1994 				  const struct ceph_osd_request_target *t)
1995 {
1996 	hoid->key = NULL;
1997 	hoid->key_len = 0;
1998 	hoid->oid = t->target_oid.name;
1999 	hoid->oid_len = t->target_oid.name_len;
2000 	hoid->snapid = CEPH_NOSNAP;
2001 	hoid->hash = t->pgid.seed;
2002 	hoid->is_max = false;
2003 	if (t->target_oloc.pool_ns) {
2004 		hoid->nspace = t->target_oloc.pool_ns->str;
2005 		hoid->nspace_len = t->target_oloc.pool_ns->len;
2006 	} else {
2007 		hoid->nspace = NULL;
2008 		hoid->nspace_len = 0;
2009 	}
2010 	hoid->pool = t->target_oloc.pool;
2011 	ceph_hoid_build_hash_cache(hoid);
2012 }
2013 
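/*
 * Return true if @req targets an object that falls within a backoff
 * received from its OSD, in which case the request must not be sent
 * until the OSD unblocks that range.
 */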
2014 static bool should_plug_request(struct ceph_osd_request *req)
2015 {
2016 	struct ceph_osd *osd = req->r_osd;
2017 	struct ceph_spg_mapping *spg;
2018 	struct ceph_osd_backoff *backoff;
2019 	struct ceph_hobject_id hoid;
2020 
2021 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
2022 	if (!spg)
2023 		return false;
2024 
2025 	hoid_fill_from_target(&hoid, &req->r_t);
2026 	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
2027 	if (!backoff)
2028 		return false;
2029 
2030 	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
2031 	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
2032 	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
2033 	return true;
2034 }
2035 
2036 /*
2037  * Keep get_num_data_items() in sync with this function.
2038  */
2039 static void setup_request_data(struct ceph_osd_request *req)
2040 {
2041 	struct ceph_msg *request_msg = req->r_request;
2042 	struct ceph_msg *reply_msg = req->r_reply;
2043 	struct ceph_osd_req_op *op;
2044 
2045 	if (request_msg->num_data_items || reply_msg->num_data_items)
2046 		return;
2047 
2048 	WARN_ON(request_msg->data_length || reply_msg->data_length);
2049 	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
2050 		switch (op->op) {
2051 		/* request */
2052 		case CEPH_OSD_OP_WRITE:
2053 		case CEPH_OSD_OP_WRITEFULL:
2054 			WARN_ON(op->indata_len != op->extent.length);
2055 			ceph_osdc_msg_data_add(request_msg,
2056 					       &op->extent.osd_data);
2057 			break;
2058 		case CEPH_OSD_OP_SETXATTR:
2059 		case CEPH_OSD_OP_CMPXATTR:
2060 			WARN_ON(op->indata_len != op->xattr.name_len +
2061 						  op->xattr.value_len);
2062 			ceph_osdc_msg_data_add(request_msg,
2063 					       &op->xattr.osd_data);
2064 			break;
2065 		case CEPH_OSD_OP_NOTIFY_ACK:
2066 			ceph_osdc_msg_data_add(request_msg,
2067 					       &op->notify_ack.request_data);
2068 			break;
2069 		case CEPH_OSD_OP_COPY_FROM2:
2070 			ceph_osdc_msg_data_add(request_msg,
2071 					       &op->copy_from.osd_data);
2072 			break;
2073 
2074 		/* reply */
2075 		case CEPH_OSD_OP_STAT:
2076 			ceph_osdc_msg_data_add(reply_msg,
2077 					       &op->raw_data_in);
2078 			break;
2079 		case CEPH_OSD_OP_READ:
2080 		case CEPH_OSD_OP_SPARSE_READ:
2081 			ceph_osdc_msg_data_add(reply_msg,
2082 					       &op->extent.osd_data);
2083 			break;
2084 		case CEPH_OSD_OP_LIST_WATCHERS:
2085 			ceph_osdc_msg_data_add(reply_msg,
2086 					       &op->list_watchers.response_data);
2087 			break;
2088 
2089 		/* both */
2090 		case CEPH_OSD_OP_CALL:
2091 			WARN_ON(op->indata_len != op->cls.class_len +
2092 						  op->cls.method_len +
2093 						  op->cls.indata_len);
2094 			ceph_osdc_msg_data_add(request_msg,
2095 					       &op->cls.request_info);
2096 			/* optional, can be NONE */
2097 			ceph_osdc_msg_data_add(request_msg,
2098 					       &op->cls.request_data);
2099 			/* optional, can be NONE */
2100 			ceph_osdc_msg_data_add(reply_msg,
2101 					       &op->cls.response_data);
2102 			break;
2103 		case CEPH_OSD_OP_NOTIFY:
2104 			ceph_osdc_msg_data_add(request_msg,
2105 					       &op->notify.request_data);
2106 			ceph_osdc_msg_data_add(reply_msg,
2107 					       &op->notify.response_data);
2108 			break;
2109 		}
2110 	}
2111 }
2112 
2113 static void encode_pgid(void **p, const struct ceph_pg *pgid)
2114 {
2115 	ceph_encode_8(p, 1);
2116 	ceph_encode_64(p, pgid->pool);
2117 	ceph_encode_32(p, pgid->seed);
2118 	ceph_encode_32(p, -1); /* preferred */
2119 }
2120 
2121 static void encode_spgid(void **p, const struct ceph_spg *spgid)
2122 {
2123 	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
2124 	encode_pgid(p, &spgid->pgid);
2125 	ceph_encode_8(p, spgid->shard);
2126 }
2127 
2128 static void encode_oloc(void **p, void *end,
2129 			const struct ceph_object_locator *oloc)
2130 {
2131 	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
2132 	ceph_encode_64(p, oloc->pool);
2133 	ceph_encode_32(p, -1); /* preferred */
2134 	ceph_encode_32(p, 0);  /* key len */
2135 	if (oloc->pool_ns)
2136 		ceph_encode_string(p, end, oloc->pool_ns->str,
2137 				   oloc->pool_ns->len);
2138 	else
2139 		ceph_encode_32(p, 0);
2140 }
2141 
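/*
 * Encode the MOSDOp v8 front for @req, leaving room for the trailing
 * peer features.  encode_request_finish() completes (or re-encodes)
 * the message once the target OSD's features are known.
 */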
2142 static void encode_request_partial(struct ceph_osd_request *req,
2143 				   struct ceph_msg *msg)
2144 {
2145 	void *p = msg->front.iov_base;
2146 	void *const end = p + msg->front_alloc_len;
2147 	u32 data_len = 0;
2148 	int i;
2149 
2150 	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
2151 		/* snapshots aren't writeable */
2152 		WARN_ON(req->r_snapid != CEPH_NOSNAP);
2153 	} else {
2154 		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
2155 			req->r_data_offset || req->r_snapc);
2156 	}
2157 
2158 	setup_request_data(req);
2159 
2160 	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
2161 	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
2162 	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
2163 	ceph_encode_32(&p, req->r_flags);
2164 
2165 	/* reqid */
2166 	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
2167 	memset(p, 0, sizeof(struct ceph_osd_reqid));
2168 	p += sizeof(struct ceph_osd_reqid);
2169 
2170 	/* trace */
2171 	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
2172 	p += sizeof(struct ceph_blkin_trace_info);
2173 
2174 	ceph_encode_32(&p, 0); /* client_inc, always 0 */
2175 	ceph_encode_timespec64(p, &req->r_mtime);
2176 	p += sizeof(struct ceph_timespec);
2177 
2178 	encode_oloc(&p, end, &req->r_t.target_oloc);
2179 	ceph_encode_string(&p, end, req->r_t.target_oid.name,
2180 			   req->r_t.target_oid.name_len);
2181 
2182 	/* ops, can imply data */
2183 	ceph_encode_16(&p, req->r_num_ops);
2184 	for (i = 0; i < req->r_num_ops; i++) {
2185 		data_len += osd_req_encode_op(p, &req->r_ops[i]);
2186 		p += sizeof(struct ceph_osd_op);
2187 	}
2188 
2189 	ceph_encode_64(&p, req->r_snapid); /* snapid */
2190 	if (req->r_snapc) {
2191 		ceph_encode_64(&p, req->r_snapc->seq);
2192 		ceph_encode_32(&p, req->r_snapc->num_snaps);
2193 		for (i = 0; i < req->r_snapc->num_snaps; i++)
2194 			ceph_encode_64(&p, req->r_snapc->snaps[i]);
2195 	} else {
2196 		ceph_encode_64(&p, 0); /* snap_seq */
2197 		ceph_encode_32(&p, 0); /* snaps len */
2198 	}
2199 
2200 	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
2201 	BUG_ON(p > end - 8); /* space for features */
2202 
2203 	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
2204 	/* front_len is finalized in encode_request_finish() */
2205 	msg->front.iov_len = p - msg->front.iov_base;
2206 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2207 	msg->hdr.data_len = cpu_to_le32(data_len);
2208 	/*
2209 	 * The header "data_off" is a hint to the receiver allowing it
2210 	 * to align received data into its buffers such that there's no
2211 	 * need to re-copy it before writing it to disk (direct I/O).
2212 	 */
2213 	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
2214 
2215 	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
2216 	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
2217 }
2218 
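/*
 * Finish the message started by encode_request_partial(): for a
 * luminous OSD (RESEND_ON_SPLIT), just append the peer features;
 * otherwise re-encode the v8 front into MOSDOp v4.
 */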
2219 static void encode_request_finish(struct ceph_msg *msg)
2220 {
2221 	void *p = msg->front.iov_base;
2222 	void *const partial_end = p + msg->front.iov_len;
2223 	void *const end = p + msg->front_alloc_len;
2224 
2225 	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2226 		/* luminous OSD -- encode features and be done */
2227 		p = partial_end;
2228 		ceph_encode_64(&p, msg->con->peer_features);
2229 	} else {
2230 		struct {
2231 			char spgid[CEPH_ENCODING_START_BLK_LEN +
2232 				   CEPH_PGID_ENCODING_LEN + 1];
2233 			__le32 hash;
2234 			__le32 epoch;
2235 			__le32 flags;
2236 			char reqid[CEPH_ENCODING_START_BLK_LEN +
2237 				   sizeof(struct ceph_osd_reqid)];
2238 			char trace[sizeof(struct ceph_blkin_trace_info)];
2239 			__le32 client_inc;
2240 			struct ceph_timespec mtime;
2241 		} __packed head;
2242 		struct ceph_pg pgid;
2243 		void *oloc, *oid, *tail;
2244 		int oloc_len, oid_len, tail_len;
2245 		int len;
2246 
2247 		/*
2248 		 * Pre-luminous OSD -- reencode v8 into v4 using @head
2249 		 * as a temporary buffer.  Encode the raw PG; the rest
2250 		 * is just a matter of moving oloc, oid and tail blobs
2251 		 * around.
2252 		 */
2253 		memcpy(&head, p, sizeof(head));
2254 		p += sizeof(head);
2255 
2256 		oloc = p;
2257 		p += CEPH_ENCODING_START_BLK_LEN;
2258 		pgid.pool = ceph_decode_64(&p);
2259 		p += 4 + 4; /* preferred, key len */
2260 		len = ceph_decode_32(&p);
2261 		p += len;   /* nspace */
2262 		oloc_len = p - oloc;
2263 
2264 		oid = p;
2265 		len = ceph_decode_32(&p);
2266 		p += len;
2267 		oid_len = p - oid;
2268 
2269 		tail = p;
2270 		tail_len = partial_end - p;
2271 
2272 		p = msg->front.iov_base;
2273 		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2274 		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2275 		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2276 		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2277 
2278 		/* reassert_version */
2279 		memset(p, 0, sizeof(struct ceph_eversion));
2280 		p += sizeof(struct ceph_eversion);
2281 
2282 		BUG_ON(p >= oloc);
2283 		memmove(p, oloc, oloc_len);
2284 		p += oloc_len;
2285 
2286 		pgid.seed = le32_to_cpu(head.hash);
2287 		encode_pgid(&p, &pgid); /* raw pg */
2288 
2289 		BUG_ON(p >= oid);
2290 		memmove(p, oid, oid_len);
2291 		p += oid_len;
2292 
2293 		/* tail -- ops, snapid, snapc, retry_attempt */
2294 		BUG_ON(p >= tail);
2295 		memmove(p, tail, tail_len);
2296 		p += tail_len;
2297 
2298 		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2299 	}
2300 
2301 	BUG_ON(p > end);
2302 	msg->front.iov_len = p - msg->front.iov_base;
2303 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2304 
2305 	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2306 	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2307 	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2308 	     le16_to_cpu(msg->hdr.version));
2309 }
2310 
2311 /*
2312  * @req has to be assigned a tid and registered.
2313  */
2314 static void send_request(struct ceph_osd_request *req)
2315 {
2316 	struct ceph_osd *osd = req->r_osd;
2317 
2318 	verify_osd_locked(osd);
2319 	WARN_ON(osd->o_osd != req->r_t.osd);
2320 
2321 	/* backoff? */
2322 	if (should_plug_request(req))
2323 		return;
2324 
2325 	/*
2326 	 * We may have a previously queued request message hanging
2327 	 * around.  Cancel it to avoid corrupting the msgr.
2328 	 */
2329 	if (req->r_sent)
2330 		ceph_msg_revoke(req->r_request);
2331 
2332 	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2333 	if (req->r_attempts)
2334 		req->r_flags |= CEPH_OSD_FLAG_RETRY;
2335 	else
2336 		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2337 
2338 	encode_request_partial(req, req->r_request);
2339 
2340 	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2341 	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2342 	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2343 	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2344 	     req->r_attempts);
2345 
2346 	req->r_t.paused = false;
2347 	req->r_stamp = jiffies;
2348 	req->r_attempts++;
2349 
2350 	req->r_sent = osd->o_incarnation;
2351 	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2352 	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2353 }
2354 
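/*
 * Subscribe to new osdmaps from the monitor -- continuously while the
 * cluster is full or paused (we need to find out when the flag gets
 * cleared), one-shot otherwise.
 */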
2355 static void maybe_request_map(struct ceph_osd_client *osdc)
2356 {
2357 	bool continuous = false;
2358 
2359 	verify_osdc_locked(osdc);
2360 	WARN_ON(!osdc->osdmap->epoch);
2361 
2362 	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2363 	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2364 	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2365 		dout("%s osdc %p continuous\n", __func__, osdc);
2366 		continuous = true;
2367 	} else {
2368 		dout("%s osdc %p onetime\n", __func__, osdc);
2369 	}
2370 
2371 	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2372 			       osdc->osdmap->epoch + 1, continuous))
2373 		ceph_monc_renew_subs(&osdc->client->monc);
2374 }
2375 
2376 static void complete_request(struct ceph_osd_request *req, int err);
2377 static void send_map_check(struct ceph_osd_request *req);
2378 
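/*
 * Calculate the target, look up (or create) the OSD session and either
 * send the request, park it (paused, full or homeless OSD) or complete
 * it with an error.  Promote from read to write lock and retry if the
 * pool appears to not exist or a new OSD session is needed.
 */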
2379 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2380 {
2381 	struct ceph_osd_client *osdc = req->r_osdc;
2382 	struct ceph_osd *osd;
2383 	enum calc_target_result ct_res;
2384 	int err = 0;
2385 	bool need_send = false;
2386 	bool promoted = false;
2387 
2388 	WARN_ON(req->r_tid);
2389 	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2390 
2391 again:
2392 	ct_res = calc_target(osdc, &req->r_t, false);
2393 	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2394 		goto promote;
2395 
2396 	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2397 	if (IS_ERR(osd)) {
2398 		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2399 		goto promote;
2400 	}
2401 
2402 	if (osdc->abort_err) {
2403 		dout("req %p abort_err %d\n", req, osdc->abort_err);
2404 		err = osdc->abort_err;
2405 	} else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2406 		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2407 		     osdc->epoch_barrier);
2408 		req->r_t.paused = true;
2409 		maybe_request_map(osdc);
2410 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2411 		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2412 		dout("req %p pausewr\n", req);
2413 		req->r_t.paused = true;
2414 		maybe_request_map(osdc);
2415 	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2416 		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2417 		dout("req %p pauserd\n", req);
2418 		req->r_t.paused = true;
2419 		maybe_request_map(osdc);
2420 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2421 		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2422 				     CEPH_OSD_FLAG_FULL_FORCE)) &&
2423 		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2424 		    pool_full(osdc, req->r_t.base_oloc.pool))) {
2425 		dout("req %p full/pool_full\n", req);
2426 		if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2427 			err = -ENOSPC;
2428 		} else {
2429 			if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
2430 				pr_warn_ratelimited("cluster is full (osdmap FULL)\n");
2431 			else
2432 				pr_warn_ratelimited("pool %lld is full or reached quota\n",
2433 						    req->r_t.base_oloc.pool);
2434 			req->r_t.paused = true;
2435 			maybe_request_map(osdc);
2436 		}
2437 	} else if (!osd_homeless(osd)) {
2438 		need_send = true;
2439 	} else {
2440 		maybe_request_map(osdc);
2441 	}
2442 
2443 	mutex_lock(&osd->lock);
2444 	/*
2445 	 * Assign the tid atomically with send_request() to protect
2446 	 * multiple writes to the same object from racing with each
2447 	 * other, resulting in out of order ops on the OSDs.
2448 	 */
2449 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
2450 	link_request(osd, req);
2451 	if (need_send)
2452 		send_request(req);
2453 	else if (err)
2454 		complete_request(req, err);
2455 	mutex_unlock(&osd->lock);
2456 
2457 	if (!err && ct_res == CALC_TARGET_POOL_DNE)
2458 		send_map_check(req);
2459 
2460 	if (promoted)
2461 		downgrade_write(&osdc->lock);
2462 	return;
2463 
2464 promote:
2465 	up_read(&osdc->lock);
2466 	down_write(&osdc->lock);
2467 	wrlocked = true;
2468 	promoted = true;
2469 	goto again;
2470 }
2471 
2472 static void account_request(struct ceph_osd_request *req)
2473 {
2474 	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2475 	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2476 
2477 	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2478 	atomic_inc(&req->r_osdc->num_requests);
2479 
2480 	req->r_start_stamp = jiffies;
2481 	req->r_start_latency = ktime_get();
2482 }
2483 
2484 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2485 {
2486 	ceph_osdc_get_request(req);
2487 	account_request(req);
2488 	__submit_request(req, wrlocked);
2489 }
2490 
2491 static void finish_request(struct ceph_osd_request *req)
2492 {
2493 	struct ceph_osd_client *osdc = req->r_osdc;
2494 
2495 	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2496 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2497 
2498 	req->r_end_latency = ktime_get();
2499 
2500 	if (req->r_osd) {
2501 		ceph_init_sparse_read(&req->r_osd->o_sparse_read);
2502 		unlink_request(req->r_osd, req);
2503 	}
2504 	atomic_dec(&osdc->num_requests);
2505 
2506 	/*
2507 	 * If an OSD has failed or returned and a request has been sent
2508 	 * twice, it's possible to get a reply and end up here while the
2509 	 * request message is still queued for delivery.  The reply will be
2510 	 * ignored, so it's not a big deal, but it's better to try to catch it.
2511 	 */
2512 	ceph_msg_revoke(req->r_request);
2513 	ceph_msg_revoke_incoming(req->r_reply);
2514 }
2515 
2516 static void __complete_request(struct ceph_osd_request *req)
2517 {
2518 	dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
2519 	     req->r_tid, req->r_callback, req->r_result);
2520 
2521 	if (req->r_callback)
2522 		req->r_callback(req);
2523 	complete_all(&req->r_completion);
2524 	ceph_osdc_put_request(req);
2525 }
2526 
2527 static void complete_request_workfn(struct work_struct *work)
2528 {
2529 	struct ceph_osd_request *req =
2530 	    container_of(work, struct ceph_osd_request, r_complete_work);
2531 
2532 	__complete_request(req);
2533 }
2534 
2535 /*
2536  * This is open-coded in handle_reply().
2537  */
2538 static void complete_request(struct ceph_osd_request *req, int err)
2539 {
2540 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2541 
2542 	req->r_result = err;
2543 	finish_request(req);
2544 
2545 	INIT_WORK(&req->r_complete_work, complete_request_workfn);
2546 	queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2547 }
2548 
2549 static void cancel_map_check(struct ceph_osd_request *req)
2550 {
2551 	struct ceph_osd_client *osdc = req->r_osdc;
2552 	struct ceph_osd_request *lookup_req;
2553 
2554 	verify_osdc_wrlocked(osdc);
2555 
2556 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2557 	if (!lookup_req)
2558 		return;
2559 
2560 	WARN_ON(lookup_req != req);
2561 	erase_request_mc(&osdc->map_checks, req);
2562 	ceph_osdc_put_request(req);
2563 }
2564 
2565 static void cancel_request(struct ceph_osd_request *req)
2566 {
2567 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2568 
2569 	cancel_map_check(req);
2570 	finish_request(req);
2571 	complete_all(&req->r_completion);
2572 	ceph_osdc_put_request(req);
2573 }
2574 
2575 static void abort_request(struct ceph_osd_request *req, int err)
2576 {
2577 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2578 
2579 	cancel_map_check(req);
2580 	complete_request(req, err);
2581 }
2582 
2583 static int abort_fn(struct ceph_osd_request *req, void *arg)
2584 {
2585 	int err = *(int *)arg;
2586 
2587 	abort_request(req, err);
2588 	return 0; /* continue iteration */
2589 }
2590 
2591 /*
2592  * Abort all in-flight requests with @err and arrange for all future
2593  * requests to be failed immediately.
2594  */
2595 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2596 {
2597 	dout("%s osdc %p err %d\n", __func__, osdc, err);
2598 	down_write(&osdc->lock);
2599 	for_each_request(osdc, abort_fn, &err);
2600 	osdc->abort_err = err;
2601 	up_write(&osdc->lock);
2602 }
2603 EXPORT_SYMBOL(ceph_osdc_abort_requests);
2604 
2605 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2606 {
2607 	down_write(&osdc->lock);
2608 	osdc->abort_err = 0;
2609 	up_write(&osdc->lock);
2610 }
2611 EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
2612 
2613 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2614 {
2615 	if (likely(eb > osdc->epoch_barrier)) {
2616 		dout("updating epoch_barrier from %u to %u\n",
2617 				osdc->epoch_barrier, eb);
2618 		osdc->epoch_barrier = eb;
2619 		/* Request map if we're not to the barrier yet */
2620 		if (eb > osdc->osdmap->epoch)
2621 			maybe_request_map(osdc);
2622 	}
2623 }
2624 
2625 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2626 {
2627 	down_read(&osdc->lock);
2628 	if (unlikely(eb > osdc->epoch_barrier)) {
2629 		up_read(&osdc->lock);
2630 		down_write(&osdc->lock);
2631 		update_epoch_barrier(osdc, eb);
2632 		up_write(&osdc->lock);
2633 	} else {
2634 		up_read(&osdc->lock);
2635 	}
2636 }
2637 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2638 
2639 /*
2640  * We can end up releasing caps as a result of abort_request().
2641  * In that case, we probably want to ensure that the cap release message
2642  * has an updated epoch barrier in it, so set the epoch barrier prior to
2643  * aborting the first request.
2644  */
2645 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2646 {
2647 	struct ceph_osd_client *osdc = req->r_osdc;
2648 	bool *victims = arg;
2649 
2650 	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2651 	    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2652 	     pool_full(osdc, req->r_t.base_oloc.pool))) {
2653 		if (!*victims) {
2654 			update_epoch_barrier(osdc, osdc->osdmap->epoch);
2655 			*victims = true;
2656 		}
2657 		abort_request(req, -ENOSPC);
2658 	}
2659 
2660 	return 0; /* continue iteration */
2661 }
2662 
2663 /*
2664  * Drop all pending requests that are stalled waiting on a full condition to
2665  * clear, and complete them with ENOSPC as the return code. Set the
2666  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2667  * cancelled.
2668  */
2669 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2670 {
2671 	bool victims = false;
2672 
2673 	if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2674 	    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2675 		for_each_request(osdc, abort_on_full_fn, &victims);
2676 }
2677 
2678 static void check_pool_dne(struct ceph_osd_request *req)
2679 {
2680 	struct ceph_osd_client *osdc = req->r_osdc;
2681 	struct ceph_osdmap *map = osdc->osdmap;
2682 
2683 	verify_osdc_wrlocked(osdc);
2684 	WARN_ON(!map->epoch);
2685 
2686 	if (req->r_attempts) {
2687 		/*
2688 		 * We sent a request earlier, which means that
2689 		 * previously the pool existed, and now it does not
2690 		 * (i.e., it was deleted).
2691 		 */
2692 		req->r_map_dne_bound = map->epoch;
2693 		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2694 		     req->r_tid);
2695 	} else {
2696 		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2697 		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
2698 	}
2699 
2700 	if (req->r_map_dne_bound) {
2701 		if (map->epoch >= req->r_map_dne_bound) {
2702 			/* we had a new enough map */
2703 			pr_info_ratelimited("tid %llu pool does not exist\n",
2704 					    req->r_tid);
2705 			complete_request(req, -ENOENT);
2706 		}
2707 	} else {
2708 		send_map_check(req);
2709 	}
2710 }
2711 
2712 static void map_check_cb(struct ceph_mon_generic_request *greq)
2713 {
2714 	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2715 	struct ceph_osd_request *req;
2716 	u64 tid = greq->private_data;
2717 
2718 	WARN_ON(greq->result || !greq->u.newest);
2719 
2720 	down_write(&osdc->lock);
2721 	req = lookup_request_mc(&osdc->map_checks, tid);
2722 	if (!req) {
2723 		dout("%s tid %llu dne\n", __func__, tid);
2724 		goto out_unlock;
2725 	}
2726 
2727 	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2728 	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2729 	if (!req->r_map_dne_bound)
2730 		req->r_map_dne_bound = greq->u.newest;
2731 	erase_request_mc(&osdc->map_checks, req);
2732 	check_pool_dne(req);
2733 
2734 	ceph_osdc_put_request(req);
2735 out_unlock:
2736 	up_write(&osdc->lock);
2737 }
2738 
2739 static void send_map_check(struct ceph_osd_request *req)
2740 {
2741 	struct ceph_osd_client *osdc = req->r_osdc;
2742 	struct ceph_osd_request *lookup_req;
2743 	int ret;
2744 
2745 	verify_osdc_wrlocked(osdc);
2746 
2747 	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2748 	if (lookup_req) {
2749 		WARN_ON(lookup_req != req);
2750 		return;
2751 	}
2752 
2753 	ceph_osdc_get_request(req);
2754 	insert_request_mc(&osdc->map_checks, req);
2755 	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2756 					  map_check_cb, req->r_tid);
2757 	WARN_ON(ret);
2758 }
2759 
2760 /*
2761  * lingering requests, watch/notify v2 infrastructure
2762  */
2763 static void linger_release(struct kref *kref)
2764 {
2765 	struct ceph_osd_linger_request *lreq =
2766 	    container_of(kref, struct ceph_osd_linger_request, kref);
2767 
2768 	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2769 	     lreq->reg_req, lreq->ping_req);
2770 	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2771 	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2772 	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2773 	WARN_ON(!list_empty(&lreq->scan_item));
2774 	WARN_ON(!list_empty(&lreq->pending_lworks));
2775 	WARN_ON(lreq->osd);
2776 
2777 	if (lreq->request_pl)
2778 		ceph_pagelist_release(lreq->request_pl);
2779 	if (lreq->notify_id_pages)
2780 		ceph_release_page_vector(lreq->notify_id_pages, 1);
2781 
2782 	ceph_osdc_put_request(lreq->reg_req);
2783 	ceph_osdc_put_request(lreq->ping_req);
2784 	target_destroy(&lreq->t);
2785 	kfree(lreq);
2786 }
2787 
2788 static void linger_put(struct ceph_osd_linger_request *lreq)
2789 {
2790 	if (lreq)
2791 		kref_put(&lreq->kref, linger_release);
2792 }
2793 
2794 static struct ceph_osd_linger_request *
2795 linger_get(struct ceph_osd_linger_request *lreq)
2796 {
2797 	kref_get(&lreq->kref);
2798 	return lreq;
2799 }
2800 
2801 static struct ceph_osd_linger_request *
2802 linger_alloc(struct ceph_osd_client *osdc)
2803 {
2804 	struct ceph_osd_linger_request *lreq;
2805 
2806 	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2807 	if (!lreq)
2808 		return NULL;
2809 
2810 	kref_init(&lreq->kref);
2811 	mutex_init(&lreq->lock);
2812 	RB_CLEAR_NODE(&lreq->node);
2813 	RB_CLEAR_NODE(&lreq->osdc_node);
2814 	RB_CLEAR_NODE(&lreq->mc_node);
2815 	INIT_LIST_HEAD(&lreq->scan_item);
2816 	INIT_LIST_HEAD(&lreq->pending_lworks);
2817 	init_completion(&lreq->reg_commit_wait);
2818 	init_completion(&lreq->notify_finish_wait);
2819 
2820 	lreq->osdc = osdc;
2821 	target_init(&lreq->t);
2822 
2823 	dout("%s lreq %p\n", __func__, lreq);
2824 	return lreq;
2825 }
2826 
2827 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2828 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2829 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2830 
2831 /*
2832  * Create linger request <-> OSD session relation.
2833  *
2834  * @lreq has to be registered, @osd may be homeless.
2835  */
2836 static void link_linger(struct ceph_osd *osd,
2837 			struct ceph_osd_linger_request *lreq)
2838 {
2839 	verify_osd_locked(osd);
2840 	WARN_ON(!lreq->linger_id || lreq->osd);
2841 	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2842 	     osd->o_osd, lreq, lreq->linger_id);
2843 
2844 	if (!osd_homeless(osd))
2845 		__remove_osd_from_lru(osd);
2846 	else
2847 		atomic_inc(&osd->o_osdc->num_homeless);
2848 
2849 	get_osd(osd);
2850 	insert_linger(&osd->o_linger_requests, lreq);
2851 	lreq->osd = osd;
2852 }
2853 
2854 static void unlink_linger(struct ceph_osd *osd,
2855 			  struct ceph_osd_linger_request *lreq)
2856 {
2857 	verify_osd_locked(osd);
2858 	WARN_ON(lreq->osd != osd);
2859 	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2860 	     osd->o_osd, lreq, lreq->linger_id);
2861 
2862 	lreq->osd = NULL;
2863 	erase_linger(&osd->o_linger_requests, lreq);
2864 	put_osd(osd);
2865 
2866 	if (!osd_homeless(osd))
2867 		maybe_move_osd_to_lru(osd);
2868 	else
2869 		atomic_dec(&osd->o_osdc->num_homeless);
2870 }
2871 
2872 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2873 {
2874 	verify_osdc_locked(lreq->osdc);
2875 
2876 	return !RB_EMPTY_NODE(&lreq->osdc_node);
2877 }
2878 
2879 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2880 {
2881 	struct ceph_osd_client *osdc = lreq->osdc;
2882 	bool registered;
2883 
2884 	down_read(&osdc->lock);
2885 	registered = __linger_registered(lreq);
2886 	up_read(&osdc->lock);
2887 
2888 	return registered;
2889 }
2890 
2891 static void linger_register(struct ceph_osd_linger_request *lreq)
2892 {
2893 	struct ceph_osd_client *osdc = lreq->osdc;
2894 
2895 	verify_osdc_wrlocked(osdc);
2896 	WARN_ON(lreq->linger_id);
2897 
2898 	linger_get(lreq);
2899 	lreq->linger_id = ++osdc->last_linger_id;
2900 	insert_linger_osdc(&osdc->linger_requests, lreq);
2901 }
2902 
2903 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2904 {
2905 	struct ceph_osd_client *osdc = lreq->osdc;
2906 
2907 	verify_osdc_wrlocked(osdc);
2908 
2909 	erase_linger_osdc(&osdc->linger_requests, lreq);
2910 	linger_put(lreq);
2911 }
2912 
2913 static void cancel_linger_request(struct ceph_osd_request *req)
2914 {
2915 	struct ceph_osd_linger_request *lreq = req->r_priv;
2916 
2917 	WARN_ON(!req->r_linger);
2918 	cancel_request(req);
2919 	linger_put(lreq);
2920 }
2921 
2922 struct linger_work {
2923 	struct work_struct work;
2924 	struct ceph_osd_linger_request *lreq;
2925 	struct list_head pending_item;
2926 	unsigned long queued_stamp;
2927 
2928 	union {
2929 		struct {
2930 			u64 notify_id;
2931 			u64 notifier_id;
2932 			void *payload; /* points into @msg front */
2933 			size_t payload_len;
2934 
2935 			struct ceph_msg *msg; /* for ceph_msg_put() */
2936 		} notify;
2937 		struct {
2938 			int err;
2939 		} error;
2940 	};
2941 };
2942 
2943 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2944 				       work_func_t workfn)
2945 {
2946 	struct linger_work *lwork;
2947 
2948 	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2949 	if (!lwork)
2950 		return NULL;
2951 
2952 	INIT_WORK(&lwork->work, workfn);
2953 	INIT_LIST_HEAD(&lwork->pending_item);
2954 	lwork->lreq = linger_get(lreq);
2955 
2956 	return lwork;
2957 }
2958 
2959 static void lwork_free(struct linger_work *lwork)
2960 {
2961 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2962 
2963 	mutex_lock(&lreq->lock);
2964 	list_del(&lwork->pending_item);
2965 	mutex_unlock(&lreq->lock);
2966 
2967 	linger_put(lreq);
2968 	kfree(lwork);
2969 }
2970 
2971 static void lwork_queue(struct linger_work *lwork)
2972 {
2973 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2974 	struct ceph_osd_client *osdc = lreq->osdc;
2975 
2976 	verify_lreq_locked(lreq);
2977 	WARN_ON(!list_empty(&lwork->pending_item));
2978 
2979 	lwork->queued_stamp = jiffies;
2980 	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2981 	queue_work(osdc->notify_wq, &lwork->work);
2982 }
2983 
2984 static void do_watch_notify(struct work_struct *w)
2985 {
2986 	struct linger_work *lwork = container_of(w, struct linger_work, work);
2987 	struct ceph_osd_linger_request *lreq = lwork->lreq;
2988 
2989 	if (!linger_registered(lreq)) {
2990 		dout("%s lreq %p not registered\n", __func__, lreq);
2991 		goto out;
2992 	}
2993 
2994 	WARN_ON(!lreq->is_watch);
2995 	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2996 	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2997 	     lwork->notify.payload_len);
2998 	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2999 		  lwork->notify.notifier_id, lwork->notify.payload,
3000 		  lwork->notify.payload_len);
3001 
3002 out:
3003 	ceph_msg_put(lwork->notify.msg);
3004 	lwork_free(lwork);
3005 }
3006 
3007 static void do_watch_error(struct work_struct *w)
3008 {
3009 	struct linger_work *lwork = container_of(w, struct linger_work, work);
3010 	struct ceph_osd_linger_request *lreq = lwork->lreq;
3011 
3012 	if (!linger_registered(lreq)) {
3013 		dout("%s lreq %p not registered\n", __func__, lreq);
3014 		goto out;
3015 	}
3016 
3017 	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
3018 	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
3019 
3020 out:
3021 	lwork_free(lwork);
3022 }
3023 
3024 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
3025 {
3026 	struct linger_work *lwork;
3027 
3028 	lwork = lwork_alloc(lreq, do_watch_error);
3029 	if (!lwork) {
3030 		pr_err("failed to allocate error-lwork\n");
3031 		return;
3032 	}
3033 
3034 	lwork->error.err = lreq->last_error;
3035 	lwork_queue(lwork);
3036 }
3037 
3038 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
3039 				       int result)
3040 {
3041 	if (!completion_done(&lreq->reg_commit_wait)) {
3042 		lreq->reg_commit_error = (result <= 0 ? result : 0);
3043 		complete_all(&lreq->reg_commit_wait);
3044 	}
3045 }
3046 
3047 static void linger_commit_cb(struct ceph_osd_request *req)
3048 {
3049 	struct ceph_osd_linger_request *lreq = req->r_priv;
3050 
3051 	mutex_lock(&lreq->lock);
3052 	if (req != lreq->reg_req) {
3053 		dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
3054 		     __func__, lreq, lreq->linger_id, req, lreq->reg_req);
3055 		goto out;
3056 	}
3057 
3058 	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
3059 	     lreq->linger_id, req->r_result);
3060 	linger_reg_commit_complete(lreq, req->r_result);
3061 	lreq->committed = true;
3062 
3063 	if (!lreq->is_watch) {
3064 		struct ceph_osd_data *osd_data =
3065 		    osd_req_op_data(req, 0, notify, response_data);
3066 		void *p = page_address(osd_data->pages[0]);
3067 
3068 		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
3069 			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
3070 
3071 		/* make note of the notify_id */
3072 		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
3073 			lreq->notify_id = ceph_decode_64(&p);
3074 			dout("lreq %p notify_id %llu\n", lreq,
3075 			     lreq->notify_id);
3076 		} else {
3077 			dout("lreq %p no notify_id\n", lreq);
3078 		}
3079 	}
3080 
3081 out:
3082 	mutex_unlock(&lreq->lock);
3083 	linger_put(lreq);
3084 }
3085 
3086 static int normalize_watch_error(int err)
3087 {
3088 	/*
3089 	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
3090 	 * notification and a failure to reconnect because we raced with
3091 	 * the delete appear the same to the user.
3092 	 */
3093 	if (err == -ENOENT)
3094 		err = -ENOTCONN;
3095 
3096 	return err;
3097 }
3098 
3099 static void linger_reconnect_cb(struct ceph_osd_request *req)
3100 {
3101 	struct ceph_osd_linger_request *lreq = req->r_priv;
3102 
3103 	mutex_lock(&lreq->lock);
3104 	if (req != lreq->reg_req) {
3105 		dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
3106 		     __func__, lreq, lreq->linger_id, req, lreq->reg_req);
3107 		goto out;
3108 	}
3109 
3110 	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
3111 	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
3112 	if (req->r_result < 0) {
3113 		if (!lreq->last_error) {
3114 			lreq->last_error = normalize_watch_error(req->r_result);
3115 			queue_watch_error(lreq);
3116 		}
3117 	}
3118 
3119 out:
3120 	mutex_unlock(&lreq->lock);
3121 	linger_put(lreq);
3122 }
3123 
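/*
 * (Re)send the registration request for @lreq: a WATCH or RECONNECT op
 * for watches (depending on whether the watch has been committed), a
 * NOTIFY op for notifies.  Any previously queued reg_req is cancelled
 * first.
 */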
3124 static void send_linger(struct ceph_osd_linger_request *lreq)
3125 {
3126 	struct ceph_osd_client *osdc = lreq->osdc;
3127 	struct ceph_osd_request *req;
3128 	int ret;
3129 
3130 	verify_osdc_wrlocked(osdc);
3131 	mutex_lock(&lreq->lock);
3132 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3133 
3134 	if (lreq->reg_req) {
3135 		if (lreq->reg_req->r_osd)
3136 			cancel_linger_request(lreq->reg_req);
3137 		ceph_osdc_put_request(lreq->reg_req);
3138 	}
3139 
3140 	req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
3141 	BUG_ON(!req);
3142 
3143 	target_copy(&req->r_t, &lreq->t);
3144 	req->r_mtime = lreq->mtime;
3145 
3146 	if (lreq->is_watch && lreq->committed) {
3147 		osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_RECONNECT,
3148 				      lreq->linger_id, ++lreq->register_gen);
3149 		dout("lreq %p reconnect register_gen %u\n", lreq,
3150 		     req->r_ops[0].watch.gen);
3151 		req->r_callback = linger_reconnect_cb;
3152 	} else {
3153 		if (lreq->is_watch) {
3154 			osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_WATCH,
3155 					      lreq->linger_id, 0);
3156 		} else {
3157 			lreq->notify_id = 0;
3158 
3159 			refcount_inc(&lreq->request_pl->refcnt);
3160 			osd_req_op_notify_init(req, 0, lreq->linger_id,
3161 					       lreq->request_pl);
3162 			ceph_osd_data_pages_init(
3163 			    osd_req_op_data(req, 0, notify, response_data),
3164 			    lreq->notify_id_pages, PAGE_SIZE, 0, false, false);
3165 		}
3166 		dout("lreq %p register\n", lreq);
3167 		req->r_callback = linger_commit_cb;
3168 	}
3169 
3170 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3171 	BUG_ON(ret);
3172 
3173 	req->r_priv = linger_get(lreq);
3174 	req->r_linger = true;
3175 	lreq->reg_req = req;
3176 	mutex_unlock(&lreq->lock);
3177 
3178 	submit_request(req, true);
3179 }
3180 
3181 static void linger_ping_cb(struct ceph_osd_request *req)
3182 {
3183 	struct ceph_osd_linger_request *lreq = req->r_priv;
3184 
3185 	mutex_lock(&lreq->lock);
3186 	if (req != lreq->ping_req) {
3187 		dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
3188 		     __func__, lreq, lreq->linger_id, req, lreq->ping_req);
3189 		goto out;
3190 	}
3191 
3192 	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3193 	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3194 	     lreq->last_error);
3195 	if (lreq->register_gen == req->r_ops[0].watch.gen) {
3196 		if (!req->r_result) {
3197 			lreq->watch_valid_thru = lreq->ping_sent;
3198 		} else if (!lreq->last_error) {
3199 			lreq->last_error = normalize_watch_error(req->r_result);
3200 			queue_watch_error(lreq);
3201 		}
3202 	} else {
3203 		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3204 		     lreq->register_gen, req->r_ops[0].watch.gen);
3205 	}
3206 
3207 out:
3208 	mutex_unlock(&lreq->lock);
3209 	linger_put(lreq);
3210 }
3211 
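/*
 * Ping the watch to verify that it is still connected.  On completion,
 * linger_ping_cb() records watch_valid_thru or queues a watch error.
 * Skipped while the osdmap has PAUSERD set.
 */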
3212 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3213 {
3214 	struct ceph_osd_client *osdc = lreq->osdc;
3215 	struct ceph_osd_request *req;
3216 	int ret;
3217 
3218 	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3219 		dout("%s PAUSERD\n", __func__);
3220 		return;
3221 	}
3222 
3223 	lreq->ping_sent = jiffies;
3224 	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3225 	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
3226 	     lreq->register_gen);
3227 
3228 	if (lreq->ping_req) {
3229 		if (lreq->ping_req->r_osd)
3230 			cancel_linger_request(lreq->ping_req);
3231 		ceph_osdc_put_request(lreq->ping_req);
3232 	}
3233 
3234 	req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
3235 	BUG_ON(!req);
3236 
3237 	target_copy(&req->r_t, &lreq->t);
3238 	osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id,
3239 			      lreq->register_gen);
3240 	req->r_callback = linger_ping_cb;
3241 
3242 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3243 	BUG_ON(ret);
3244 
3245 	req->r_priv = linger_get(lreq);
3246 	req->r_linger = true;
3247 	lreq->ping_req = req;
3248 
3249 	ceph_osdc_get_request(req);
3250 	account_request(req);
3251 	req->r_tid = atomic64_inc_return(&osdc->last_tid);
3252 	link_request(lreq->osd, req);
3253 	send_request(req);
3254 }
3255 
3256 static void linger_submit(struct ceph_osd_linger_request *lreq)
3257 {
3258 	struct ceph_osd_client *osdc = lreq->osdc;
3259 	struct ceph_osd *osd;
3260 
3261 	down_write(&osdc->lock);
3262 	linger_register(lreq);
3263 
3264 	calc_target(osdc, &lreq->t, false);
3265 	osd = lookup_create_osd(osdc, lreq->t.osd, true);
3266 	link_linger(osd, lreq);
3267 
3268 	send_linger(lreq);
3269 	up_write(&osdc->lock);
3270 }
3271 
3272 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3273 {
3274 	struct ceph_osd_client *osdc = lreq->osdc;
3275 	struct ceph_osd_linger_request *lookup_lreq;
3276 
3277 	verify_osdc_wrlocked(osdc);
3278 
3279 	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3280 				       lreq->linger_id);
3281 	if (!lookup_lreq)
3282 		return;
3283 
3284 	WARN_ON(lookup_lreq != lreq);
3285 	erase_linger_mc(&osdc->linger_map_checks, lreq);
3286 	linger_put(lreq);
3287 }
3288 
3289 /*
3290  * @lreq has to be both registered and linked.
3291  */
3292 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3293 {
3294 	if (lreq->ping_req && lreq->ping_req->r_osd)
3295 		cancel_linger_request(lreq->ping_req);
3296 	if (lreq->reg_req && lreq->reg_req->r_osd)
3297 		cancel_linger_request(lreq->reg_req);
3298 	cancel_linger_map_check(lreq);
3299 	unlink_linger(lreq->osd, lreq);
3300 	linger_unregister(lreq);
3301 }
3302 
3303 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3304 {
3305 	struct ceph_osd_client *osdc = lreq->osdc;
3306 
3307 	down_write(&osdc->lock);
3308 	if (__linger_registered(lreq))
3309 		__linger_cancel(lreq);
3310 	up_write(&osdc->lock);
3311 }
3312 
3313 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3314 
3315 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3316 {
3317 	struct ceph_osd_client *osdc = lreq->osdc;
3318 	struct ceph_osdmap *map = osdc->osdmap;
3319 
3320 	verify_osdc_wrlocked(osdc);
3321 	WARN_ON(!map->epoch);
3322 
3323 	if (lreq->register_gen) {
3324 		lreq->map_dne_bound = map->epoch;
3325 		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3326 		     lreq, lreq->linger_id);
3327 	} else {
3328 		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3329 		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3330 		     map->epoch);
3331 	}
3332 
3333 	if (lreq->map_dne_bound) {
3334 		if (map->epoch >= lreq->map_dne_bound) {
3335 			/* we had a new enough map */
3336 			pr_info("linger_id %llu pool does not exist\n",
3337 				lreq->linger_id);
3338 			linger_reg_commit_complete(lreq, -ENOENT);
3339 			__linger_cancel(lreq);
3340 		}
3341 	} else {
3342 		send_linger_map_check(lreq);
3343 	}
3344 }
3345 
3346 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3347 {
3348 	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3349 	struct ceph_osd_linger_request *lreq;
3350 	u64 linger_id = greq->private_data;
3351 
3352 	WARN_ON(greq->result || !greq->u.newest);
3353 
3354 	down_write(&osdc->lock);
3355 	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3356 	if (!lreq) {
3357 		dout("%s linger_id %llu dne\n", __func__, linger_id);
3358 		goto out_unlock;
3359 	}
3360 
3361 	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3362 	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3363 	     greq->u.newest);
3364 	if (!lreq->map_dne_bound)
3365 		lreq->map_dne_bound = greq->u.newest;
3366 	erase_linger_mc(&osdc->linger_map_checks, lreq);
3367 	check_linger_pool_dne(lreq);
3368 
3369 	linger_put(lreq);
3370 out_unlock:
3371 	up_write(&osdc->lock);
3372 }
3373 
3374 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3375 {
3376 	struct ceph_osd_client *osdc = lreq->osdc;
3377 	struct ceph_osd_linger_request *lookup_lreq;
3378 	int ret;
3379 
3380 	verify_osdc_wrlocked(osdc);
3381 
3382 	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3383 				       lreq->linger_id);
3384 	if (lookup_lreq) {
3385 		WARN_ON(lookup_lreq != lreq);
3386 		return;
3387 	}
3388 
3389 	linger_get(lreq);
3390 	insert_linger_mc(&osdc->linger_map_checks, lreq);
3391 	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3392 					  linger_map_check_cb, lreq->linger_id);
3393 	WARN_ON(ret);
3394 }
3395 
3396 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3397 {
3398 	int ret;
3399 
3400 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3401 	ret = wait_for_completion_killable(&lreq->reg_commit_wait);
3402 	return ret ?: lreq->reg_commit_error;
3403 }
3404 
3405 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
3406 				     unsigned long timeout)
3407 {
3408 	long left;
3409 
3410 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3411 	left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
3412 						ceph_timeout_jiffies(timeout));
3413 	if (left <= 0)
3414 		left = left ?: -ETIMEDOUT;
3415 	else
3416 		left = lreq->notify_finish_error; /* completed */
3417 
3418 	return left;
3419 }
3420 
3421 /*
3422  * Timeout callback, called every N seconds.  When one or more OSD
3423  * requests have been active for more than N seconds, we send a
3424  * keepalive (tag + timestamp) to their OSDs to ensure that any
3425  * communications channel reset is detected.
3426  */
3427 static void handle_timeout(struct work_struct *work)
3428 {
3429 	struct ceph_osd_client *osdc =
3430 		container_of(work, struct ceph_osd_client, timeout_work.work);
3431 	struct ceph_options *opts = osdc->client->options;
3432 	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3433 	unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3434 	LIST_HEAD(slow_osds);
3435 	struct rb_node *n, *p;
3436 
3437 	dout("%s osdc %p\n", __func__, osdc);
3438 	down_write(&osdc->lock);
3439 
3440 	/*
3441 	 * Ping OSDs that are a bit slow.  This ensures that if there
3442 	 * is a break in the TCP connection we will notice and reopen
3443 	 * a connection with that OSD (from the fault callback).
3444 	 */
3445 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3446 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3447 		bool found = false;
3448 
3449 		for (p = rb_first(&osd->o_requests); p; ) {
3450 			struct ceph_osd_request *req =
3451 			    rb_entry(p, struct ceph_osd_request, r_node);
3452 
3453 			p = rb_next(p); /* abort_request() */
3454 
3455 			if (time_before(req->r_stamp, cutoff)) {
3456 				dout(" req %p tid %llu on osd%d is laggy\n",
3457 				     req, req->r_tid, osd->o_osd);
3458 				found = true;
3459 			}
3460 			if (opts->osd_request_timeout &&
3461 			    time_before(req->r_start_stamp, expiry_cutoff)) {
3462 				pr_err_ratelimited("tid %llu on osd%d timeout\n",
3463 				       req->r_tid, osd->o_osd);
3464 				abort_request(req, -ETIMEDOUT);
3465 			}
3466 		}
3467 		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3468 			struct ceph_osd_linger_request *lreq =
3469 			    rb_entry(p, struct ceph_osd_linger_request, node);
3470 
3471 			dout(" lreq %p linger_id %llu is served by osd%d\n",
3472 			     lreq, lreq->linger_id, osd->o_osd);
3473 			found = true;
3474 
3475 			mutex_lock(&lreq->lock);
3476 			if (lreq->is_watch && lreq->committed && !lreq->last_error)
3477 				send_linger_ping(lreq);
3478 			mutex_unlock(&lreq->lock);
3479 		}
3480 
3481 		if (found)
3482 			list_move_tail(&osd->o_keepalive_item, &slow_osds);
3483 	}
3484 
3485 	if (opts->osd_request_timeout) {
3486 		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3487 			struct ceph_osd_request *req =
3488 			    rb_entry(p, struct ceph_osd_request, r_node);
3489 
3490 			p = rb_next(p); /* abort_request() */
3491 
3492 			if (time_before(req->r_start_stamp, expiry_cutoff)) {
3493 				pr_err_ratelimited("tid %llu on osd%d timeout\n",
3494 				       req->r_tid, osdc->homeless_osd.o_osd);
3495 				abort_request(req, -ETIMEDOUT);
3496 			}
3497 		}
3498 	}
3499 
3500 	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3501 		maybe_request_map(osdc);
3502 
3503 	while (!list_empty(&slow_osds)) {
3504 		struct ceph_osd *osd = list_first_entry(&slow_osds,
3505 							struct ceph_osd,
3506 							o_keepalive_item);
3507 		list_del_init(&osd->o_keepalive_item);
3508 		ceph_con_keepalive(&osd->o_con);
3509 	}
3510 
3511 	up_write(&osdc->lock);
3512 	schedule_delayed_work(&osdc->timeout_work,
3513 			      osdc->client->options->osd_keepalive_timeout);
3514 }
3515 
3516 static void handle_osds_timeout(struct work_struct *work)
3517 {
3518 	struct ceph_osd_client *osdc =
3519 		container_of(work, struct ceph_osd_client,
3520 			     osds_timeout_work.work);
3521 	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3522 	struct ceph_osd *osd, *nosd;
3523 
3524 	dout("%s osdc %p\n", __func__, osdc);
3525 	down_write(&osdc->lock);
3526 	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3527 		if (time_before(jiffies, osd->lru_ttl))
3528 			break;
3529 
3530 		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3531 		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3532 		close_osd(osd);
3533 	}
3534 
3535 	up_write(&osdc->lock);
3536 	schedule_delayed_work(&osdc->osds_timeout_work,
3537 			      round_jiffies_relative(delay));
3538 }
3539 
3540 static int ceph_oloc_decode(void **p, void *end,
3541 			    struct ceph_object_locator *oloc)
3542 {
3543 	u8 struct_v, struct_cv;
3544 	u32 len;
3545 	void *struct_end;
3546 	int ret = 0;
3547 
3548 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3549 	struct_v = ceph_decode_8(p);
3550 	struct_cv = ceph_decode_8(p);
3551 	if (struct_v < 3) {
3552 		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3553 			struct_v, struct_cv);
3554 		goto e_inval;
3555 	}
3556 	if (struct_cv > 6) {
3557 		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3558 			struct_v, struct_cv);
3559 		goto e_inval;
3560 	}
3561 	len = ceph_decode_32(p);
3562 	ceph_decode_need(p, end, len, e_inval);
3563 	struct_end = *p + len;
3564 
3565 	oloc->pool = ceph_decode_64(p);
3566 	*p += 4; /* skip preferred */
3567 
3568 	len = ceph_decode_32(p);
3569 	if (len > 0) {
3570 		pr_warn("ceph_object_locator::key is set\n");
3571 		goto e_inval;
3572 	}
3573 
3574 	if (struct_v >= 5) {
3575 		bool changed = false;
3576 
3577 		len = ceph_decode_32(p);
3578 		if (len > 0) {
3579 			ceph_decode_need(p, end, len, e_inval);
3580 			if (!oloc->pool_ns ||
3581 			    ceph_compare_string(oloc->pool_ns, *p, len))
3582 				changed = true;
3583 			*p += len;
3584 		} else {
3585 			if (oloc->pool_ns)
3586 				changed = true;
3587 		}
3588 		if (changed) {
3589 			/* redirect changes namespace */
3590 			pr_warn("ceph_object_locator::nspace is changed\n");
3591 			goto e_inval;
3592 		}
3593 	}
3594 
3595 	if (struct_v >= 6) {
3596 		s64 hash = ceph_decode_64(p);
3597 		if (hash != -1) {
3598 			pr_warn("ceph_object_locator::hash is set\n");
3599 			goto e_inval;
3600 		}
3601 	}
3602 
3603 	/* skip the rest */
3604 	*p = struct_end;
3605 out:
3606 	return ret;
3607 
3608 e_inval:
3609 	ret = -EINVAL;
3610 	goto out;
3611 }
3612 
3613 static int ceph_redirect_decode(void **p, void *end,
3614 				struct ceph_request_redirect *redir)
3615 {
3616 	u8 struct_v, struct_cv;
3617 	u32 len;
3618 	void *struct_end;
3619 	int ret;
3620 
3621 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3622 	struct_v = ceph_decode_8(p);
3623 	struct_cv = ceph_decode_8(p);
3624 	if (struct_cv > 1) {
3625 		pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3626 			struct_v, struct_cv);
3627 		goto e_inval;
3628 	}
3629 	len = ceph_decode_32(p);
3630 	ceph_decode_need(p, end, len, e_inval);
3631 	struct_end = *p + len;
3632 
3633 	ret = ceph_oloc_decode(p, end, &redir->oloc);
3634 	if (ret)
3635 		goto out;
3636 
3637 	len = ceph_decode_32(p);
3638 	if (len > 0) {
3639 		pr_warn("ceph_request_redirect::object_name is set\n");
3640 		goto e_inval;
3641 	}
3642 
3643 	/* skip the rest */
3644 	*p = struct_end;
3645 out:
3646 	return ret;
3647 
3648 e_inval:
3649 	ret = -EINVAL;
3650 	goto out;
3651 }
3652 
3653 struct MOSDOpReply {
3654 	struct ceph_pg pgid;
3655 	u64 flags;
3656 	int result;
3657 	u32 epoch;
3658 	int num_ops;
3659 	u32 outdata_len[CEPH_OSD_MAX_OPS];
3660 	s32 rval[CEPH_OSD_MAX_OPS];
3661 	int retry_attempt;
3662 	struct ceph_eversion replay_version;
3663 	u64 user_version;
3664 	struct ceph_request_redirect redirect;
3665 };
3666 
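/*
 * Decode the front of an MOSDOpReply message into @m.  Per-op return
 * values and output lengths are captured for up to CEPH_OSD_MAX_OPS
 * ops; the redirect is decoded only for message versions that carry
 * it (v6 unconditionally, v7+ behind a flag byte).
 */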
3667 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3668 {
3669 	void *p = msg->front.iov_base;
3670 	void *const end = p + msg->front.iov_len;
3671 	u16 version = le16_to_cpu(msg->hdr.version);
3672 	struct ceph_eversion bad_replay_version;
3673 	u8 decode_redir;
3674 	u32 len;
3675 	int ret;
3676 	int i;
3677 
3678 	ceph_decode_32_safe(&p, end, len, e_inval);
3679 	ceph_decode_need(&p, end, len, e_inval);
3680 	p += len; /* skip oid */
3681 
3682 	ret = ceph_decode_pgid(&p, end, &m->pgid);
3683 	if (ret)
3684 		return ret;
3685 
3686 	ceph_decode_64_safe(&p, end, m->flags, e_inval);
3687 	ceph_decode_32_safe(&p, end, m->result, e_inval);
3688 	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3689 	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3690 	p += sizeof(bad_replay_version);
3691 	ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3692 
3693 	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3694 	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3695 		goto e_inval;
3696 
3697 	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3698 			 e_inval);
3699 	for (i = 0; i < m->num_ops; i++) {
3700 		struct ceph_osd_op *op = p;
3701 
3702 		m->outdata_len[i] = le32_to_cpu(op->payload_len);
3703 		p += sizeof(*op);
3704 	}
3705 
3706 	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3707 	for (i = 0; i < m->num_ops; i++)
3708 		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3709 
3710 	if (version >= 5) {
3711 		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3712 		memcpy(&m->replay_version, p, sizeof(m->replay_version));
3713 		p += sizeof(m->replay_version);
3714 		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3715 	} else {
3716 		m->replay_version = bad_replay_version; /* struct */
3717 		m->user_version = le64_to_cpu(m->replay_version.version);
3718 	}
3719 
3720 	if (version >= 6) {
3721 		if (version >= 7)
3722 			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3723 		else
3724 			decode_redir = 1;
3725 	} else {
3726 		decode_redir = 0;
3727 	}
3728 
3729 	if (decode_redir) {
3730 		ret = ceph_redirect_decode(&p, end, &m->redirect);
3731 		if (ret)
3732 			return ret;
3733 	} else {
3734 		ceph_oloc_init(&m->redirect.oloc);
3735 	}
3736 
3737 	return 0;
3738 
3739 e_inval:
3740 	return -EINVAL;
3741 }
3742 
3743 /*
3744  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3745  * specified.
3746  */
3747 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3748 {
3749 	struct ceph_osd_client *osdc = osd->o_osdc;
3750 	struct ceph_osd_request *req;
3751 	struct MOSDOpReply m;
3752 	u64 tid = le64_to_cpu(msg->hdr.tid);
3753 	u32 data_len = 0;
3754 	int ret;
3755 	int i;
3756 
3757 	dout("%s msg %p tid %llu\n", __func__, msg, tid);
3758 
3759 	down_read(&osdc->lock);
3760 	if (!osd_registered(osd)) {
3761 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
3762 		goto out_unlock_osdc;
3763 	}
3764 	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3765 
3766 	mutex_lock(&osd->lock);
3767 	req = lookup_request(&osd->o_requests, tid);
3768 	if (!req) {
3769 		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3770 		goto out_unlock_session;
3771 	}
3772 
3773 	m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3774 	ret = decode_MOSDOpReply(msg, &m);
3775 	m.redirect.oloc.pool_ns = NULL;
3776 	if (ret) {
3777 		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3778 		       req->r_tid, ret);
3779 		ceph_msg_dump(msg);
3780 		goto fail_request;
3781 	}
3782 	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3783 	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3784 	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3785 	     le64_to_cpu(m.replay_version.version), m.user_version);
3786 
3787 	if (m.retry_attempt >= 0) {
3788 		if (m.retry_attempt != req->r_attempts - 1) {
3789 			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3790 			     req, req->r_tid, m.retry_attempt,
3791 			     req->r_attempts - 1);
3792 			goto out_unlock_session;
3793 		}
3794 	} else {
3795 		WARN_ON(1); /* MOSDOpReply v4 is assumed */
3796 	}
3797 
3798 	if (!ceph_oloc_empty(&m.redirect.oloc)) {
3799 		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3800 		     m.redirect.oloc.pool);
3801 		unlink_request(osd, req);
3802 		mutex_unlock(&osd->lock);
3803 
3804 		/*
3805 		 * Not ceph_oloc_copy() - changing pool_ns is not
3806 		 * supported.
3807 		 */
3808 		req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3809 		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3810 				CEPH_OSD_FLAG_IGNORE_OVERLAY |
3811 				CEPH_OSD_FLAG_IGNORE_CACHE;
3812 		req->r_tid = 0;
3813 		__submit_request(req, false);
3814 		goto out_unlock_osdc;
3815 	}
3816 
3817 	if (m.result == -EAGAIN) {
3818 		dout("req %p tid %llu EAGAIN\n", req, req->r_tid);
3819 		unlink_request(osd, req);
3820 		mutex_unlock(&osd->lock);
3821 
3822 		/*
3823 		 * The object is missing on the replica or not (yet)
3824 		 * readable.  Clear pgid to force a resend to the primary
3825 		 * via legacy_change.
3826 		 */
3827 		req->r_t.pgid.pool = 0;
3828 		req->r_t.pgid.seed = 0;
3829 		WARN_ON(!req->r_t.used_replica);
3830 		req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
3831 				  CEPH_OSD_FLAG_LOCALIZE_READS);
3832 		req->r_tid = 0;
3833 		__submit_request(req, false);
3834 		goto out_unlock_osdc;
3835 	}
3836 
3837 	if (m.num_ops != req->r_num_ops) {
3838 		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3839 		       req->r_num_ops, req->r_tid);
3840 		goto fail_request;
3841 	}
3842 	for (i = 0; i < req->r_num_ops; i++) {
3843 		dout(" req %p tid %llu op %d rval %d len %u\n", req,
3844 		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
3845 		req->r_ops[i].rval = m.rval[i];
3846 		req->r_ops[i].outdata_len = m.outdata_len[i];
3847 		data_len += m.outdata_len[i];
3848 	}
3849 	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3850 		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3851 		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
3852 		goto fail_request;
3853 	}
3854 	dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3855 	     req, req->r_tid, m.result, data_len);
3856 
3857 	/*
3858 	 * Since we only ever request ONDISK, we should only ever get
3859 	 * one (type of) reply back.
3860 	 */
3861 	WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3862 	req->r_result = m.result ?: data_len;
3863 	finish_request(req);
3864 	mutex_unlock(&osd->lock);
3865 	up_read(&osdc->lock);
3866 
3867 	__complete_request(req);
3868 	return;
3869 
3870 fail_request:
3871 	complete_request(req, -EIO);
3872 out_unlock_session:
3873 	mutex_unlock(&osd->lock);
3874 out_unlock_osdc:
3875 	up_read(&osdc->lock);
3876 }
3877 
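/*
 * Snapshot the fullness of every pool in the current osdmap so that
 * pool_cleared_full() can later detect pools that went from full to
 * non-full.
 */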
3878 static void set_pool_was_full(struct ceph_osd_client *osdc)
3879 {
3880 	struct rb_node *n;
3881 
3882 	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3883 		struct ceph_pg_pool_info *pi =
3884 		    rb_entry(n, struct ceph_pg_pool_info, node);
3885 
3886 		pi->was_full = __pool_full(pi);
3887 	}
3888 }
3889 
3890 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3891 {
3892 	struct ceph_pg_pool_info *pi;
3893 
3894 	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3895 	if (!pi)
3896 		return false;
3897 
3898 	return pi->was_full && !__pool_full(pi);
3899 }
3900 
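/*
 * Recalculate the target of a linger request and, if it now maps to a
 * different OSD, relink the request to the new session.
 */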
3901 static enum calc_target_result
3902 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3903 {
3904 	struct ceph_osd_client *osdc = lreq->osdc;
3905 	enum calc_target_result ct_res;
3906 
3907 	ct_res = calc_target(osdc, &lreq->t, true);
3908 	if (ct_res == CALC_TARGET_NEED_RESEND) {
3909 		struct ceph_osd *osd;
3910 
3911 		osd = lookup_create_osd(osdc, lreq->t.osd, true);
3912 		if (osd != lreq->osd) {
3913 			unlink_linger(lreq->osd, lreq);
3914 			link_linger(osd, lreq);
3915 		}
3916 	}
3917 
3918 	return ct_res;
3919 }
3920 
3921 /*
3922  * Requeue requests whose mapping to an OSD has changed.
3923  */
3924 static void scan_requests(struct ceph_osd *osd,
3925 			  bool force_resend,
3926 			  bool cleared_full,
3927 			  bool check_pool_cleared_full,
3928 			  struct rb_root *need_resend,
3929 			  struct list_head *need_resend_linger)
3930 {
3931 	struct ceph_osd_client *osdc = osd->o_osdc;
3932 	struct rb_node *n;
3933 	bool force_resend_writes;
3934 
3935 	for (n = rb_first(&osd->o_linger_requests); n; ) {
3936 		struct ceph_osd_linger_request *lreq =
3937 		    rb_entry(n, struct ceph_osd_linger_request, node);
3938 		enum calc_target_result ct_res;
3939 
3940 		n = rb_next(n); /* recalc_linger_target() */
3941 
3942 		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3943 		     lreq->linger_id);
3944 		ct_res = recalc_linger_target(lreq);
3945 		switch (ct_res) {
3946 		case CALC_TARGET_NO_ACTION:
3947 			force_resend_writes = cleared_full ||
3948 			    (check_pool_cleared_full &&
3949 			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3950 			if (!force_resend && !force_resend_writes)
3951 				break;
3952 
3953 			fallthrough;
3954 		case CALC_TARGET_NEED_RESEND:
3955 			cancel_linger_map_check(lreq);
3956 			/*
3957 			 * scan_requests() for the previous epoch(s)
3958 			 * may have already added it to the list, since
3959 			 * it's not unlinked here.
3960 			 */
3961 			if (list_empty(&lreq->scan_item))
3962 				list_add_tail(&lreq->scan_item, need_resend_linger);
3963 			break;
3964 		case CALC_TARGET_POOL_DNE:
3965 			list_del_init(&lreq->scan_item);
3966 			check_linger_pool_dne(lreq);
3967 			break;
3968 		}
3969 	}
3970 
3971 	for (n = rb_first(&osd->o_requests); n; ) {
3972 		struct ceph_osd_request *req =
3973 		    rb_entry(n, struct ceph_osd_request, r_node);
3974 		enum calc_target_result ct_res;
3975 
3976 		n = rb_next(n); /* unlink_request(), check_pool_dne() */
3977 
3978 		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3979 		ct_res = calc_target(osdc, &req->r_t, false);
3980 		switch (ct_res) {
3981 		case CALC_TARGET_NO_ACTION:
3982 			force_resend_writes = cleared_full ||
3983 			    (check_pool_cleared_full &&
3984 			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3985 			if (!force_resend &&
3986 			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3987 			     !force_resend_writes))
3988 				break;
3989 
3990 			fallthrough;
3991 		case CALC_TARGET_NEED_RESEND:
3992 			cancel_map_check(req);
3993 			unlink_request(osd, req);
3994 			insert_request(need_resend, req);
3995 			break;
3996 		case CALC_TARGET_POOL_DNE:
3997 			check_pool_dne(req);
3998 			break;
3999 		}
4000 	}
4001 }
4002 
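/*
 * Decode and install a single incremental or full osdmap and scan all
 * sessions for requests that need to be resent as a result.  Sessions
 * whose OSD went down or changed address are closed.
 */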
4003 static int handle_one_map(struct ceph_osd_client *osdc,
4004 			  void *p, void *end, bool incremental,
4005 			  struct rb_root *need_resend,
4006 			  struct list_head *need_resend_linger)
4007 {
4008 	struct ceph_osdmap *newmap;
4009 	struct rb_node *n;
4010 	bool skipped_map = false;
4011 	bool was_full;
4012 
4013 	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
4014 	set_pool_was_full(osdc);
4015 
4016 	if (incremental)
4017 		newmap = osdmap_apply_incremental(&p, end,
4018 						  ceph_msgr2(osdc->client),
4019 						  osdc->osdmap);
4020 	else
4021 		newmap = ceph_osdmap_decode(&p, end, ceph_msgr2(osdc->client));
4022 	if (IS_ERR(newmap))
4023 		return PTR_ERR(newmap);
4024 
4025 	if (newmap != osdc->osdmap) {
4026 		/*
4027 		 * Preserve ->was_full before destroying the old map.
4028 		 * For pools that weren't in the old map, ->was_full
4029 		 * should be false.
4030 		 */
4031 		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
4032 			struct ceph_pg_pool_info *pi =
4033 			    rb_entry(n, struct ceph_pg_pool_info, node);
4034 			struct ceph_pg_pool_info *old_pi;
4035 
4036 			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
4037 			if (old_pi)
4038 				pi->was_full = old_pi->was_full;
4039 			else
4040 				WARN_ON(pi->was_full);
4041 		}
4042 
4043 		if (osdc->osdmap->epoch &&
4044 		    osdc->osdmap->epoch + 1 < newmap->epoch) {
4045 			WARN_ON(incremental);
4046 			skipped_map = true;
4047 		}
4048 
4049 		ceph_osdmap_destroy(osdc->osdmap);
4050 		osdc->osdmap = newmap;
4051 	}
4052 
4053 	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
4054 	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
4055 		      need_resend, need_resend_linger);
4056 
4057 	for (n = rb_first(&osdc->osds); n; ) {
4058 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4059 
4060 		n = rb_next(n); /* close_osd() */
4061 
4062 		scan_requests(osd, skipped_map, was_full, true, need_resend,
4063 			      need_resend_linger);
4064 		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
4065 		    memcmp(&osd->o_con.peer_addr,
4066 			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
4067 			   sizeof(struct ceph_entity_addr)))
4068 			close_osd(osd);
4069 	}
4070 
4071 	return 0;
4072 }
4073 
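/*
 * Resend the requests and linger requests collected by scan_requests()
 * now that the new osdmap is installed.  Request targets are brought
 * up to the latest epoch before the requests are relinked and sent.
 */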
4074 static void kick_requests(struct ceph_osd_client *osdc,
4075 			  struct rb_root *need_resend,
4076 			  struct list_head *need_resend_linger)
4077 {
4078 	struct ceph_osd_linger_request *lreq, *nlreq;
4079 	enum calc_target_result ct_res;
4080 	struct rb_node *n;
4081 
4082 	/* make sure need_resend targets reflect latest map */
4083 	for (n = rb_first(need_resend); n; ) {
4084 		struct ceph_osd_request *req =
4085 		    rb_entry(n, struct ceph_osd_request, r_node);
4086 
4087 		n = rb_next(n);
4088 
4089 		if (req->r_t.epoch < osdc->osdmap->epoch) {
4090 			ct_res = calc_target(osdc, &req->r_t, false);
4091 			if (ct_res == CALC_TARGET_POOL_DNE) {
4092 				erase_request(need_resend, req);
4093 				check_pool_dne(req);
4094 			}
4095 		}
4096 	}
4097 
4098 	for (n = rb_first(need_resend); n; ) {
4099 		struct ceph_osd_request *req =
4100 		    rb_entry(n, struct ceph_osd_request, r_node);
4101 		struct ceph_osd *osd;
4102 
4103 		n = rb_next(n);
4104 		erase_request(need_resend, req); /* before link_request() */
4105 
4106 		osd = lookup_create_osd(osdc, req->r_t.osd, true);
4107 		link_request(osd, req);
4108 		if (!req->r_linger) {
4109 			if (!osd_homeless(osd) && !req->r_t.paused)
4110 				send_request(req);
4111 		} else {
4112 			cancel_linger_request(req);
4113 		}
4114 	}
4115 
4116 	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
4117 		if (!osd_homeless(lreq->osd))
4118 			send_linger(lreq);
4119 
4120 		list_del_init(&lreq->scan_item);
4121 	}
4122 }
4123 
4124 /*
4125  * Process updated osd map.
4126  *
4127  * The message contains any number of incremental and full maps, normally
4128  * indicating some sort of topology change in the cluster.  Kick requests
4129  * off to different OSDs as needed.
4130  */
4131 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
4132 {
4133 	void *p = msg->front.iov_base;
4134 	void *const end = p + msg->front.iov_len;
4135 	u32 nr_maps, maplen;
4136 	u32 epoch;
4137 	struct ceph_fsid fsid;
4138 	struct rb_root need_resend = RB_ROOT;
4139 	LIST_HEAD(need_resend_linger);
4140 	bool handled_incremental = false;
4141 	bool was_pauserd, was_pausewr;
4142 	bool pauserd, pausewr;
4143 	int err;
4144 
4145 	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
4146 	down_write(&osdc->lock);
4147 
4148 	/* verify fsid */
4149 	ceph_decode_need(&p, end, sizeof(fsid), bad);
4150 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
4151 	if (ceph_check_fsid(osdc->client, &fsid) < 0)
4152 		goto bad;
4153 
4154 	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4155 	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4156 		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4157 		      have_pool_full(osdc);
4158 
4159 	/* incremental maps */
4160 	ceph_decode_32_safe(&p, end, nr_maps, bad);
4161 	dout(" %d inc maps\n", nr_maps);
4162 	while (nr_maps > 0) {
4163 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4164 		epoch = ceph_decode_32(&p);
4165 		maplen = ceph_decode_32(&p);
4166 		ceph_decode_need(&p, end, maplen, bad);
4167 		if (osdc->osdmap->epoch &&
4168 		    osdc->osdmap->epoch + 1 == epoch) {
4169 			dout("applying incremental map %u len %d\n",
4170 			     epoch, maplen);
4171 			err = handle_one_map(osdc, p, p + maplen, true,
4172 					     &need_resend, &need_resend_linger);
4173 			if (err)
4174 				goto bad;
4175 			handled_incremental = true;
4176 		} else {
4177 			dout("ignoring incremental map %u len %d\n",
4178 			     epoch, maplen);
4179 		}
4180 		p += maplen;
4181 		nr_maps--;
4182 	}
4183 	if (handled_incremental)
4184 		goto done;
4185 
4186 	/* full maps */
4187 	ceph_decode_32_safe(&p, end, nr_maps, bad);
4188 	dout(" %d full maps\n", nr_maps);
4189 	while (nr_maps) {
4190 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4191 		epoch = ceph_decode_32(&p);
4192 		maplen = ceph_decode_32(&p);
4193 		ceph_decode_need(&p, end, maplen, bad);
4194 		if (nr_maps > 1) {
4195 			dout("skipping non-latest full map %u len %d\n",
4196 			     epoch, maplen);
4197 		} else if (osdc->osdmap->epoch >= epoch) {
4198 			dout("skipping full map %u len %d, "
4199 			     "older than our %u\n", epoch, maplen,
4200 			     osdc->osdmap->epoch);
4201 		} else {
4202 			dout("taking full map %u len %d\n", epoch, maplen);
4203 			err = handle_one_map(osdc, p, p + maplen, false,
4204 					     &need_resend, &need_resend_linger);
4205 			if (err)
4206 				goto bad;
4207 		}
4208 		p += maplen;
4209 		nr_maps--;
4210 	}
4211 
4212 done:
4213 	/*
4214 	 * subscribe to subsequent osdmap updates if full to ensure
4215 	 * we find out when we are no longer full and stop returning
4216 	 * ENOSPC.
4217 	 */
4218 	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4219 	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4220 		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4221 		  have_pool_full(osdc);
4222 	if (was_pauserd || was_pausewr || pauserd || pausewr ||
4223 	    osdc->osdmap->epoch < osdc->epoch_barrier)
4224 		maybe_request_map(osdc);
4225 
4226 	kick_requests(osdc, &need_resend, &need_resend_linger);
4227 
4228 	ceph_osdc_abort_on_full(osdc);
4229 	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4230 			  osdc->osdmap->epoch);
4231 	up_write(&osdc->lock);
4232 	wake_up_all(&osdc->client->auth_wq);
4233 	return;
4234 
4235 bad:
4236 	pr_err("osdc handle_map corrupt msg\n");
4237 	ceph_msg_dump(msg);
4238 	up_write(&osdc->lock);
4239 }
4240 
4241 /*
4242  * Resubmit requests pending on the given osd.
4243  */
4244 static void kick_osd_requests(struct ceph_osd *osd)
4245 {
4246 	struct rb_node *n;
4247 
4248 	clear_backoffs(osd);
4249 
4250 	for (n = rb_first(&osd->o_requests); n; ) {
4251 		struct ceph_osd_request *req =
4252 		    rb_entry(n, struct ceph_osd_request, r_node);
4253 
4254 		n = rb_next(n); /* cancel_linger_request() */
4255 
4256 		if (!req->r_linger) {
4257 			if (!req->r_t.paused)
4258 				send_request(req);
4259 		} else {
4260 			cancel_linger_request(req);
4261 		}
4262 	}
4263 	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
4264 		struct ceph_osd_linger_request *lreq =
4265 		    rb_entry(n, struct ceph_osd_linger_request, node);
4266 
4267 		send_linger(lreq);
4268 	}
4269 }
4270 
4271 /*
4272  * If the osd connection drops, we need to resubmit all requests.
4273  */
4274 static void osd_fault(struct ceph_connection *con)
4275 {
4276 	struct ceph_osd *osd = con->private;
4277 	struct ceph_osd_client *osdc = osd->o_osdc;
4278 
4279 	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
4280 
4281 	down_write(&osdc->lock);
4282 	if (!osd_registered(osd)) {
4283 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
4284 		goto out_unlock;
4285 	}
4286 
4287 	if (!reopen_osd(osd))
4288 		kick_osd_requests(osd);
4289 	maybe_request_map(osdc);
4290 
4291 out_unlock:
4292 	up_write(&osdc->lock);
4293 }
4294 
4295 struct MOSDBackoff {
4296 	struct ceph_spg spgid;
4297 	u32 map_epoch;
4298 	u8 op;
4299 	u64 id;
4300 	struct ceph_hobject_id *begin;
4301 	struct ceph_hobject_id *end;
4302 };
4303 
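/*
 * Decode an MOSDBackoff message.  On success m->begin and m->end are
 * allocated and must be freed with free_hoid() unless ownership is
 * transferred (see handle_backoff_block()).
 */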
4304 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4305 {
4306 	void *p = msg->front.iov_base;
4307 	void *const end = p + msg->front.iov_len;
4308 	u8 struct_v;
4309 	u32 struct_len;
4310 	int ret;
4311 
4312 	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4313 	if (ret)
4314 		return ret;
4315 
4316 	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4317 	if (ret)
4318 		return ret;
4319 
4320 	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4321 	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4322 	ceph_decode_8_safe(&p, end, m->op, e_inval);
4323 	ceph_decode_64_safe(&p, end, m->id, e_inval);
4324 
4325 	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4326 	if (!m->begin)
4327 		return -ENOMEM;
4328 
4329 	ret = decode_hoid(&p, end, m->begin);
4330 	if (ret) {
4331 		free_hoid(m->begin);
4332 		return ret;
4333 	}
4334 
4335 	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4336 	if (!m->end) {
4337 		free_hoid(m->begin);
4338 		return -ENOMEM;
4339 	}
4340 
4341 	ret = decode_hoid(&p, end, m->end);
4342 	if (ret) {
4343 		free_hoid(m->begin);
4344 		free_hoid(m->end);
4345 		return ret;
4346 	}
4347 
4348 	return 0;
4349 
4350 e_inval:
4351 	return -EINVAL;
4352 }
4353 
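/*
 * Build an MOSDBackoff ACK_BLOCK message acknowledging @backoff at
 * @map_epoch.  Returns NULL on allocation failure.
 */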
4354 static struct ceph_msg *create_backoff_message(
4355 				const struct ceph_osd_backoff *backoff,
4356 				u32 map_epoch)
4357 {
4358 	struct ceph_msg *msg;
4359 	void *p, *end;
4360 	int msg_size;
4361 
4362 	msg_size = CEPH_ENCODING_START_BLK_LEN +
4363 			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4364 	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4365 	msg_size += CEPH_ENCODING_START_BLK_LEN +
4366 			hoid_encoding_size(backoff->begin);
4367 	msg_size += CEPH_ENCODING_START_BLK_LEN +
4368 			hoid_encoding_size(backoff->end);
4369 
4370 	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4371 	if (!msg)
4372 		return NULL;
4373 
4374 	p = msg->front.iov_base;
4375 	end = p + msg->front_alloc_len;
4376 
4377 	encode_spgid(&p, &backoff->spgid);
4378 	ceph_encode_32(&p, map_epoch);
4379 	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4380 	ceph_encode_64(&p, backoff->id);
4381 	encode_hoid(&p, end, backoff->begin);
4382 	encode_hoid(&p, end, backoff->end);
4383 	BUG_ON(p != end);
4384 
4385 	msg->front.iov_len = p - msg->front.iov_base;
4386 	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4387 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4388 
4389 	return msg;
4390 }
4391 
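/*
 * Install a backoff for the given spg and hobject range and ack it
 * back to the OSD.  Ownership of m->begin and m->end is transferred
 * to the new backoff.
 */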
4392 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4393 {
4394 	struct ceph_spg_mapping *spg;
4395 	struct ceph_osd_backoff *backoff;
4396 	struct ceph_msg *msg;
4397 
4398 	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4399 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4400 
4401 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4402 	if (!spg) {
4403 		spg = alloc_spg_mapping();
4404 		if (!spg) {
4405 			pr_err("%s failed to allocate spg\n", __func__);
4406 			return;
4407 		}
4408 		spg->spgid = m->spgid; /* struct */
4409 		insert_spg_mapping(&osd->o_backoff_mappings, spg);
4410 	}
4411 
4412 	backoff = alloc_backoff();
4413 	if (!backoff) {
4414 		pr_err("%s failed to allocate backoff\n", __func__);
4415 		return;
4416 	}
4417 	backoff->spgid = m->spgid; /* struct */
4418 	backoff->id = m->id;
4419 	backoff->begin = m->begin;
4420 	m->begin = NULL; /* backoff now owns this */
4421 	backoff->end = m->end;
4422 	m->end = NULL;   /* ditto */
4423 
4424 	insert_backoff(&spg->backoffs, backoff);
4425 	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4426 
4427 	/*
4428 	 * Ack with original backoff's epoch so that the OSD can
4429 	 * discard this if there was a PG split.
4430 	 */
4431 	msg = create_backoff_message(backoff, m->map_epoch);
4432 	if (!msg) {
4433 		pr_err("%s failed to allocate msg\n", __func__);
4434 		return;
4435 	}
4436 	ceph_con_send(&osd->o_con, msg);
4437 }
4438 
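/* Does the target's hobject fall within [begin, end)? */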
4439 static bool target_contained_by(const struct ceph_osd_request_target *t,
4440 				const struct ceph_hobject_id *begin,
4441 				const struct ceph_hobject_id *end)
4442 {
4443 	struct ceph_hobject_id hoid;
4444 	int cmp;
4445 
4446 	hoid_fill_from_target(&hoid, t);
4447 	cmp = hoid_compare(&hoid, begin);
4448 	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4449 }
4450 
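/*
 * Remove the backoff identified by @m->id and resend any requests in
 * the now-unblocked range that are still mapped to this PG.
 */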
4451 static void handle_backoff_unblock(struct ceph_osd *osd,
4452 				   const struct MOSDBackoff *m)
4453 {
4454 	struct ceph_spg_mapping *spg;
4455 	struct ceph_osd_backoff *backoff;
4456 	struct rb_node *n;
4457 
4458 	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4459 	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4460 
4461 	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4462 	if (!backoff) {
4463 		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4464 		       __func__, osd->o_osd, m->spgid.pgid.pool,
4465 		       m->spgid.pgid.seed, m->spgid.shard, m->id);
4466 		return;
4467 	}
4468 
4469 	if (hoid_compare(backoff->begin, m->begin) &&
4470 	    hoid_compare(backoff->end, m->end)) {
4471 		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4472 		       __func__, osd->o_osd, m->spgid.pgid.pool,
4473 		       m->spgid.pgid.seed, m->spgid.shard, m->id);
4474 		/* unblock it anyway... */
4475 	}
4476 
4477 	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4478 	BUG_ON(!spg);
4479 
4480 	erase_backoff(&spg->backoffs, backoff);
4481 	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4482 	free_backoff(backoff);
4483 
4484 	if (RB_EMPTY_ROOT(&spg->backoffs)) {
4485 		erase_spg_mapping(&osd->o_backoff_mappings, spg);
4486 		free_spg_mapping(spg);
4487 	}
4488 
4489 	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4490 		struct ceph_osd_request *req =
4491 		    rb_entry(n, struct ceph_osd_request, r_node);
4492 
4493 		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4494 			/*
4495 			 * Match against @m, not @backoff -- the PG may
4496 			 * have split on the OSD.
4497 			 */
4498 			if (target_contained_by(&req->r_t, m->begin, m->end)) {
4499 				/*
4500 				 * If no other installed backoff applies,
4501 				 * resend.
4502 				 */
4503 				send_request(req);
4504 			}
4505 		}
4506 	}
4507 }
4508 
4509 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4510 {
4511 	struct ceph_osd_client *osdc = osd->o_osdc;
4512 	struct MOSDBackoff m;
4513 	int ret;
4514 
4515 	down_read(&osdc->lock);
4516 	if (!osd_registered(osd)) {
4517 		dout("%s osd%d unknown\n", __func__, osd->o_osd);
4518 		up_read(&osdc->lock);
4519 		return;
4520 	}
4521 	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4522 
4523 	mutex_lock(&osd->lock);
4524 	ret = decode_MOSDBackoff(msg, &m);
4525 	if (ret) {
4526 		pr_err("failed to decode MOSDBackoff: %d\n", ret);
4527 		ceph_msg_dump(msg);
4528 		goto out_unlock;
4529 	}
4530 
4531 	switch (m.op) {
4532 	case CEPH_OSD_BACKOFF_OP_BLOCK:
4533 		handle_backoff_block(osd, &m);
4534 		break;
4535 	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4536 		handle_backoff_unblock(osd, &m);
4537 		break;
4538 	default:
4539 		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4540 	}
4541 
4542 	free_hoid(m.begin);
4543 	free_hoid(m.end);
4544 
4545 out_unlock:
4546 	mutex_unlock(&osd->lock);
4547 	up_read(&osdc->lock);
4548 }
4549 
4550 /*
4551  * Process osd watch notifications
4552  */
4553 static void handle_watch_notify(struct ceph_osd_client *osdc,
4554 				struct ceph_msg *msg)
4555 {
4556 	void *p = msg->front.iov_base;
4557 	void *const end = p + msg->front.iov_len;
4558 	struct ceph_osd_linger_request *lreq;
4559 	struct linger_work *lwork;
4560 	u8 proto_ver, opcode;
4561 	u64 cookie, notify_id;
4562 	u64 notifier_id = 0;
4563 	s32 return_code = 0;
4564 	void *payload = NULL;
4565 	u32 payload_len = 0;
4566 
4567 	ceph_decode_8_safe(&p, end, proto_ver, bad);
4568 	ceph_decode_8_safe(&p, end, opcode, bad);
4569 	ceph_decode_64_safe(&p, end, cookie, bad);
4570 	p += 8; /* skip ver */
4571 	ceph_decode_64_safe(&p, end, notify_id, bad);
4572 
4573 	if (proto_ver >= 1) {
4574 		ceph_decode_32_safe(&p, end, payload_len, bad);
4575 		ceph_decode_need(&p, end, payload_len, bad);
4576 		payload = p;
4577 		p += payload_len;
4578 	}
4579 
4580 	if (le16_to_cpu(msg->hdr.version) >= 2)
4581 		ceph_decode_32_safe(&p, end, return_code, bad);
4582 
4583 	if (le16_to_cpu(msg->hdr.version) >= 3)
4584 		ceph_decode_64_safe(&p, end, notifier_id, bad);
4585 
4586 	down_read(&osdc->lock);
4587 	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4588 	if (!lreq) {
4589 		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4590 		     cookie);
4591 		goto out_unlock_osdc;
4592 	}
4593 
4594 	mutex_lock(&lreq->lock);
4595 	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4596 	     opcode, cookie, lreq, lreq->is_watch);
4597 	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4598 		if (!lreq->last_error) {
4599 			lreq->last_error = -ENOTCONN;
4600 			queue_watch_error(lreq);
4601 		}
4602 	} else if (!lreq->is_watch) {
4603 		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4604 		if (lreq->notify_id && lreq->notify_id != notify_id) {
4605 			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4606 			     lreq->notify_id, notify_id);
4607 		} else if (!completion_done(&lreq->notify_finish_wait)) {
4608 			struct ceph_msg_data *data =
4609 			    msg->num_data_items ? &msg->data[0] : NULL;
4610 
4611 			if (data) {
4612 				if (lreq->preply_pages) {
4613 					WARN_ON(data->type !=
4614 							CEPH_MSG_DATA_PAGES);
4615 					*lreq->preply_pages = data->pages;
4616 					*lreq->preply_len = data->length;
4617 					data->own_pages = false;
4618 				}
4619 			}
4620 			lreq->notify_finish_error = return_code;
4621 			complete_all(&lreq->notify_finish_wait);
4622 		}
4623 	} else {
4624 		/* CEPH_WATCH_EVENT_NOTIFY */
4625 		lwork = lwork_alloc(lreq, do_watch_notify);
4626 		if (!lwork) {
4627 			pr_err("failed to allocate notify-lwork\n");
4628 			goto out_unlock_lreq;
4629 		}
4630 
4631 		lwork->notify.notify_id = notify_id;
4632 		lwork->notify.notifier_id = notifier_id;
4633 		lwork->notify.payload = payload;
4634 		lwork->notify.payload_len = payload_len;
4635 		lwork->notify.msg = ceph_msg_get(msg);
4636 		lwork_queue(lwork);
4637 	}
4638 
4639 out_unlock_lreq:
4640 	mutex_unlock(&lreq->lock);
4641 out_unlock_osdc:
4642 	up_read(&osdc->lock);
4643 	return;
4644 
4645 bad:
4646 	pr_err("osdc handle_watch_notify corrupt msg\n");
4647 }
4648 
4649 /*
4650  * Register request, send initial attempt.
4651  */
4652 void ceph_osdc_start_request(struct ceph_osd_client *osdc,
4653 			     struct ceph_osd_request *req)
4654 {
4655 	down_read(&osdc->lock);
4656 	submit_request(req, false);
4657 	up_read(&osdc->lock);
4658 }
4659 EXPORT_SYMBOL(ceph_osdc_start_request);
4660 
4661 /*
4662  * Unregister request.  If @req was registered, it isn't completed:
4663  * r_result isn't set and __complete_request() isn't invoked.
4664  *
4665  * If @req wasn't registered, this call may have raced with
4666  * handle_reply(), in which case r_result would already be set and
4667  * __complete_request() would be getting invoked, possibly even
4668  * concurrently with this call.
4669  */
4670 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4671 {
4672 	struct ceph_osd_client *osdc = req->r_osdc;
4673 
4674 	down_write(&osdc->lock);
4675 	if (req->r_osd)
4676 		cancel_request(req);
4677 	up_write(&osdc->lock);
4678 }
4679 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4680 
4681 /*
4682  * @timeout: in jiffies, 0 means "wait forever"
4683  */
4684 static int wait_request_timeout(struct ceph_osd_request *req,
4685 				unsigned long timeout)
4686 {
4687 	long left;
4688 
4689 	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4690 	left = wait_for_completion_killable_timeout(&req->r_completion,
4691 						ceph_timeout_jiffies(timeout));
4692 	if (left <= 0) {
4693 		left = left ?: -ETIMEDOUT;
4694 		ceph_osdc_cancel_request(req);
4695 	} else {
4696 		left = req->r_result; /* completed */
4697 	}
4698 
4699 	return left;
4700 }
4701 
4702 /*
4703  * wait for a request to complete
4704  */
4705 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4706 			   struct ceph_osd_request *req)
4707 {
4708 	return wait_request_timeout(req, 0);
4709 }
4710 EXPORT_SYMBOL(ceph_osdc_wait_request);
4711 
4712 /*
4713  * sync - wait for all in-flight requests to flush.  avoid starvation.
4714  */
4715 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4716 {
4717 	struct rb_node *n, *p;
4718 	u64 last_tid = atomic64_read(&osdc->last_tid);
4719 
4720 again:
4721 	down_read(&osdc->lock);
4722 	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4723 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4724 
4725 		mutex_lock(&osd->lock);
4726 		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4727 			struct ceph_osd_request *req =
4728 			    rb_entry(p, struct ceph_osd_request, r_node);
4729 
4730 			if (req->r_tid > last_tid)
4731 				break;
4732 
4733 			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4734 				continue;
4735 
4736 			ceph_osdc_get_request(req);
4737 			mutex_unlock(&osd->lock);
4738 			up_read(&osdc->lock);
4739 			dout("%s waiting on req %p tid %llu last_tid %llu\n",
4740 			     __func__, req, req->r_tid, last_tid);
4741 			wait_for_completion(&req->r_completion);
4742 			ceph_osdc_put_request(req);
4743 			goto again;
4744 		}
4745 
4746 		mutex_unlock(&osd->lock);
4747 	}
4748 
4749 	up_read(&osdc->lock);
4750 	dout("%s done last_tid %llu\n", __func__, last_tid);
4751 }
4752 EXPORT_SYMBOL(ceph_osdc_sync);
4753 
4754 /*
4755  * Returns a handle, caller owns a ref.
4756  */
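/*
 * Illustrative usage sketch (hypothetical callbacks and variables,
 * error handling elided):
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_wcb, my_errcb, priv);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(osdc, handle);
 *
 * ceph_osdc_unwatch() tears down the watch and releases the ref.
 */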
4757 struct ceph_osd_linger_request *
4758 ceph_osdc_watch(struct ceph_osd_client *osdc,
4759 		struct ceph_object_id *oid,
4760 		struct ceph_object_locator *oloc,
4761 		rados_watchcb2_t wcb,
4762 		rados_watcherrcb_t errcb,
4763 		void *data)
4764 {
4765 	struct ceph_osd_linger_request *lreq;
4766 	int ret;
4767 
4768 	lreq = linger_alloc(osdc);
4769 	if (!lreq)
4770 		return ERR_PTR(-ENOMEM);
4771 
4772 	lreq->is_watch = true;
4773 	lreq->wcb = wcb;
4774 	lreq->errcb = errcb;
4775 	lreq->data = data;
4776 	lreq->watch_valid_thru = jiffies;
4777 
4778 	ceph_oid_copy(&lreq->t.base_oid, oid);
4779 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4780 	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4781 	ktime_get_real_ts64(&lreq->mtime);
4782 
4783 	linger_submit(lreq);
4784 	ret = linger_reg_commit_wait(lreq);
4785 	if (ret) {
4786 		linger_cancel(lreq);
4787 		goto err_put_lreq;
4788 	}
4789 
4790 	return lreq;
4791 
4792 err_put_lreq:
4793 	linger_put(lreq);
4794 	return ERR_PTR(ret);
4795 }
4796 EXPORT_SYMBOL(ceph_osdc_watch);
4797 
4798 /*
4799  * Releases a ref.
4800  *
4801  * Times out after mount_timeout to preserve rbd unmap behaviour
4802  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4803  * with mount_timeout").
4804  */
4805 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4806 		      struct ceph_osd_linger_request *lreq)
4807 {
4808 	struct ceph_options *opts = osdc->client->options;
4809 	struct ceph_osd_request *req;
4810 	int ret;
4811 
4812 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4813 	if (!req)
4814 		return -ENOMEM;
4815 
4816 	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4817 	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4818 	req->r_flags = CEPH_OSD_FLAG_WRITE;
4819 	ktime_get_real_ts64(&req->r_mtime);
4820 	osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_UNWATCH,
4821 			      lreq->linger_id, 0);
4822 
4823 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4824 	if (ret)
4825 		goto out_put_req;
4826 
4827 	ceph_osdc_start_request(osdc, req);
4828 	linger_cancel(lreq);
4829 	linger_put(lreq);
4830 	ret = wait_request_timeout(req, opts->mount_timeout);
4831 
4832 out_put_req:
4833 	ceph_osdc_put_request(req);
4834 	return ret;
4835 }
4836 EXPORT_SYMBOL(ceph_osdc_unwatch);
4837 
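/*
 * Set up a NOTIFY_ACK op: the request data is a pagelist carrying the
 * notify_id, the cookie and an optional payload.
 */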
4838 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4839 				      u64 notify_id, u64 cookie, void *payload,
4840 				      u32 payload_len)
4841 {
4842 	struct ceph_osd_req_op *op;
4843 	struct ceph_pagelist *pl;
4844 	int ret;
4845 
4846 	op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4847 
4848 	pl = ceph_pagelist_alloc(GFP_NOIO);
4849 	if (!pl)
4850 		return -ENOMEM;
4851 
4852 	ret = ceph_pagelist_encode_64(pl, notify_id);
4853 	ret |= ceph_pagelist_encode_64(pl, cookie);
4854 	if (payload) {
4855 		ret |= ceph_pagelist_encode_32(pl, payload_len);
4856 		ret |= ceph_pagelist_append(pl, payload, payload_len);
4857 	} else {
4858 		ret |= ceph_pagelist_encode_32(pl, 0);
4859 	}
4860 	if (ret) {
4861 		ceph_pagelist_release(pl);
4862 		return -ENOMEM;
4863 	}
4864 
4865 	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4866 	op->indata_len = pl->length;
4867 	return 0;
4868 }
4869 
4870 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4871 			 struct ceph_object_id *oid,
4872 			 struct ceph_object_locator *oloc,
4873 			 u64 notify_id,
4874 			 u64 cookie,
4875 			 void *payload,
4876 			 u32 payload_len)
4877 {
4878 	struct ceph_osd_request *req;
4879 	int ret;
4880 
4881 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4882 	if (!req)
4883 		return -ENOMEM;
4884 
4885 	ceph_oid_copy(&req->r_base_oid, oid);
4886 	ceph_oloc_copy(&req->r_base_oloc, oloc);
4887 	req->r_flags = CEPH_OSD_FLAG_READ;
4888 
4889 	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4890 					 payload_len);
4891 	if (ret)
4892 		goto out_put_req;
4893 
4894 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4895 	if (ret)
4896 		goto out_put_req;
4897 
4898 	ceph_osdc_start_request(osdc, req);
4899 	ret = ceph_osdc_wait_request(osdc, req);
4900 
4901 out_put_req:
4902 	ceph_osdc_put_request(req);
4903 	return ret;
4904 }
4905 EXPORT_SYMBOL(ceph_osdc_notify_ack);
4906 
4907 /*
4908  * @timeout: in seconds
4909  *
4910  * @preply_{pages,len} are initialized both on success and error.
4911  * The caller is responsible for:
4912  *
4913  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4914  */
4915 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4916 		     struct ceph_object_id *oid,
4917 		     struct ceph_object_locator *oloc,
4918 		     void *payload,
4919 		     u32 payload_len,
4920 		     u32 timeout,
4921 		     struct page ***preply_pages,
4922 		     size_t *preply_len)
4923 {
4924 	struct ceph_osd_linger_request *lreq;
4925 	int ret;
4926 
4927 	WARN_ON(!timeout);
4928 	if (preply_pages) {
4929 		*preply_pages = NULL;
4930 		*preply_len = 0;
4931 	}
4932 
4933 	lreq = linger_alloc(osdc);
4934 	if (!lreq)
4935 		return -ENOMEM;
4936 
4937 	lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO);
4938 	if (!lreq->request_pl) {
4939 		ret = -ENOMEM;
4940 		goto out_put_lreq;
4941 	}
4942 
4943 	ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */
4944 	ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout);
4945 	ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len);
4946 	ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len);
4947 	if (ret) {
4948 		ret = -ENOMEM;
4949 		goto out_put_lreq;
4950 	}
4951 
4952 	/* for notify_id */
4953 	lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO);
4954 	if (IS_ERR(lreq->notify_id_pages)) {
4955 		ret = PTR_ERR(lreq->notify_id_pages);
4956 		lreq->notify_id_pages = NULL;
4957 		goto out_put_lreq;
4958 	}
4959 
4960 	lreq->preply_pages = preply_pages;
4961 	lreq->preply_len = preply_len;
4962 
4963 	ceph_oid_copy(&lreq->t.base_oid, oid);
4964 	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4965 	lreq->t.flags = CEPH_OSD_FLAG_READ;
4966 
4967 	linger_submit(lreq);
4968 	ret = linger_reg_commit_wait(lreq);
4969 	if (!ret)
4970 		ret = linger_notify_finish_wait(lreq,
4971 				 msecs_to_jiffies(2 * timeout * MSEC_PER_SEC));
4972 	else
4973 		dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4974 
4975 	linger_cancel(lreq);
4976 out_put_lreq:
4977 	linger_put(lreq);
4978 	return ret;
4979 }
4980 EXPORT_SYMBOL(ceph_osdc_notify);
4981 
4982 /*
4983  * Return the number of milliseconds since the watch was last
4984  * confirmed, or an error.  If there is an error, the watch is no
4985  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4986  */
4987 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4988 			  struct ceph_osd_linger_request *lreq)
4989 {
4990 	unsigned long stamp, age;
4991 	int ret;
4992 
4993 	down_read(&osdc->lock);
4994 	mutex_lock(&lreq->lock);
4995 	stamp = lreq->watch_valid_thru;
4996 	if (!list_empty(&lreq->pending_lworks)) {
4997 		struct linger_work *lwork =
4998 		    list_first_entry(&lreq->pending_lworks,
4999 				     struct linger_work,
5000 				     pending_item);
5001 
5002 		if (time_before(lwork->queued_stamp, stamp))
5003 			stamp = lwork->queued_stamp;
5004 	}
5005 	age = jiffies - stamp;
5006 	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
5007 	     lreq, lreq->linger_id, age, lreq->last_error);
5008 	/* we are truncating to msecs, so return a safe upper bound */
5009 	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
5010 
5011 	mutex_unlock(&lreq->lock);
5012 	up_read(&osdc->lock);
5013 	return ret;
5014 }
5015 
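/*
 * Decode a single watch_item_t: watcher entity name, cookie and (for
 * v2+) address.  The per-watch timeout is skipped.
 */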
5016 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
5017 {
5018 	u8 struct_v;
5019 	u32 struct_len;
5020 	int ret;
5021 
5022 	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
5023 				  &struct_v, &struct_len);
5024 	if (ret)
5025 		goto bad;
5026 
5027 	ret = -EINVAL;
5028 	ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
5029 	ceph_decode_64_safe(p, end, item->cookie, bad);
5030 	ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */
5031 
5032 	if (struct_v >= 2) {
5033 		ret = ceph_decode_entity_addr(p, end, &item->addr);
5034 		if (ret)
5035 			goto bad;
5036 	} else {
5037 		ret = 0;
5038 	}
5039 
5040 	dout("%s %s%llu cookie %llu addr %s\n", __func__,
5041 	     ENTITY_NAME(item->name), item->cookie,
5042 	     ceph_pr_addr(&item->addr));
5043 bad:
5044 	return ret;
5045 }
5046 
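/*
 * Decode an obj_list_watch_response_t into a kcalloc'ed array of
 * watch items.  On success the caller is responsible for kfree()ing
 * *watchers.
 */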
5047 static int decode_watchers(void **p, void *end,
5048 			   struct ceph_watch_item **watchers,
5049 			   u32 *num_watchers)
5050 {
5051 	u8 struct_v;
5052 	u32 struct_len;
5053 	int i;
5054 	int ret;
5055 
5056 	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
5057 				  &struct_v, &struct_len);
5058 	if (ret)
5059 		return ret;
5060 
5061 	*num_watchers = ceph_decode_32(p);
5062 	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
5063 	if (!*watchers)
5064 		return -ENOMEM;
5065 
5066 	for (i = 0; i < *num_watchers; i++) {
5067 		ret = decode_watcher(p, end, *watchers + i);
5068 		if (ret) {
5069 			kfree(*watchers);
5070 			return ret;
5071 		}
5072 	}
5073 
5074 	return 0;
5075 }
5076 
5077 /*
5078  * On success, the caller is responsible for:
5079  *
5080  *     kfree(watchers);
5081  */
5082 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
5083 			    struct ceph_object_id *oid,
5084 			    struct ceph_object_locator *oloc,
5085 			    struct ceph_watch_item **watchers,
5086 			    u32 *num_watchers)
5087 {
5088 	struct ceph_osd_request *req;
5089 	struct page **pages;
5090 	int ret;
5091 
5092 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5093 	if (!req)
5094 		return -ENOMEM;
5095 
5096 	ceph_oid_copy(&req->r_base_oid, oid);
5097 	ceph_oloc_copy(&req->r_base_oloc, oloc);
5098 	req->r_flags = CEPH_OSD_FLAG_READ;
5099 
5100 	pages = ceph_alloc_page_vector(1, GFP_NOIO);
5101 	if (IS_ERR(pages)) {
5102 		ret = PTR_ERR(pages);
5103 		goto out_put_req;
5104 	}
5105 
5106 	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
5107 	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
5108 						 response_data),
5109 				 pages, PAGE_SIZE, 0, false, true);
5110 
5111 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5112 	if (ret)
5113 		goto out_put_req;
5114 
5115 	ceph_osdc_start_request(osdc, req);
5116 	ret = ceph_osdc_wait_request(osdc, req);
5117 	if (ret >= 0) {
5118 		void *p = page_address(pages[0]);
5119 		void *const end = p + req->r_ops[0].outdata_len;
5120 
5121 		ret = decode_watchers(&p, end, watchers, num_watchers);
5122 	}
5123 
5124 out_put_req:
5125 	ceph_osdc_put_request(req);
5126 	return ret;
5127 }
5128 EXPORT_SYMBOL(ceph_osdc_list_watchers);
5129 
5130 /*
5131  * Call all pending notify callbacks - for use after a watch is
5132  * unregistered, to make sure no more callbacks for it will be invoked.
5133  */
5134 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
5135 {
5136 	dout("%s osdc %p\n", __func__, osdc);
5137 	flush_workqueue(osdc->notify_wq);
5138 }
5139 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
5140 
5141 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
5142 {
5143 	down_read(&osdc->lock);
5144 	maybe_request_map(osdc);
5145 	up_read(&osdc->lock);
5146 }
5147 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
5148 
5149 /*
5150  * Execute an OSD class method on an object.
5151  *
5152  * @flags: CEPH_OSD_FLAG_*
5153  * @resp_len: in/out param for reply length
5154  */
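/*
 * Illustrative call (hypothetical class/method names, page setup and
 * error handling elided; req_len must not exceed PAGE_SIZE):
 *
 *	size_t resp_len = PAGE_SIZE;
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "myclass", "mymethod",
 *			     CEPH_OSD_FLAG_READ, req_page, req_len,
 *			     &resp_page, &resp_len);
 */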
5155 int ceph_osdc_call(struct ceph_osd_client *osdc,
5156 		   struct ceph_object_id *oid,
5157 		   struct ceph_object_locator *oloc,
5158 		   const char *class, const char *method,
5159 		   unsigned int flags,
5160 		   struct page *req_page, size_t req_len,
5161 		   struct page **resp_pages, size_t *resp_len)
5162 {
5163 	struct ceph_osd_request *req;
5164 	int ret;
5165 
5166 	if (req_len > PAGE_SIZE)
5167 		return -E2BIG;
5168 
5169 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5170 	if (!req)
5171 		return -ENOMEM;
5172 
5173 	ceph_oid_copy(&req->r_base_oid, oid);
5174 	ceph_oloc_copy(&req->r_base_oloc, oloc);
5175 	req->r_flags = flags;
5176 
5177 	ret = osd_req_op_cls_init(req, 0, class, method);
5178 	if (ret)
5179 		goto out_put_req;
5180 
5181 	if (req_page)
5182 		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
5183 						  0, false, false);
5184 	if (resp_pages)
5185 		osd_req_op_cls_response_data_pages(req, 0, resp_pages,
5186 						   *resp_len, 0, false, false);
5187 
5188 	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5189 	if (ret)
5190 		goto out_put_req;
5191 
5192 	ceph_osdc_start_request(osdc, req);
5193 	ret = ceph_osdc_wait_request(osdc, req);
5194 	if (ret >= 0) {
5195 		ret = req->r_ops[0].rval;
5196 		if (resp_pages)
5197 			*resp_len = req->r_ops[0].outdata_len;
5198 	}
5199 
5200 out_put_req:
5201 	ceph_osdc_put_request(req);
5202 	return ret;
5203 }
5204 EXPORT_SYMBOL(ceph_osdc_call);
5205 
5206 /*
5207  * reset all osd connections
5208  */
5209 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
5210 {
5211 	struct rb_node *n;
5212 
5213 	down_write(&osdc->lock);
5214 	for (n = rb_first(&osdc->osds); n; ) {
5215 		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
5216 
5217 		n = rb_next(n);
5218 		if (!reopen_osd(osd))
5219 			kick_osd_requests(osd);
5220 	}
5221 	up_write(&osdc->lock);
5222 }
5223 
5224 /*
5225  * init, shutdown
5226  */
5227 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
5228 {
5229 	int err;
5230 
5231 	dout("init\n");
5232 	osdc->client = client;
5233 	init_rwsem(&osdc->lock);
5234 	osdc->osds = RB_ROOT;
5235 	INIT_LIST_HEAD(&osdc->osd_lru);
5236 	spin_lock_init(&osdc->osd_lru_lock);
5237 	osd_init(&osdc->homeless_osd);
5238 	osdc->homeless_osd.o_osdc = osdc;
5239 	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5240 	osdc->last_linger_id = CEPH_LINGER_ID_START;
5241 	osdc->linger_requests = RB_ROOT;
5242 	osdc->map_checks = RB_ROOT;
5243 	osdc->linger_map_checks = RB_ROOT;
5244 	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5245 	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5246 
5247 	err = -ENOMEM;
5248 	osdc->osdmap = ceph_osdmap_alloc();
5249 	if (!osdc->osdmap)
5250 		goto out;
5251 
5252 	osdc->req_mempool = mempool_create_slab_pool(10,
5253 						     ceph_osd_request_cache);
5254 	if (!osdc->req_mempool)
5255 		goto out_map;
5256 
5257 	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5258 				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
5259 	if (err < 0)
5260 		goto out_mempool;
5261 	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5262 				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
5263 				"osd_op_reply");
5264 	if (err < 0)
5265 		goto out_msgpool;
5266 
5267 	err = -ENOMEM;
5268 	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5269 	if (!osdc->notify_wq)
5270 		goto out_msgpool_reply;
5271 
5272 	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5273 	if (!osdc->completion_wq)
5274 		goto out_notify_wq;
5275 
5276 	schedule_delayed_work(&osdc->timeout_work,
5277 			      osdc->client->options->osd_keepalive_timeout);
5278 	schedule_delayed_work(&osdc->osds_timeout_work,
5279 	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5280 
5281 	return 0;
5282 
5283 out_notify_wq:
5284 	destroy_workqueue(osdc->notify_wq);
5285 out_msgpool_reply:
5286 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5287 out_msgpool:
5288 	ceph_msgpool_destroy(&osdc->msgpool_op);
5289 out_mempool:
5290 	mempool_destroy(osdc->req_mempool);
5291 out_map:
5292 	ceph_osdmap_destroy(osdc->osdmap);
5293 out:
5294 	return err;
5295 }
5296 
5297 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5298 {
5299 	destroy_workqueue(osdc->completion_wq);
5300 	destroy_workqueue(osdc->notify_wq);
5301 	cancel_delayed_work_sync(&osdc->timeout_work);
5302 	cancel_delayed_work_sync(&osdc->osds_timeout_work);
5303 
5304 	down_write(&osdc->lock);
5305 	while (!RB_EMPTY_ROOT(&osdc->osds)) {
5306 		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5307 						struct ceph_osd, o_node);
5308 		close_osd(osd);
5309 	}
5310 	up_write(&osdc->lock);
5311 	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5312 	osd_cleanup(&osdc->homeless_osd);
5313 
5314 	WARN_ON(!list_empty(&osdc->osd_lru));
5315 	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5316 	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5317 	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5318 	WARN_ON(atomic_read(&osdc->num_requests));
5319 	WARN_ON(atomic_read(&osdc->num_homeless));
5320 
5321 	ceph_osdmap_destroy(osdc->osdmap);
5322 	mempool_destroy(osdc->req_mempool);
5323 	ceph_msgpool_destroy(&osdc->msgpool_op);
5324 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5325 }
5326 
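/*
 * Set up a COPY_FROM2 op.  The source oid/oloc, truncate_seq and
 * truncate_size are encoded into a single page that becomes the op's
 * request data.
 */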
5327 int osd_req_op_copy_from_init(struct ceph_osd_request *req,
5328 			      u64 src_snapid, u64 src_version,
5329 			      struct ceph_object_id *src_oid,
5330 			      struct ceph_object_locator *src_oloc,
5331 			      u32 src_fadvise_flags,
5332 			      u32 dst_fadvise_flags,
5333 			      u32 truncate_seq, u64 truncate_size,
5334 			      u8 copy_from_flags)
5335 {
5336 	struct ceph_osd_req_op *op;
5337 	struct page **pages;
5338 	void *p, *end;
5339 
5340 	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
5341 	if (IS_ERR(pages))
5342 		return PTR_ERR(pages);
5343 
5344 	op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
5345 			     dst_fadvise_flags);
5346 	op->copy_from.snapid = src_snapid;
5347 	op->copy_from.src_version = src_version;
5348 	op->copy_from.flags = copy_from_flags;
5349 	op->copy_from.src_fadvise_flags = src_fadvise_flags;
5350 
5351 	p = page_address(pages[0]);
5352 	end = p + PAGE_SIZE;
5353 	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
5354 	encode_oloc(&p, end, src_oloc);
5355 	ceph_encode_32(&p, truncate_seq);
5356 	ceph_encode_64(&p, truncate_size);
5357 	op->indata_len = PAGE_SIZE - (end - p);
5358 
5359 	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
5360 				 op->indata_len, 0, false, true);
5361 	return 0;
5362 }
5363 EXPORT_SYMBOL(osd_req_op_copy_from_init);
5364 
5365 int __init ceph_osdc_setup(void)
5366 {
5367 	size_t size = sizeof(struct ceph_osd_request) +
5368 	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5369 
5370 	BUG_ON(ceph_osd_request_cache);
5371 	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5372 						   0, 0, NULL);
5373 
5374 	return ceph_osd_request_cache ? 0 : -ENOMEM;
5375 }
5376 
5377 void ceph_osdc_cleanup(void)
5378 {
5379 	BUG_ON(!ceph_osd_request_cache);
5380 	kmem_cache_destroy(ceph_osd_request_cache);
5381 	ceph_osd_request_cache = NULL;
5382 }
5383 
5384 /*
5385  * handle incoming message
5386  */
5387 static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5388 {
5389 	struct ceph_osd *osd = con->private;
5390 	struct ceph_osd_client *osdc = osd->o_osdc;
5391 	int type = le16_to_cpu(msg->hdr.type);
5392 
5393 	switch (type) {
5394 	case CEPH_MSG_OSD_MAP:
5395 		ceph_osdc_handle_map(osdc, msg);
5396 		break;
5397 	case CEPH_MSG_OSD_OPREPLY:
5398 		handle_reply(osd, msg);
5399 		break;
5400 	case CEPH_MSG_OSD_BACKOFF:
5401 		handle_backoff(osd, msg);
5402 		break;
5403 	case CEPH_MSG_WATCH_NOTIFY:
5404 		handle_watch_notify(osdc, msg);
5405 		break;
5406 
5407 	default:
5408 		pr_err("received unknown message type %d %s\n", type,
5409 		       ceph_msg_type_name(type));
5410 	}
5411 
5412 	ceph_msg_put(msg);
5413 }
5414 
5415 /* How much sparse data was requested? */
5416 static u64 sparse_data_requested(struct ceph_osd_request *req)
5417 {
5418 	u64 len = 0;
5419 
5420 	if (req->r_flags & CEPH_OSD_FLAG_READ) {
5421 		int i;
5422 
5423 		for (i = 0; i < req->r_num_ops; ++i) {
5424 			struct ceph_osd_req_op *op = &req->r_ops[i];
5425 
5426 			if (op->op == CEPH_OSD_OP_SPARSE_READ)
5427 				len += op->extent.length;
5428 		}
5429 	}
5430 	return len;
5431 }
5432 
5433 /*
5434  * Look up and return the message for an incoming reply.  Don't try
5435  * to do anything about a data portion that is larger than what was
5436  * preallocated - for now, just skip the message.
5437  */
5438 static struct ceph_msg *get_reply(struct ceph_connection *con,
5439 				  struct ceph_msg_header *hdr,
5440 				  int *skip)
5441 {
5442 	struct ceph_osd *osd = con->private;
5443 	struct ceph_osd_client *osdc = osd->o_osdc;
5444 	struct ceph_msg *m = NULL;
5445 	struct ceph_osd_request *req;
5446 	int front_len = le32_to_cpu(hdr->front_len);
5447 	int data_len = le32_to_cpu(hdr->data_len);
5448 	u64 tid = le64_to_cpu(hdr->tid);
5449 	u64 srlen;
5450 
5451 	down_read(&osdc->lock);
5452 	if (!osd_registered(osd)) {
5453 		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5454 		*skip = 1;
5455 		goto out_unlock_osdc;
5456 	}
5457 	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5458 
5459 	mutex_lock(&osd->lock);
5460 	req = lookup_request(&osd->o_requests, tid);
5461 	if (!req) {
5462 		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5463 		     osd->o_osd, tid);
5464 		*skip = 1;
5465 		goto out_unlock_session;
5466 	}
5467 
5468 	ceph_msg_revoke_incoming(req->r_reply);
5469 
5470 	if (front_len > req->r_reply->front_alloc_len) {
5471 		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5472 			__func__, osd->o_osd, req->r_tid, front_len,
5473 			req->r_reply->front_alloc_len);
5474 		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5475 				 false);
5476 		if (!m)
5477 			goto out_unlock_session;
5478 		ceph_msg_put(req->r_reply);
5479 		req->r_reply = m;
5480 	}
5481 
5482 	srlen = sparse_data_requested(req);
5483 	if (!srlen && data_len > req->r_reply->data_length) {
5484 		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5485 			__func__, osd->o_osd, req->r_tid, data_len,
5486 			req->r_reply->data_length);
5487 		m = NULL;
5488 		*skip = 1;
5489 		goto out_unlock_session;
5490 	}
5491 
5492 	m = ceph_msg_get(req->r_reply);
5493 	m->sparse_read = (bool)srlen;
5494 
5495 	dout("get_reply tid %lld %p\n", tid, m);
5496 
5497 out_unlock_session:
5498 	mutex_unlock(&osd->lock);
5499 out_unlock_osdc:
5500 	up_read(&osdc->lock);
5501 	return m;
5502 }
5503 
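/*
 * Allocate a message for an incoming map/backoff/notify: a front buffer
 * sized from the header plus, if the header advertises a data payload, a
 * page vector for data_len bytes that is owned by (and freed along with)
 * the message.
 */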
5504 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5505 {
5506 	struct ceph_msg *m;
5507 	int type = le16_to_cpu(hdr->type);
5508 	u32 front_len = le32_to_cpu(hdr->front_len);
5509 	u32 data_len = le32_to_cpu(hdr->data_len);
5510 
5511 	m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
5512 	if (!m)
5513 		return NULL;
5514 
5515 	if (data_len) {
5516 		struct page **pages;
5517 
5518 		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5519 					       GFP_NOIO);
5520 		if (IS_ERR(pages)) {
5521 			ceph_msg_put(m);
5522 			return NULL;
5523 		}
5524 
5525 		ceph_msg_data_add_pages(m, pages, data_len, 0, true);
5526 	}
5527 
5528 	return m;
5529 }
5530 
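/*
 * alloc_msg hook: choose how to allocate an incoming message based on its
 * type, or set *skip for types we don't recognize.
 */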
5531 static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
5532 				      struct ceph_msg_header *hdr,
5533 				      int *skip)
5534 {
5535 	struct ceph_osd *osd = con->private;
5536 	int type = le16_to_cpu(hdr->type);
5537 
5538 	*skip = 0;
5539 	switch (type) {
5540 	case CEPH_MSG_OSD_MAP:
5541 	case CEPH_MSG_OSD_BACKOFF:
5542 	case CEPH_MSG_WATCH_NOTIFY:
5543 		return alloc_msg_with_page_vector(hdr);
5544 	case CEPH_MSG_OSD_OPREPLY:
5545 		return get_reply(con, hdr, skip);
5546 	default:
5547 		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5548 			osd->o_osd, type);
5549 		*skip = 1;
5550 		return NULL;
5551 	}
5552 }
5553 
5554 /*
5555  * Wrappers to refcount the containing ceph_osd struct
5556  */
5557 static struct ceph_connection *osd_get_con(struct ceph_connection *con)
5558 {
5559 	struct ceph_osd *osd = con->private;
5560 	if (get_osd(osd))
5561 		return con;
5562 	return NULL;
5563 }
5564 
5565 static void osd_put_con(struct ceph_connection *con)
5566 {
5567 	struct ceph_osd *osd = con->private;
5568 	put_osd(osd);
5569 }
5570 
5571 /*
5572  * authentication
5573  */
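/*
 * Annotation: the authorizer-based hooks below (osd_get_authorizer() and
 * friends) serve the original messenger protocol, while the
 * get_auth_request()/handle_auth_*() hooks further down serve the msgr2
 * handshake.  Both sets lean on the auth client hanging off the mon client.
 */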
5574 
5575 /*
5576  * Note: returned pointer is the address of a structure that's
5577  * managed separately.  Caller must *not* attempt to free it.
5578  */
5579 static struct ceph_auth_handshake *
5580 osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
5581 {
5582 	struct ceph_osd *o = con->private;
5583 	struct ceph_osd_client *osdc = o->o_osdc;
5584 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5585 	struct ceph_auth_handshake *auth = &o->o_auth;
5586 	int ret;
5587 
5588 	ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD,
5589 					 force_new, proto, NULL, NULL);
5590 	if (ret)
5591 		return ERR_PTR(ret);
5592 
5593 	return auth;
5594 }
5595 
5596 static int osd_add_authorizer_challenge(struct ceph_connection *con,
5597 				    void *challenge_buf, int challenge_buf_len)
5598 {
5599 	struct ceph_osd *o = con->private;
5600 	struct ceph_osd_client *osdc = o->o_osdc;
5601 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5602 
5603 	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5604 					    challenge_buf, challenge_buf_len);
5605 }
5606 
5607 static int osd_verify_authorizer_reply(struct ceph_connection *con)
5608 {
5609 	struct ceph_osd *o = con->private;
5610 	struct ceph_osd_client *osdc = o->o_osdc;
5611 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5612 	struct ceph_auth_handshake *auth = &o->o_auth;
5613 
5614 	return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
5615 		auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
5616 		NULL, NULL, NULL, NULL);
5617 }
5618 
5619 static int osd_invalidate_authorizer(struct ceph_connection *con)
5620 {
5621 	struct ceph_osd *o = con->private;
5622 	struct ceph_osd_client *osdc = o->o_osdc;
5623 	struct ceph_auth_client *ac = osdc->client->monc.auth;
5624 
5625 	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5626 	return ceph_monc_validate_auth(&osdc->client->monc);
5627 }
5628 
5629 static int osd_get_auth_request(struct ceph_connection *con,
5630 				void *buf, int *buf_len,
5631 				void **authorizer, int *authorizer_len)
5632 {
5633 	struct ceph_osd *o = con->private;
5634 	struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
5635 	struct ceph_auth_handshake *auth = &o->o_auth;
5636 	int ret;
5637 
5638 	ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD,
5639 				       buf, buf_len);
5640 	if (ret)
5641 		return ret;
5642 
5643 	*authorizer = auth->authorizer_buf;
5644 	*authorizer_len = auth->authorizer_buf_len;
5645 	return 0;
5646 }
5647 
5648 static int osd_handle_auth_reply_more(struct ceph_connection *con,
5649 				      void *reply, int reply_len,
5650 				      void *buf, int *buf_len,
5651 				      void **authorizer, int *authorizer_len)
5652 {
5653 	struct ceph_osd *o = con->private;
5654 	struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
5655 	struct ceph_auth_handshake *auth = &o->o_auth;
5656 	int ret;
5657 
5658 	ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
5659 					      buf, buf_len);
5660 	if (ret)
5661 		return ret;
5662 
5663 	*authorizer = auth->authorizer_buf;
5664 	*authorizer_len = auth->authorizer_buf_len;
5665 	return 0;
5666 }
5667 
5668 static int osd_handle_auth_done(struct ceph_connection *con,
5669 				u64 global_id, void *reply, int reply_len,
5670 				u8 *session_key, int *session_key_len,
5671 				u8 *con_secret, int *con_secret_len)
5672 {
5673 	struct ceph_osd *o = con->private;
5674 	struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
5675 	struct ceph_auth_handshake *auth = &o->o_auth;
5676 
5677 	return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
5678 					       session_key, session_key_len,
5679 					       con_secret, con_secret_len);
5680 }
5681 
5682 static int osd_handle_auth_bad_method(struct ceph_connection *con,
5683 				      int used_proto, int result,
5684 				      const int *allowed_protos, int proto_cnt,
5685 				      const int *allowed_modes, int mode_cnt)
5686 {
5687 	struct ceph_osd *o = con->private;
5688 	struct ceph_mon_client *monc = &o->o_osdc->client->monc;
5689 	int ret;
5690 
5691 	if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_OSD,
5692 					    used_proto, result,
5693 					    allowed_protos, proto_cnt,
5694 					    allowed_modes, mode_cnt)) {
5695 		ret = ceph_monc_validate_auth(monc);
5696 		if (ret)
5697 			return ret;
5698 	}
5699 
5700 	return -EACCES;
5701 }
5702 
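/*
 * reencode_message hook: when the messenger asks us to re-encode a message,
 * finish the encoding of outgoing OSD op requests; other types are left
 * untouched.
 */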
5703 static void osd_reencode_message(struct ceph_msg *msg)
5704 {
5705 	int type = le16_to_cpu(msg->hdr.type);
5706 
5707 	if (type == CEPH_MSG_OSD_OP)
5708 		encode_request_finish(msg);
5709 }
5710 
5711 static int osd_sign_message(struct ceph_msg *msg)
5712 {
5713 	struct ceph_osd *o = msg->con->private;
5714 	struct ceph_auth_handshake *auth = &o->o_auth;
5715 
5716 	return ceph_auth_sign_message(auth, msg);
5717 }
5718 
5719 static int osd_check_message_signature(struct ceph_msg *msg)
5720 {
5721 	struct ceph_osd *o = msg->con->private;
5722 	struct ceph_auth_handshake *auth = &o->o_auth;
5723 
5724 	return ceph_auth_check_message_signature(auth, msg);
5725 }
5726 
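/*
 * Advance the data cursor by @len bytes, optionally zero-filling the page
 * ranges walked over (used to fill the holes between sparse-read extents).
 */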
5727 static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len,
5728 			   bool zero)
5729 {
5730 	while (len) {
5731 		struct page *page;
5732 		size_t poff, plen;
5733 
5734 		page = ceph_msg_data_next(cursor, &poff, &plen);
5735 		if (plen > len)
5736 			plen = len;
5737 		if (zero)
5738 			zero_user_segment(page, poff, poff + plen);
5739 		len -= plen;
5740 		ceph_msg_data_advance(cursor, plen);
5741 	}
5742 }
5743 
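/*
 * Finish off the sparse-read op we were receiving (handing its extent map
 * back to the matching request) and find the next CEPH_OSD_OP_SPARSE_READ
 * op in that request, taking over its preallocated extent buffer.
 *
 * Returns 1 if another sparse-read op was found, 0 if none remain, or a
 * negative errno if the request can't be looked up.
 */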
5744 static int prep_next_sparse_read(struct ceph_connection *con,
5745 				 struct ceph_msg_data_cursor *cursor)
5746 {
5747 	struct ceph_osd *o = con->private;
5748 	struct ceph_sparse_read *sr = &o->o_sparse_read;
5749 	struct ceph_osd_request *req;
5750 	struct ceph_osd_req_op *op;
5751 
5752 	spin_lock(&o->o_requests_lock);
5753 	req = lookup_request(&o->o_requests, le64_to_cpu(con->in_msg->hdr.tid));
5754 	if (!req) {
5755 		spin_unlock(&o->o_requests_lock);
5756 		return -EBADR;
5757 	}
5758 
5759 	if (o->o_sparse_op_idx < 0) {
5760 		u64 srlen = sparse_data_requested(req);
5761 
5762 		dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
5763 		     __func__, o->o_osd, srlen);
5764 		ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
5765 	} else {
5766 		u64 end;
5767 
5768 		op = &req->r_ops[o->o_sparse_op_idx];
5769 
5770 		WARN_ON_ONCE(op->extent.sparse_ext);
5771 
5772 		/* hand back buffer we took earlier */
5773 		op->extent.sparse_ext = sr->sr_extent;
5774 		sr->sr_extent = NULL;
5775 		op->extent.sparse_ext_cnt = sr->sr_count;
5776 		sr->sr_ext_len = 0;
5777 		dout("%s: [%d] completed extent array len %d cursor->resid %zd\n",
5778 		     __func__, o->o_osd, op->extent.sparse_ext_cnt, cursor->resid);
5779 		/* Advance to end of data for this operation */
5780 		end = ceph_sparse_ext_map_end(op);
5781 		if (end < sr->sr_req_len)
5782 			advance_cursor(cursor, sr->sr_req_len - end, false);
5783 	}
5784 
5785 	ceph_init_sparse_read(sr);
5786 
5787 	/* find next op in this request (if any) */
5788 	while (++o->o_sparse_op_idx < req->r_num_ops) {
5789 		op = &req->r_ops[o->o_sparse_op_idx];
5790 		if (op->op == CEPH_OSD_OP_SPARSE_READ)
5791 			goto found;
5792 	}
5793 
5794 	/* reset for next sparse read request */
5795 	spin_unlock(&o->o_requests_lock);
5796 	o->o_sparse_op_idx = -1;
5797 	return 0;
5798 found:
5799 	sr->sr_req_off = op->extent.offset;
5800 	sr->sr_req_len = op->extent.length;
5801 	sr->sr_pos = sr->sr_req_off;
5802 	dout("%s: [%d] new sparse read op at idx %d 0x%llx~0x%llx\n", __func__,
5803 	     o->o_osd, o->o_sparse_op_idx, sr->sr_req_off, sr->sr_req_len);
5804 
5805 	/* hand off request's sparse extent map buffer */
5806 	sr->sr_ext_len = op->extent.sparse_ext_cnt;
5807 	op->extent.sparse_ext_cnt = 0;
5808 	sr->sr_extent = op->extent.sparse_ext;
5809 	op->extent.sparse_ext = NULL;
5810 
5811 	spin_unlock(&o->o_requests_lock);
5812 	return 1;
5813 }
5814 
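/*
 * Extent descriptors are received little-endian straight into sr_extent,
 * so big-endian hosts must byteswap them in place before they are used.
 */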
5815 #ifdef __BIG_ENDIAN
5816 static inline void convert_extent_map(struct ceph_sparse_read *sr)
5817 {
5818 	int i;
5819 
5820 	for (i = 0; i < sr->sr_count; i++) {
5821 		struct ceph_sparse_extent *ext = &sr->sr_extent[i];
5822 
5823 		ext->off = le64_to_cpu((__force __le64)ext->off);
5824 		ext->len = le64_to_cpu((__force __le64)ext->len);
5825 	}
5826 }
5827 #else
5828 static inline void convert_extent_map(struct ceph_sparse_read *sr)
5829 {
5830 }
5831 #endif
5832 
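/* Hard cap on the number of extents accepted in a single sparse-read reply */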
5833 #define MAX_EXTENTS 4096
5834 
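/*
 * sparse_read hook, driven by the messenger while a sparse-read reply is
 * being received.  As decoded here, each sparse-read op's reply carries a
 * 32-bit extent count, an array of (offset, length) extent descriptors, a
 * 32-bit data length and then the extent data itself.  Each call returns
 * how many bytes to receive next and points *pbuf at the buffer to receive
 * them into (NULL means "into the cursor pages"), stepping through the
 * HDR -> EXTENTS -> DATA_LEN -> DATA states for each op in turn.
 */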
5835 static int osd_sparse_read(struct ceph_connection *con,
5836 			   struct ceph_msg_data_cursor *cursor,
5837 			   char **pbuf)
5838 {
5839 	struct ceph_osd *o = con->private;
5840 	struct ceph_sparse_read *sr = &o->o_sparse_read;
5841 	u32 count = sr->sr_count;
5842 	u64 eoff, elen;
5843 	int ret;
5844 
5845 	switch (sr->sr_state) {
5846 	case CEPH_SPARSE_READ_HDR:
5847 next_op:
5848 		ret = prep_next_sparse_read(con, cursor);
5849 		if (ret <= 0)
5850 			return ret;
5851 
5852 		/* number of extents */
5853 		ret = sizeof(sr->sr_count);
5854 		*pbuf = (char *)&sr->sr_count;
5855 		sr->sr_state = CEPH_SPARSE_READ_EXTENTS;
5856 		break;
5857 	case CEPH_SPARSE_READ_EXTENTS:
5858 		/* Convert sr_count to host-endian */
5859 		count = le32_to_cpu((__force __le32)sr->sr_count);
5860 		sr->sr_count = count;
5861 		dout("[%d] got %u extents\n", o->o_osd, count);
5862 
5863 		if (count > 0) {
5864 			if (!sr->sr_extent || count > sr->sr_ext_len) {
5865 				/*
5866 				 * Apply a hard cap to the number of extents.
5867 				 * If we have more, assume something is wrong.
5868 				 */
5869 				if (count > MAX_EXTENTS) {
5870 					dout("%s: OSD returned 0x%x extents in a single reply!\n",
5871 					     __func__, count);
5872 					return -EREMOTEIO;
5873 				}
5874 
5875 				/* no extent array provided, or too short */
5876 				kfree(sr->sr_extent);
5877 				sr->sr_extent = kmalloc_array(count,
5878 							      sizeof(*sr->sr_extent),
5879 							      GFP_NOIO);
5880 				if (!sr->sr_extent)
5881 					return -ENOMEM;
5882 				sr->sr_ext_len = count;
5883 			}
5884 			ret = count * sizeof(*sr->sr_extent);
5885 			*pbuf = (char *)sr->sr_extent;
5886 			sr->sr_state = CEPH_SPARSE_READ_DATA_LEN;
5887 			break;
5888 		}
5889 		/* No extents? Read data len */
5890 		fallthrough;
5891 	case CEPH_SPARSE_READ_DATA_LEN:
5892 		convert_extent_map(sr);
5893 		ret = sizeof(sr->sr_datalen);
5894 		*pbuf = (char *)&sr->sr_datalen;
5895 		sr->sr_state = CEPH_SPARSE_READ_DATA;
5896 		break;
5897 	case CEPH_SPARSE_READ_DATA:
5898 		if (sr->sr_index >= count) {
5899 			sr->sr_state = CEPH_SPARSE_READ_HDR;
5900 			goto next_op;
5901 		}
5902 
5903 		eoff = sr->sr_extent[sr->sr_index].off;
5904 		elen = sr->sr_extent[sr->sr_index].len;
5905 
5906 		dout("[%d] ext %d off 0x%llx len 0x%llx\n",
5907 		     o->o_osd, sr->sr_index, eoff, elen);
5908 
5909 		if (elen > INT_MAX) {
5910 			dout("Sparse read extent length too long (0x%llx)\n",
5911 			     elen);
5912 			return -EREMOTEIO;
5913 		}
5914 
5915 		/* zero out anything from sr_pos to start of extent */
5916 		if (sr->sr_pos < eoff)
5917 			advance_cursor(cursor, eoff - sr->sr_pos, true);
5918 
5919 		/* Set position to end of extent */
5920 		sr->sr_pos = eoff + elen;
5921 
5922 		/* send back the new length and nullify the ptr */
5923 		cursor->sr_resid = elen;
5924 		ret = elen;
5925 		*pbuf = NULL;
5926 
5927 		/* Bump the array index */
5928 		++sr->sr_index;
5929 		break;
5930 	}
5931 	return ret;
5932 }
5933 
5934 static const struct ceph_connection_operations osd_con_ops = {
5935 	.get = osd_get_con,
5936 	.put = osd_put_con,
5937 	.sparse_read = osd_sparse_read,
5938 	.alloc_msg = osd_alloc_msg,
5939 	.dispatch = osd_dispatch,
5940 	.fault = osd_fault,
5941 	.reencode_message = osd_reencode_message,
5942 	.get_authorizer = osd_get_authorizer,
5943 	.add_authorizer_challenge = osd_add_authorizer_challenge,
5944 	.verify_authorizer_reply = osd_verify_authorizer_reply,
5945 	.invalidate_authorizer = osd_invalidate_authorizer,
5946 	.sign_message = osd_sign_message,
5947 	.check_message_signature = osd_check_message_signature,
5948 	.get_auth_request = osd_get_auth_request,
5949 	.handle_auth_reply_more = osd_handle_auth_reply_more,
5950 	.handle_auth_done = osd_handle_auth_done,
5951 	.handle_auth_bad_method = osd_handle_auth_bad_method,
5952 };
5953