Lines Matching full:osd

36  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
41 * are described by the osd map.
43 * We keep track of pending OSD requests (read, write), resubmit
46 * channel with an OSD is reset.
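The header comment above describes the core job of this file: pending OSD requests are remembered and resubmitted whenever the osd map changes or the communications channel with an OSD is reset. As a rough illustration only (all names below are hypothetical and not the ceph_osd_client API), the pattern boils down to keeping requests keyed by tid and recomputing each request's target when a new map epoch arrives:

    /*
     * Minimal userspace sketch of the "track and resubmit" idea from the
     * header comment above.  struct request, remap_target() and resend()
     * are illustrative stand-ins, not kernel code.
     */
    #include <stdio.h>

    struct request {
        unsigned long long tid;   /* transaction id, assigned at submit time */
        int osd;                  /* current target OSD, -1 == unmapped */
        unsigned int epoch;       /* map epoch the request was sent with */
    };

    /* pretend CRUSH: map a tid to an OSD for a given map epoch */
    static int remap_target(unsigned long long tid, unsigned int epoch)
    {
        return (int)((tid + epoch) % 4);
    }

    static void resend(struct request *req, unsigned int epoch)
    {
        req->osd = remap_target(req->tid, epoch);
        req->epoch = epoch;
        printf("tid %llu -> osd%d (epoch %u)\n", req->tid, req->osd, epoch);
    }

    int main(void)
    {
        struct request reqs[] = { { 1, -1, 0 }, { 2, -1, 0 }, { 3, -1, 0 } };
        unsigned int epoch = 10;
        size_t i;

        for (i = 0; i < 3; i++)          /* initial submission */
            resend(&reqs[i], epoch);

        epoch = 11;                      /* new osdmap arrives */
        for (i = 0; i < 3; i++)          /* recompute targets, resend movers */
            if (remap_target(reqs[i].tid, epoch) != reqs[i].osd)
                resend(&reqs[i], epoch);
        return 0;
    }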
49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
51 static void link_linger(struct ceph_osd *osd,
53 static void unlink_linger(struct ceph_osd *osd,
55 static void clear_backoffs(struct ceph_osd *osd);
77 static inline void verify_osd_locked(struct ceph_osd *osd) in verify_osd_locked() argument
79 struct ceph_osd_client *osdc = osd->o_osdc; in verify_osd_locked()
81 WARN_ON(!(mutex_is_locked(&osd->lock) && in verify_osd_locked()
92 static inline void verify_osd_locked(struct ceph_osd *osd) { } in verify_osd_locked() argument
101 * fill osd op in request message.
455 t->osd = CEPH_HOMELESS_OSD; in target_init()
484 dest->osd = src->osd; in target_copy()
722 * oid, oloc and OSD op opcode(s) must be filled in before this function
736 * This is an osd op init function for opcodes that have no data or
1056 pr_err("unsupported osd opcode %s\n", in osd_req_encode_op()
1189 * We keep osd requests in an rbtree, sorted by ->r_tid.
1195 * Call @fn on each OSD request as long as @fn returns 0. in DEFINE_RB_FUNCS()
1204 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in DEFINE_RB_FUNCS() local
1206 for (p = rb_first(&osd->o_requests); p; ) { in DEFINE_RB_FUNCS()
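The fragments above note that osd requests live in an rbtree sorted by ->r_tid and that iteration walks each session's tree with rb_first()/rb_next(). A minimal userspace sketch of the same idea, using POSIX tsearch()/tfind()/twalk() in place of the kernel rbtree (struct and helper names are illustrative only; tree nodes are intentionally leaked at exit for brevity):

    #include <search.h>
    #include <stdio.h>

    struct req {
        unsigned long long tid;
    };

    static int cmp_tid(const void *a, const void *b)
    {
        const struct req *x = a, *y = b;

        if (x->tid < y->tid)
            return -1;
        return x->tid > y->tid;
    }

    static void print_req(const void *node, VISIT which, int depth)
    {
        const struct req *r = *(const struct req *const *)node;

        (void)depth;
        if (which == postorder || which == leaf)   /* in-order visit */
            printf("tid %llu\n", r->tid);
    }

    int main(void)
    {
        void *root = NULL;
        struct req a = { 7 }, b = { 3 }, c = { 42 }, key = { 3 };
        struct req **found;

        tsearch(&a, &root, cmp_tid);
        tsearch(&b, &root, cmp_tid);
        tsearch(&c, &root, cmp_tid);

        found = tfind(&key, &root, cmp_tid);       /* lookup by tid */
        if (found)
            printf("found tid %llu\n", (*found)->tid);

        twalk(root, print_req);                    /* iterate in tid order */
        return 0;
    }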
1226 static bool osd_homeless(struct ceph_osd *osd) in osd_homeless() argument
1228 return osd->o_osd == CEPH_HOMELESS_OSD; in osd_homeless()
1231 static bool osd_registered(struct ceph_osd *osd) in osd_registered() argument
1233 verify_osdc_locked(osd->o_osdc); in osd_registered()
1235 return !RB_EMPTY_NODE(&osd->o_node); in osd_registered()
1239 * Assumes @osd is zero-initialized.
1241 static void osd_init(struct ceph_osd *osd) in osd_init() argument
1243 refcount_set(&osd->o_ref, 1); in osd_init()
1244 RB_CLEAR_NODE(&osd->o_node); in osd_init()
1245 spin_lock_init(&osd->o_requests_lock); in osd_init()
1246 osd->o_requests = RB_ROOT; in osd_init()
1247 osd->o_linger_requests = RB_ROOT; in osd_init()
1248 osd->o_backoff_mappings = RB_ROOT; in osd_init()
1249 osd->o_backoffs_by_id = RB_ROOT; in osd_init()
1250 INIT_LIST_HEAD(&osd->o_osd_lru); in osd_init()
1251 INIT_LIST_HEAD(&osd->o_keepalive_item); in osd_init()
1252 osd->o_incarnation = 1; in osd_init()
1253 mutex_init(&osd->lock); in osd_init()
1263 static void osd_cleanup(struct ceph_osd *osd) in osd_cleanup() argument
1265 WARN_ON(!RB_EMPTY_NODE(&osd->o_node)); in osd_cleanup()
1266 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); in osd_cleanup()
1267 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); in osd_cleanup()
1268 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings)); in osd_cleanup()
1269 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id)); in osd_cleanup()
1270 WARN_ON(!list_empty(&osd->o_osd_lru)); in osd_cleanup()
1271 WARN_ON(!list_empty(&osd->o_keepalive_item)); in osd_cleanup()
1273 ceph_init_sparse_read(&osd->o_sparse_read); in osd_cleanup()
1275 if (osd->o_auth.authorizer) { in osd_cleanup()
1276 WARN_ON(osd_homeless(osd)); in osd_cleanup()
1277 ceph_auth_destroy_authorizer(osd->o_auth.authorizer); in osd_cleanup()
1286 struct ceph_osd *osd; in create_osd() local
1290 osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL); in create_osd()
1291 osd_init(osd); in create_osd()
1292 osd->o_osdc = osdc; in create_osd()
1293 osd->o_osd = onum; in create_osd()
1294 osd->o_sparse_op_idx = -1; in create_osd()
1296 ceph_init_sparse_read(&osd->o_sparse_read); in create_osd()
1298 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); in create_osd()
1300 return osd; in create_osd()
1303 static struct ceph_osd *get_osd(struct ceph_osd *osd) in get_osd() argument
1305 if (refcount_inc_not_zero(&osd->o_ref)) { in get_osd()
1306 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1, in get_osd()
1307 refcount_read(&osd->o_ref)); in get_osd()
1308 return osd; in get_osd()
1310 dout("get_osd %p FAIL\n", osd); in get_osd()
1315 static void put_osd(struct ceph_osd *osd) in put_osd() argument
1317 dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref), in put_osd()
1318 refcount_read(&osd->o_ref) - 1); in put_osd()
1319 if (refcount_dec_and_test(&osd->o_ref)) { in put_osd()
1320 osd_cleanup(osd); in put_osd()
1321 kfree(osd); in put_osd()
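get_osd()/put_osd() pin the session while users such as link_request()/unlink_request() (below) hold references to it; osd_cleanup() and kfree() run only when the last reference drops. A hedged sketch of the same acquire-if-live / release-and-free pattern using C11 atomics (the kernel uses refcount_t; the names here are made up):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct session {
        atomic_int ref;
    };

    /* take a reference only if the object is still live (ref > 0) */
    static bool session_get(struct session *s)
    {
        int old = atomic_load(&s->ref);

        while (old != 0) {
            if (atomic_compare_exchange_weak(&s->ref, &old, old + 1))
                return true;
        }
        return false;
    }

    /* drop a reference; free when the last one goes away */
    static void session_put(struct session *s)
    {
        if (atomic_fetch_sub(&s->ref, 1) == 1) {
            printf("last ref dropped, cleaning up\n");
            free(s);
        }
    }

    int main(void)
    {
        struct session *s = malloc(sizeof(*s));

        atomic_init(&s->ref, 1);     /* initial reference, as in osd_init() */
        if (session_get(s))          /* e.g. a request linking to the session */
            printf("ref now %d\n", atomic_load(&s->ref));
        session_put(s);              /* request unlinked */
        session_put(s);              /* original reference dropped -> free */
        return 0;
    }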
1325 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node) in DEFINE_RB_FUNCS() argument
1327 static void __move_osd_to_lru(struct ceph_osd *osd) in DEFINE_RB_FUNCS()
1329 struct ceph_osd_client *osdc = osd->o_osdc; in DEFINE_RB_FUNCS()
1331 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in DEFINE_RB_FUNCS()
1332 BUG_ON(!list_empty(&osd->o_osd_lru)); in DEFINE_RB_FUNCS()
1335 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); in DEFINE_RB_FUNCS()
1338 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl; in DEFINE_RB_FUNCS()
1341 static void maybe_move_osd_to_lru(struct ceph_osd *osd) in maybe_move_osd_to_lru() argument
1343 if (RB_EMPTY_ROOT(&osd->o_requests) && in maybe_move_osd_to_lru()
1344 RB_EMPTY_ROOT(&osd->o_linger_requests)) in maybe_move_osd_to_lru()
1345 __move_osd_to_lru(osd); in maybe_move_osd_to_lru()
1348 static void __remove_osd_from_lru(struct ceph_osd *osd) in __remove_osd_from_lru() argument
1350 struct ceph_osd_client *osdc = osd->o_osdc; in __remove_osd_from_lru()
1352 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in __remove_osd_from_lru()
1355 if (!list_empty(&osd->o_osd_lru)) in __remove_osd_from_lru()
1356 list_del_init(&osd->o_osd_lru); in __remove_osd_from_lru()
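When a session has no regular or linger requests left, maybe_move_osd_to_lru() parks it on the client's LRU with a ttl of jiffies + osd_idle_ttl, and a later scan (handle_osds_timeout(), further down in this listing) closes sessions whose ttl has expired. A simplified single-threaded sketch of that idle list, with hypothetical names and time() standing in for jiffies:

    #include <stdio.h>
    #include <time.h>

    #define IDLE_TTL_SECS 60

    struct idle_osd {
        int id;
        time_t expires;           /* lru_ttl equivalent */
        struct idle_osd *next;
    };

    static struct idle_osd *lru_head;

    static void move_to_lru(struct idle_osd *osd)
    {
        osd->expires = time(NULL) + IDLE_TTL_SECS;
        osd->next = lru_head;     /* the kernel appends to the tail instead */
        lru_head = osd;
    }

    /* periodic scan: close sessions whose TTL has passed */
    static void osds_timeout(void)
    {
        struct idle_osd **pp = &lru_head;

        while (*pp) {
            if (time(NULL) >= (*pp)->expires) {
                printf("closing idle osd%d\n", (*pp)->id);
                *pp = (*pp)->next;
            } else {
                pp = &(*pp)->next;
            }
        }
    }

    int main(void)
    {
        struct idle_osd a = { .id = 0 }, b = { .id = 3 };

        move_to_lru(&a);
        move_to_lru(&b);
        a.expires = 0;            /* pretend osd0 has been idle past its TTL */
        osds_timeout();           /* closes osd0, keeps osd3 */
        return 0;
    }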
1364 static void close_osd(struct ceph_osd *osd) in close_osd() argument
1366 struct ceph_osd_client *osdc = osd->o_osdc; in close_osd()
1370 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in close_osd()
1372 ceph_con_close(&osd->o_con); in close_osd()
1374 for (n = rb_first(&osd->o_requests); n; ) { in close_osd()
1381 unlink_request(osd, req); in close_osd()
1384 for (n = rb_first(&osd->o_linger_requests); n; ) { in close_osd()
1392 unlink_linger(osd, lreq); in close_osd()
1395 clear_backoffs(osd); in close_osd()
1397 __remove_osd_from_lru(osd); in close_osd()
1398 erase_osd(&osdc->osds, osd); in close_osd()
1399 put_osd(osd); in close_osd()
1403 * reset the osd connection
1405 static int reopen_osd(struct ceph_osd *osd) in reopen_osd() argument
1409 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in reopen_osd()
1411 if (RB_EMPTY_ROOT(&osd->o_requests) && in reopen_osd()
1412 RB_EMPTY_ROOT(&osd->o_linger_requests)) { in reopen_osd()
1413 close_osd(osd); in reopen_osd()
1417 peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd]; in reopen_osd()
1418 if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && in reopen_osd()
1419 !ceph_con_opened(&osd->o_con)) { in reopen_osd()
1422 dout("osd addr hasn't changed and connection never opened, " in reopen_osd()
1425 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in reopen_osd()
1434 ceph_con_close(&osd->o_con); in reopen_osd()
1435 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); in reopen_osd()
1436 osd->o_incarnation++; in reopen_osd()
1444 struct ceph_osd *osd; in lookup_create_osd() local
1452 osd = lookup_osd(&osdc->osds, o); in lookup_create_osd()
1454 osd = &osdc->homeless_osd; in lookup_create_osd()
1455 if (!osd) { in lookup_create_osd()
1459 osd = create_osd(osdc, o); in lookup_create_osd()
1460 insert_osd(&osdc->osds, osd); in lookup_create_osd()
1461 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, in lookup_create_osd()
1462 &osdc->osdmap->osd_addr[osd->o_osd]); in lookup_create_osd()
1465 dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd); in lookup_create_osd()
1466 return osd; in lookup_create_osd()
1470 * Create request <-> OSD session relation.
1472 * @req has to be assigned a tid, @osd may be homeless.
1474 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req) in link_request() argument
1476 verify_osd_locked(osd); in link_request()
1478 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, in link_request()
1481 if (!osd_homeless(osd)) in link_request()
1482 __remove_osd_from_lru(osd); in link_request()
1484 atomic_inc(&osd->o_osdc->num_homeless); in link_request()
1486 get_osd(osd); in link_request()
1487 spin_lock(&osd->o_requests_lock); in link_request()
1488 insert_request(&osd->o_requests, req); in link_request()
1489 spin_unlock(&osd->o_requests_lock); in link_request()
1490 req->r_osd = osd; in link_request()
1493 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req) in unlink_request() argument
1495 verify_osd_locked(osd); in unlink_request()
1496 WARN_ON(req->r_osd != osd); in unlink_request()
1497 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, in unlink_request()
1501 spin_lock(&osd->o_requests_lock); in unlink_request()
1502 erase_request(&osd->o_requests, req); in unlink_request()
1503 spin_unlock(&osd->o_requests_lock); in unlink_request()
1504 put_osd(osd); in unlink_request()
1506 if (!osd_homeless(osd)) in unlink_request()
1507 maybe_move_osd_to_lru(osd); in unlink_request()
1509 atomic_dec(&osd->o_osdc->num_homeless); in unlink_request()
1566 dout("%s picked osd%d, primary osd%d\n", __func__, in pick_random_replica()
1596 dout("%s picked osd%d with locality %d, primary osd%d\n", __func__, in pick_closest_replica()
1628 t->osd = CEPH_HOMELESS_OSD; in calc_target()
1653 t->osd = CEPH_HOMELESS_OSD; in calc_target()
1716 t->osd = acting.osds[pos]; in calc_target()
1719 t->osd = acting.primary; in calc_target()
1730 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused, in calc_target()
1731 legacy_change, force_resend, split, ct_res, t->osd); in calc_target()
1758 * ceph_pg_mapping. Used to track OSD backoffs -- a backoff [range] is
1986 * Each backoff has a unique id within its OSD session.
1990 static void clear_backoffs(struct ceph_osd *osd) in DEFINE_RB_FUNCS()
1992 while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) { in DEFINE_RB_FUNCS()
1994 rb_entry(rb_first(&osd->o_backoff_mappings), in DEFINE_RB_FUNCS()
2003 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); in DEFINE_RB_FUNCS()
2006 erase_spg_mapping(&osd->o_backoff_mappings, spg); in DEFINE_RB_FUNCS()
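Backoffs let an OSD tell the client to hold (plug) requests for a placement group or an object range; each one is tracked in the session both by spgid and by a per-session unique id, and clear_backoffs() drops them all when the session goes away. A small sketch of the plugging decision for a single backed-off range (hypothetical struct and field names; the kernel compares hobjects, not strings):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct backoff {
        unsigned long long id;    /* unique within the session */
        char begin[32];           /* [begin, end) in object sort order */
        char end[32];
    };

    static bool should_plug(const struct backoff *b, const char *oid)
    {
        /* plug if begin <= oid < end (string order stands in for hobject order) */
        return strcmp(b->begin, oid) <= 0 && strcmp(oid, b->end) < 0;
    }

    int main(void)
    {
        struct backoff b = { .id = 1, .begin = "obj.0100", .end = "obj.0200" };

        printf("obj.0150 plugged: %d\n", should_plug(&b, "obj.0150"));  /* 1 */
        printf("obj.0300 plugged: %d\n", should_plug(&b, "obj.0300"));  /* 0 */
        return 0;
    }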
2037 struct ceph_osd *osd = req->r_osd; in should_plug_request() local
2042 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid); in should_plug_request()
2051 dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n", in should_plug_request()
2052 __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool, in should_plug_request()
2247 /* luminous OSD -- encode features and be done */ in encode_request_finish()
2269 * Pre-luminous OSD -- reencode v8 into v4 using @head in encode_request_finish()
2337 struct ceph_osd *osd = req->r_osd; in send_request() local
2339 verify_osd_locked(osd); in send_request()
2340 WARN_ON(osd->o_osd != req->r_t.osd); in send_request()
2361 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", in send_request()
2364 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, in send_request()
2371 req->r_sent = osd->o_incarnation; in send_request()
2373 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); in send_request()
2403 struct ceph_osd *osd; in __submit_request() local
2417 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); in __submit_request()
2418 if (IS_ERR(osd)) { in __submit_request()
2419 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); in __submit_request()
2458 } else if (!osd_homeless(osd)) { in __submit_request()
2464 mutex_lock(&osd->lock); in __submit_request()
2471 link_request(osd, req); in __submit_request()
2476 mutex_unlock(&osd->lock); in __submit_request()
2528 * If an OSD has failed or returned and a request has been sent in finish_request()
2796 WARN_ON(lreq->osd); in linger_release()
2853 * Create linger request <-> OSD session relation. in DEFINE_RB_INSDEL_FUNCS()
2855 * @lreq has to be registered, @osd may be homeless. in DEFINE_RB_INSDEL_FUNCS()
2857 static void link_linger(struct ceph_osd *osd, in DEFINE_RB_INSDEL_FUNCS()
2860 verify_osd_locked(osd); in DEFINE_RB_INSDEL_FUNCS()
2861 WARN_ON(!lreq->linger_id || lreq->osd); in DEFINE_RB_INSDEL_FUNCS()
2862 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, in DEFINE_RB_INSDEL_FUNCS()
2863 osd->o_osd, lreq, lreq->linger_id); in DEFINE_RB_INSDEL_FUNCS()
2865 if (!osd_homeless(osd)) in DEFINE_RB_INSDEL_FUNCS()
2866 __remove_osd_from_lru(osd); in DEFINE_RB_INSDEL_FUNCS()
2868 atomic_inc(&osd->o_osdc->num_homeless); in DEFINE_RB_INSDEL_FUNCS()
2870 get_osd(osd); in DEFINE_RB_INSDEL_FUNCS()
2871 insert_linger(&osd->o_linger_requests, lreq); in DEFINE_RB_INSDEL_FUNCS()
2872 lreq->osd = osd; in DEFINE_RB_INSDEL_FUNCS()
2875 static void unlink_linger(struct ceph_osd *osd, in unlink_linger() argument
2878 verify_osd_locked(osd); in unlink_linger()
2879 WARN_ON(lreq->osd != osd); in unlink_linger()
2880 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, in unlink_linger()
2881 osd->o_osd, lreq, lreq->linger_id); in unlink_linger()
2883 lreq->osd = NULL; in unlink_linger()
2884 erase_linger(&osd->o_linger_requests, lreq); in unlink_linger()
2885 put_osd(osd); in unlink_linger()
2887 if (!osd_homeless(osd)) in unlink_linger()
2888 maybe_move_osd_to_lru(osd); in unlink_linger()
2890 atomic_dec(&osd->o_osdc->num_homeless); in unlink_linger()
3273 link_request(lreq->osd, req); in send_linger_ping()
3280 struct ceph_osd *osd; in linger_submit() local
3286 osd = lookup_create_osd(osdc, lreq->t.osd, true); in linger_submit()
3287 link_linger(osd, lreq); in linger_submit()
3320 unlink_linger(lreq->osd, lreq); in __linger_cancel()
3443 * Timeout callback, called every N seconds. When 1 or more OSD
3445 * (tag + timestamp) to its OSD to ensure any communications channel
3464 * a connection with that osd (from the fault callback). in handle_timeout()
3467 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in handle_timeout() local
3470 for (p = rb_first(&osd->o_requests); p; ) { in handle_timeout()
3477 dout(" req %p tid %llu on osd%d is laggy\n", in handle_timeout()
3478 req, req->r_tid, osd->o_osd); in handle_timeout()
3483 pr_err_ratelimited("tid %llu on osd%d timeout\n", in handle_timeout()
3484 req->r_tid, osd->o_osd); in handle_timeout()
3488 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { in handle_timeout()
3492 dout(" lreq %p linger_id %llu is served by osd%d\n", in handle_timeout()
3493 lreq, lreq->linger_id, osd->o_osd); in handle_timeout()
3503 list_move_tail(&osd->o_keepalive_item, &slow_osds); in handle_timeout()
3514 pr_err_ratelimited("tid %llu on osd%d timeout\n", in handle_timeout()
3525 struct ceph_osd *osd = list_first_entry(&slow_osds, in handle_timeout() local
3528 list_del_init(&osd->o_keepalive_item); in handle_timeout()
3529 ceph_con_keepalive(&osd->o_con); in handle_timeout()
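handle_timeout() runs periodically: requests that have been outstanding too long are reported (or aborted once they exceed osd_request_timeout), and the OSDs holding laggy requests are gathered on a slow_osds list and sent a keepalive so that a dead connection gets noticed. A rough sketch of that scan (hypothetical names, plain time() instead of jiffies):

    #include <stdio.h>
    #include <time.h>

    #define KEEPALIVE_AFTER_SECS 5

    struct pending {
        unsigned long long tid;
        int osd;
        time_t stamp;             /* when the request was (re)sent */
    };

    static void send_keepalive(int osd)
    {
        printf("keepalive -> osd%d\n", osd);
    }

    static void handle_timeout(struct pending *reqs, int nr)
    {
        time_t now = time(NULL);
        int i;

        for (i = 0; i < nr; i++) {
            if (now - reqs[i].stamp >= KEEPALIVE_AFTER_SECS) {
                printf("req tid %llu on osd%d is laggy\n",
                       reqs[i].tid, reqs[i].osd);
                send_keepalive(reqs[i].osd);   /* the kernel dedups per OSD */
            }
        }
    }

    int main(void)
    {
        struct pending reqs[] = {
            { 1, 0, time(NULL) - 10 },   /* laggy */
            { 2, 3, time(NULL) },        /* fresh */
        };

        handle_timeout(reqs, 2);
        return 0;
    }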
3543 struct ceph_osd *osd, *nosd; in handle_osds_timeout() local
3547 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { in handle_osds_timeout()
3548 if (time_before(jiffies, osd->lru_ttl)) in handle_osds_timeout()
3551 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); in handle_osds_timeout()
3552 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); in handle_osds_timeout()
3553 close_osd(osd); in handle_osds_timeout()
3768 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) in handle_reply() argument
3770 struct ceph_osd_client *osdc = osd->o_osdc; in handle_reply()
3781 if (!osd_registered(osd)) { in handle_reply()
3782 dout("%s osd%d unknown\n", __func__, osd->o_osd); in handle_reply()
3785 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); in handle_reply()
3787 mutex_lock(&osd->lock); in handle_reply()
3788 req = lookup_request(&osd->o_requests, tid); in handle_reply()
3790 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); in handle_reply()
3822 unlink_request(osd, req); in handle_reply()
3823 mutex_unlock(&osd->lock); in handle_reply()
3840 unlink_request(osd, req); in handle_reply()
3841 mutex_unlock(&osd->lock); in handle_reply()
3886 mutex_unlock(&osd->lock); in handle_reply()
3895 mutex_unlock(&osd->lock); in handle_reply()
3931 struct ceph_osd *osd; in recalc_linger_target() local
3933 osd = lookup_create_osd(osdc, lreq->t.osd, true); in recalc_linger_target()
3934 if (osd != lreq->osd) { in recalc_linger_target()
3935 unlink_linger(lreq->osd, lreq); in recalc_linger_target()
3936 link_linger(osd, lreq); in recalc_linger_target()
3944 * Requeue requests whose mapping to an OSD has changed.
3946 static void scan_requests(struct ceph_osd *osd, in scan_requests() argument
3953 struct ceph_osd_client *osdc = osd->o_osdc; in scan_requests()
3957 for (n = rb_first(&osd->o_linger_requests); n; ) { in scan_requests()
3993 for (n = rb_first(&osd->o_requests); n; ) { in scan_requests()
4015 unlink_request(osd, req); in scan_requests()
4080 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in handle_one_map() local
4084 scan_requests(osd, skipped_map, was_full, true, need_resend, in handle_one_map()
4086 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || in handle_one_map()
4087 memcmp(&osd->o_con.peer_addr, in handle_one_map()
4088 ceph_osd_addr(osdc->osdmap, osd->o_osd), in handle_one_map()
4090 close_osd(osd); in handle_one_map()
4123 struct ceph_osd *osd; in kick_requests() local
4128 osd = lookup_create_osd(osdc, req->r_t.osd, true); in kick_requests()
4129 link_request(osd, req); in kick_requests()
4131 if (!osd_homeless(osd) && !req->r_t.paused) in kick_requests()
4139 if (!osd_homeless(lreq->osd)) in kick_requests()
4147 * Process updated osd map.
4264 * Resubmit requests pending on the given osd.
4266 static void kick_osd_requests(struct ceph_osd *osd) in kick_osd_requests() argument
4270 clear_backoffs(osd); in kick_osd_requests()
4272 for (n = rb_first(&osd->o_requests); n; ) { in kick_osd_requests()
4285 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { in kick_osd_requests()
4294 * If the osd connection drops, we need to resubmit all requests.
4298 struct ceph_osd *osd = con->private; in osd_fault() local
4299 struct ceph_osd_client *osdc = osd->o_osdc; in osd_fault()
4301 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in osd_fault()
4304 if (!osd_registered(osd)) { in osd_fault()
4305 dout("%s osd%d unknown\n", __func__, osd->o_osd); in osd_fault()
4309 if (!reopen_osd(osd)) in osd_fault()
4310 kick_osd_requests(osd); in osd_fault()
4414 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m) in handle_backoff_block() argument
4420 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, in handle_backoff_block()
4423 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid); in handle_backoff_block()
4431 insert_spg_mapping(&osd->o_backoff_mappings, spg); in handle_backoff_block()
4447 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff); in handle_backoff_block()
4450 * Ack with original backoff's epoch so that the OSD can in handle_backoff_block()
4458 ceph_con_send(&osd->o_con, msg); in handle_backoff_block()
4473 static void handle_backoff_unblock(struct ceph_osd *osd, in handle_backoff_unblock() argument
4480 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, in handle_backoff_unblock()
4483 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id); in handle_backoff_unblock()
4485 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n", in handle_backoff_unblock()
4486 __func__, osd->o_osd, m->spgid.pgid.pool, in handle_backoff_unblock()
4493 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n", in handle_backoff_unblock()
4494 __func__, osd->o_osd, m->spgid.pgid.pool, in handle_backoff_unblock()
4499 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid); in handle_backoff_unblock()
4503 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); in handle_backoff_unblock()
4507 erase_spg_mapping(&osd->o_backoff_mappings, spg); in handle_backoff_unblock()
4511 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in handle_backoff_unblock()
4518 * have split on the OSD. in handle_backoff_unblock()
4531 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg) in handle_backoff() argument
4533 struct ceph_osd_client *osdc = osd->o_osdc; in handle_backoff()
4538 if (!osd_registered(osd)) { in handle_backoff()
4539 dout("%s osd%d unknown\n", __func__, osd->o_osd); in handle_backoff()
4543 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); in handle_backoff()
4545 mutex_lock(&osd->lock); in handle_backoff()
4555 handle_backoff_block(osd, &m); in handle_backoff()
4558 handle_backoff_unblock(osd, &m); in handle_backoff()
4561 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op); in handle_backoff()
4568 mutex_unlock(&osd->lock); in handle_backoff()
4573 * Process osd watch notifications
4745 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in ceph_osdc_sync() local
4747 mutex_lock(&osd->lock); in ceph_osdc_sync()
4748 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { in ceph_osdc_sync()
4759 mutex_unlock(&osd->lock); in ceph_osdc_sync()
4768 mutex_unlock(&osd->lock); in ceph_osdc_sync()
5172 * Execute an OSD class method on an object.
5229 * reset all osd connections
5237 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in ceph_osdc_reopen_osds() local
5240 if (!reopen_osd(osd)) in ceph_osdc_reopen_osds()
5241 kick_osd_requests(osd); in ceph_osdc_reopen_osds()
5328 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), in ceph_osdc_stop() local
5330 close_osd(osd); in ceph_osdc_stop()
5411 struct ceph_osd *osd = con->private; in osd_dispatch() local
5412 struct ceph_osd_client *osdc = osd->o_osdc; in osd_dispatch()
5420 handle_reply(osd, msg); in osd_dispatch()
5423 handle_backoff(osd, msg); in osd_dispatch()
5464 struct ceph_osd *osd = con->private; in get_reply() local
5465 struct ceph_osd_client *osdc = osd->o_osdc; in get_reply()
5474 if (!osd_registered(osd)) { in get_reply()
5475 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd); in get_reply()
5479 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num)); in get_reply()
5481 mutex_lock(&osd->lock); in get_reply()
5482 req = lookup_request(&osd->o_requests, tid); in get_reply()
5484 dout("%s osd%d tid %llu unknown, skipping\n", __func__, in get_reply()
5485 osd->o_osd, tid); in get_reply()
5493 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n", in get_reply()
5494 __func__, osd->o_osd, req->r_tid, front_len, in get_reply()
5506 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n", in get_reply()
5507 __func__, osd->o_osd, req->r_tid, data_len, in get_reply()
5520 mutex_unlock(&osd->lock); in get_reply()
5557 struct ceph_osd *osd = con->private; in osd_alloc_msg() local
5569 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__, in osd_alloc_msg()
5570 osd->o_osd, type); in osd_alloc_msg()
5581 struct ceph_osd *osd = con->private; in osd_get_con() local
5582 if (get_osd(osd)) in osd_get_con()
5589 struct ceph_osd *osd = con->private; in osd_put_con() local
5590 put_osd(osd); in osd_put_con()
5889 dout("%s: OSD returned 0x%x extents in a single reply!\n", in osd_sparse_read()