Lines matching refs:dev: all references to "dev" in the RNBD client driver (drivers/block/rnbd/rnbd-clt.c). The leading number on each line is the source line; the trailing "in func()" annotation names the enclosing function, with "argument"/"local" marking the lines where dev enters scope.
51 static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev) in rnbd_clt_put_dev() argument
55 if (!refcount_dec_and_test(&dev->refcount)) in rnbd_clt_put_dev()
58 ida_free(&index_ida, dev->clt_device_id); in rnbd_clt_put_dev()
59 kfree(dev->hw_queues); in rnbd_clt_put_dev()
60 kfree(dev->pathname); in rnbd_clt_put_dev()
61 rnbd_clt_put_sess(dev->sess); in rnbd_clt_put_dev()
62 mutex_destroy(&dev->lock); in rnbd_clt_put_dev()
63 kfree(dev); in rnbd_clt_put_dev()
66 static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev) in rnbd_clt_get_dev() argument
68 return refcount_inc_not_zero(&dev->refcount); in rnbd_clt_get_dev()
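
The pair above is the standard kernel get/put lifecycle: rnbd_clt_get_dev() refuses to take a reference on an object whose count has already dropped to zero, and rnbd_clt_put_dev() tears the device down only on the final put. A minimal sketch of the same pattern (my_dev and its helpers are illustrative names, not from the driver):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct my_dev {
		refcount_t refcount;
		/* ... payload ... */
	};

	static bool my_dev_get(struct my_dev *d)
	{
		/* Returns false once the count has hit zero, so a lookup
		 * can never resurrect an object that is being freed. */
		return refcount_inc_not_zero(&d->refcount);
	}

	static void my_dev_put(struct my_dev *d)
	{
		/* Only the dropper of the last reference frees the object. */
		if (refcount_dec_and_test(&d->refcount))
			kfree(d);
	}
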
71 static void rnbd_clt_change_capacity(struct rnbd_clt_dev *dev, in rnbd_clt_change_capacity() argument
74 if (get_capacity(dev->gd) == new_nsectors) in rnbd_clt_change_capacity()
80 rnbd_clt_info(dev, "Device size changed from %llu to %llu sectors\n", in rnbd_clt_change_capacity()
81 get_capacity(dev->gd), new_nsectors); in rnbd_clt_change_capacity()
82 set_capacity_and_notify(dev->gd, new_nsectors); in rnbd_clt_change_capacity()
85 static int process_msg_open_rsp(struct rnbd_clt_dev *dev, in process_msg_open_rsp() argument
91 mutex_lock(&dev->lock); in process_msg_open_rsp()
92 if (dev->dev_state == DEV_STATE_UNMAPPED) { in process_msg_open_rsp()
93 rnbd_clt_info(dev, in process_msg_open_rsp()
98 if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) { in process_msg_open_rsp()
101 rnbd_clt_change_capacity(dev, nsectors); in process_msg_open_rsp()
102 gd_kobj = &disk_to_dev(dev->gd)->kobj; in process_msg_open_rsp()
104 rnbd_clt_info(dev, "Device online, device remapped successfully\n"); in process_msg_open_rsp()
110 dev->device_id = le32_to_cpu(rsp->device_id); in process_msg_open_rsp()
111 dev->dev_state = DEV_STATE_MAPPED; in process_msg_open_rsp()
114 mutex_unlock(&dev->lock); in process_msg_open_rsp()
119 int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize) in rnbd_clt_resize_disk() argument
123 mutex_lock(&dev->lock); in rnbd_clt_resize_disk()
124 if (dev->dev_state != DEV_STATE_MAPPED) { in rnbd_clt_resize_disk()
129 rnbd_clt_change_capacity(dev, newsize); in rnbd_clt_resize_disk()
132 mutex_unlock(&dev->lock); in rnbd_clt_resize_disk()
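
Both process_msg_open_rsp() and rnbd_clt_resize_disk() take dev->lock before looking at dev_state, so every state transition and every operation gated on the state is serialized by the same mutex. The check-under-lock shape, as a sketch (the -ENOENT errno is an assumption here):

	/* Sketch: act on the device only while it is still mapped. */
	static int resize_if_mapped(struct rnbd_clt_dev *dev, sector_t newsize)
	{
		int ret = 0;

		mutex_lock(&dev->lock);
		if (dev->dev_state != DEV_STATE_MAPPED)
			ret = -ENOENT;	/* assumed errno: device went away */
		else
			rnbd_clt_change_capacity(dev, newsize);
		mutex_unlock(&dev->lock);

		return ret;
	}
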
368 struct rnbd_clt_dev *dev = rq->q->disk->private_data; in rnbd_softirq_done_fn() local
369 struct rnbd_clt_session *sess = dev->sess; in rnbd_softirq_done_fn()
381 struct rnbd_clt_dev *dev = iu->dev; in msg_io_conf() local
390 rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n", in msg_io_conf()
437 struct rnbd_clt_dev *dev = iu->dev; in msg_close_conf() local
440 rnbd_put_iu(dev->sess, iu); in msg_close_conf()
441 rnbd_clt_put_dev(dev); in msg_close_conf()
444 static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, in send_msg_close() argument
447 struct rnbd_clt_session *sess = dev->sess; in send_msg_close()
461 iu->dev = dev; in send_msg_close()
466 WARN_ON(!rnbd_clt_get_dev(dev)); in send_msg_close()
470 rnbd_clt_put_dev(dev); in send_msg_close()
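
send_msg_close() takes an extra device reference before handing the IU to the transport and drops it again if the submission fails; on success the reference is owned by the IU and released in the completion callback msg_close_conf() (lines 440-441 above). A sketch of that hand-off, with a hypothetical async_send() standing in for the real RTRS submission call:

	/* Sketch of the reference hand-off in send_msg_close(); async_send()
	 * is a hypothetical stand-in for the RTRS call. */
	static int submit_sketch(struct rnbd_clt_dev *dev, struct rnbd_iu *iu)
	{
		int err;

		iu->dev = dev;
		WARN_ON(!rnbd_clt_get_dev(dev));	/* ref now travels with the IU */

		err = async_send(iu);
		if (err)
			rnbd_clt_put_dev(dev);		/* send failed: take the ref back */
		/* On success, the completion callback does the put. */
		return err;
	}
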
484 struct rnbd_clt_dev *dev = iu->dev; in msg_open_conf() local
489 if (dev->dev_state == DEV_STATE_INIT) in msg_open_conf()
493 rnbd_clt_err(dev, in msg_open_conf()
497 errno = process_msg_open_rsp(dev, rsp); in msg_open_conf()
504 send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT); in msg_open_conf()
511 rnbd_put_iu(dev->sess, iu); in msg_open_conf()
512 rnbd_clt_put_dev(dev); in msg_open_conf()
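
msg_open_conf() has one subtle duty visible above: if the server reports a successful open but process_msg_open_rsp() fails on the client, the just-granted device_id must be closed again, otherwise the server would keep the device open with no client-side owner. Assembled from lines 497 and 504:

	/* Sketch of the orphan-avoidance path in msg_open_conf(). */
	errno = process_msg_open_rsp(dev, rsp);
	if (errno) {
		u32 device_id = le32_to_cpu(rsp->device_id);

		/* Be nice to the server: close what we cannot use.
		 * Fire and forget; there is no one to report a failure to. */
		send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT);
	}
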
530 static int send_msg_open(struct rnbd_clt_dev *dev, enum wait_type wait) in send_msg_open() argument
532 struct rnbd_clt_session *sess = dev->sess; in send_msg_open()
553 iu->dev = dev; in send_msg_open()
558 msg.access_mode = dev->access_mode; in send_msg_open()
559 strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name)); in send_msg_open()
561 WARN_ON(!rnbd_clt_get_dev(dev)); in send_msg_open()
566 rnbd_clt_put_dev(dev); in send_msg_open()
632 struct rnbd_clt_dev *dev; in set_dev_states_to_disconnected() local
636 list_for_each_entry(dev, &sess->devs_list, list) { in set_dev_states_to_disconnected()
637 rnbd_clt_err(dev, "Device disconnected.\n"); in set_dev_states_to_disconnected()
639 mutex_lock(&dev->lock); in set_dev_states_to_disconnected()
640 if (dev->dev_state == DEV_STATE_MAPPED) { in set_dev_states_to_disconnected()
641 dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED; in set_dev_states_to_disconnected()
642 gd_kobj = &disk_to_dev(dev->gd)->kobj; in set_dev_states_to_disconnected()
645 mutex_unlock(&dev->lock); in set_dev_states_to_disconnected()
652 struct rnbd_clt_dev *dev; in remap_devs() local
681 list_for_each_entry(dev, &sess->devs_list, list) { in remap_devs()
684 mutex_lock(&dev->lock); in remap_devs()
685 skip = (dev->dev_state == DEV_STATE_INIT); in remap_devs()
686 mutex_unlock(&dev->lock); in remap_devs()
694 rnbd_clt_info(dev, "session reconnected, remapping device\n"); in remap_devs()
695 err = send_msg_open(dev, RTRS_PERMIT_NOWAIT); in remap_devs()
697 rnbd_clt_err(dev, "send_msg_open(): %d\n", err); in remap_devs()
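
remap_devs() runs after a session reconnect: it walks the session's device list and re-sends an open for every device, skipping those still in DEV_STATE_INIT whose first mapping has not completed. The loop, assembled from the lines above into one piece:

	/* Sketch of the remap loop in remap_devs(). */
	static void remap_sketch(struct rnbd_clt_session *sess)
	{
		struct rnbd_clt_dev *dev;
		int err;

		list_for_each_entry(dev, &sess->devs_list, list) {
			bool skip;

			mutex_lock(&dev->lock);
			skip = (dev->dev_state == DEV_STATE_INIT);
			mutex_unlock(&dev->lock);
			if (skip) {
				/* First mapping still in flight; do not remap. */
				continue;
			}

			rnbd_clt_info(dev, "session reconnected, remapping device\n");
			err = send_msg_open(dev, RTRS_PERMIT_NOWAIT);
			if (err)
				rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
		}
	}
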
926 struct rnbd_clt_dev *dev = disk->private_data; in rnbd_client_open() local
928 if (get_disk_ro(dev->gd) && (mode & BLK_OPEN_WRITE)) in rnbd_client_open()
931 if (dev->dev_state == DEV_STATE_UNMAPPED || in rnbd_client_open()
932 !rnbd_clt_get_dev(dev)) in rnbd_client_open()
940 struct rnbd_clt_dev *dev = gen->private_data; in rnbd_client_release() local
942 rnbd_clt_put_dev(dev); in rnbd_client_release()
949 struct rnbd_clt_dev *dev = block_device->bd_disk->private_data; in rnbd_client_getgeo() local
950 struct queue_limits *limit = &dev->queue->limits; in rnbd_client_getgeo()
952 size = dev->size * (limit->logical_block_size / SECTOR_SIZE); in rnbd_client_getgeo()
988 static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev, in rnbd_client_xfer_request() argument
992 struct rtrs_clt_sess *rtrs = dev->sess->rtrs; in rnbd_client_xfer_request()
1002 iu->dev = dev; in rnbd_client_xfer_request()
1013 sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl); in rnbd_client_xfer_request()
1019 msg.device_id = cpu_to_le32(dev->device_id); in rnbd_client_xfer_request()
1033 rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n", in rnbd_client_xfer_request()
1051 static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev, in rnbd_clt_dev_add_to_requeue() argument
1054 struct rnbd_clt_session *sess = dev->sess; in rnbd_clt_dev_add_to_requeue()
1095 static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev, in rnbd_clt_dev_kick_mq_queue() argument
1103 else if (!rnbd_clt_dev_add_to_requeue(dev, q)) in rnbd_clt_dev_kick_mq_queue()
1115 struct rnbd_clt_dev *dev = rq->q->disk->private_data; in rnbd_queue_rq() local
1120 if (dev->dev_state != DEV_STATE_MAPPED) in rnbd_queue_rq()
1123 iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON, in rnbd_queue_rq()
1126 rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY); in rnbd_queue_rq()
1139 rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err); in rnbd_queue_rq()
1140 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); in rnbd_queue_rq()
1141 rnbd_put_permit(dev->sess, iu->permit); in rnbd_queue_rq()
1146 err = rnbd_client_xfer_request(dev, rq, iu); in rnbd_queue_rq()
1150 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); in rnbd_queue_rq()
1154 rnbd_put_permit(dev->sess, iu->permit); in rnbd_queue_rq()
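
rnbd_queue_rq() follows the blk-mq contract for resource exhaustion: when no transport permit is available, or a later step fails transiently, it kicks the hardware queue for a retry and returns BLK_STS_RESOURCE instead of erroring the request. A condensed sketch of that control flow; the scatterlist setup between permit and transfer is omitted, and the real function additionally distinguishes hard transfer errors, while this sketch retries everything:

	/* Condensed sketch of rnbd_queue_rq(); sg-table handling omitted. */
	static blk_status_t queue_rq_sketch(struct blk_mq_hw_ctx *hctx,
					    const struct blk_mq_queue_data *bd)
	{
		struct request *rq = bd->rq;
		struct rnbd_clt_dev *dev = rq->q->disk->private_data;
		struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);

		if (dev->dev_state != DEV_STATE_MAPPED)
			return BLK_STS_IOERR;

		iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
					     RTRS_PERMIT_NOWAIT);
		if (!iu->permit) {
			/* No permit: let blk-mq retry this hctx later. */
			rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
			return BLK_STS_RESOURCE;
		}

		blk_mq_start_request(rq);
		if (rnbd_client_xfer_request(dev, rq, iu) == 0)
			return BLK_STS_OK;

		/* Transient transfer failure: return the permit and retry. */
		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
		rnbd_put_permit(dev->sess, iu->permit);
		return BLK_STS_RESOURCE;
	}
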
1161 struct rnbd_clt_dev *dev = q->dev; in rnbd_rdma_poll() local
1163 return rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num); in rnbd_rdma_poll()
1310 static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev, in rnbd_init_hw_queue() argument
1315 q->dev = dev; in rnbd_init_hw_queue()
1319 static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev) in rnbd_init_mq_hw_queues() argument
1325 queue_for_each_hw_ctx(dev->queue, hctx, i) { in rnbd_init_mq_hw_queues()
1326 q = &dev->hw_queues[i]; in rnbd_init_mq_hw_queues()
1327 rnbd_init_hw_queue(dev, q, hctx); in rnbd_init_mq_hw_queues()
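
rnbd_init_mq_hw_queues() wires each blk-mq hardware context of dev->queue to its matching entry in dev->hw_queues, in both directions, which is how rnbd_rdma_poll() (line 1161 above) finds the device again from the hctx. A sketch consistent with the lines above:

	/* Sketch of rnbd_init_hw_queue()/rnbd_init_mq_hw_queues(). */
	static void init_hw_queues_sketch(struct rnbd_clt_dev *dev)
	{
		struct blk_mq_hw_ctx *hctx;
		struct rnbd_queue *q;
		unsigned long i;

		queue_for_each_hw_ctx(dev->queue, hctx, i) {
			q = &dev->hw_queues[i];
			q->dev = dev;		/* rnbd_queue -> device */
			q->hctx = hctx;		/* rnbd_queue -> hctx */
			hctx->driver_data = q;	/* hctx -> rnbd_queue */
		}
	}
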
1332 static void setup_request_queue(struct rnbd_clt_dev *dev, in setup_request_queue() argument
1335 blk_queue_logical_block_size(dev->queue, in setup_request_queue()
1337 blk_queue_physical_block_size(dev->queue, in setup_request_queue()
1339 blk_queue_max_hw_sectors(dev->queue, in setup_request_queue()
1340 dev->sess->max_io_size / SECTOR_SIZE); in setup_request_queue()
1346 blk_queue_max_discard_segments(dev->queue, 1); in setup_request_queue()
1348 blk_queue_max_discard_sectors(dev->queue, in setup_request_queue()
1350 dev->queue->limits.discard_granularity = in setup_request_queue()
1352 dev->queue->limits.discard_alignment = in setup_request_queue()
1355 blk_queue_max_secure_erase_sectors(dev->queue, in setup_request_queue()
1357 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue); in setup_request_queue()
1358 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue); in setup_request_queue()
1359 blk_queue_max_segments(dev->queue, dev->sess->max_segments); in setup_request_queue()
1360 blk_queue_io_opt(dev->queue, dev->sess->max_io_size); in setup_request_queue()
1361 blk_queue_virt_boundary(dev->queue, SZ_4K - 1); in setup_request_queue()
1362 blk_queue_write_cache(dev->queue, in setup_request_queue()
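
setup_request_queue() translates the server's open response and the session limits into block-layer queue limits through the blk_queue_* helpers. A trimmed sketch of the non-discard part; the le16_to_cpu() conversions of the rsp fields are completions of the truncated lines above:

	/* Trimmed sketch of setup_request_queue(); discard, secure-erase and
	 * write-cache setup omitted. */
	static void setup_limits_sketch(struct rnbd_clt_dev *dev,
					struct rnbd_msg_open_rsp *rsp)
	{
		blk_queue_logical_block_size(dev->queue,
					     le16_to_cpu(rsp->logical_block_size));
		blk_queue_physical_block_size(dev->queue,
					      le16_to_cpu(rsp->physical_block_size));
		/* One request must fit into one RTRS transfer. */
		blk_queue_max_hw_sectors(dev->queue,
					 dev->sess->max_io_size / SECTOR_SIZE);
		blk_queue_max_segments(dev->queue, dev->sess->max_segments);
		blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
		/* Segments must not cross a 4 KiB virtual boundary. */
		blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
	}
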
1367 static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, in rnbd_clt_setup_gen_disk() argument
1372 dev->gd->major = rnbd_client_major; in rnbd_clt_setup_gen_disk()
1373 dev->gd->first_minor = idx << RNBD_PART_BITS; in rnbd_clt_setup_gen_disk()
1374 dev->gd->minors = 1 << RNBD_PART_BITS; in rnbd_clt_setup_gen_disk()
1375 dev->gd->fops = &rnbd_client_ops; in rnbd_clt_setup_gen_disk()
1376 dev->gd->queue = dev->queue; in rnbd_clt_setup_gen_disk()
1377 dev->gd->private_data = dev; in rnbd_clt_setup_gen_disk()
1378 snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d", in rnbd_clt_setup_gen_disk()
1381 dev->gd->disk_name, in rnbd_clt_setup_gen_disk()
1385 set_capacity(dev->gd, le64_to_cpu(rsp->nsectors)); in rnbd_clt_setup_gen_disk()
1387 if (dev->access_mode == RNBD_ACCESS_RO) in rnbd_clt_setup_gen_disk()
1388 set_disk_ro(dev->gd, true); in rnbd_clt_setup_gen_disk()
1393 blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue); in rnbd_clt_setup_gen_disk()
1394 err = add_disk(dev->gd); in rnbd_clt_setup_gen_disk()
1396 put_disk(dev->gd); in rnbd_clt_setup_gen_disk()
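
Since add_disk() can fail, rnbd_clt_setup_gen_disk() publishes the disk only after the gendisk is fully set up, and drops the disk reference on failure. The publish-or-release tail, as a sketch:

	err = add_disk(dev->gd);	/* disk becomes visible to userspace here */
	if (err)
		put_disk(dev->gd);	/* undo blk_mq_alloc_disk() on failure */
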
1401 static int rnbd_client_setup_device(struct rnbd_clt_dev *dev, in rnbd_client_setup_device() argument
1404 int idx = dev->clt_device_id; in rnbd_client_setup_device()
1406 dev->size = le64_to_cpu(rsp->nsectors) * in rnbd_client_setup_device()
1409 dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev); in rnbd_client_setup_device()
1410 if (IS_ERR(dev->gd)) in rnbd_client_setup_device()
1411 return PTR_ERR(dev->gd); in rnbd_client_setup_device()
1412 dev->queue = dev->gd->queue; in rnbd_client_setup_device()
1413 rnbd_init_mq_hw_queues(dev); in rnbd_client_setup_device()
1415 setup_request_queue(dev, rsp); in rnbd_client_setup_device()
1416 return rnbd_clt_setup_gen_disk(dev, rsp, idx); in rnbd_client_setup_device()
1424 struct rnbd_clt_dev *dev; in init_dev() local
1427 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE); in init_dev()
1428 if (!dev) in init_dev()
1435 dev->hw_queues = kcalloc(nr_cpu_ids + nr_poll_queues, in init_dev()
1436 sizeof(*dev->hw_queues), in init_dev()
1438 if (!dev->hw_queues) { in init_dev()
1451 dev->pathname = kstrdup(pathname, GFP_KERNEL); in init_dev()
1452 if (!dev->pathname) { in init_dev()
1457 dev->clt_device_id = ret; in init_dev()
1458 dev->sess = sess; in init_dev()
1459 dev->access_mode = access_mode; in init_dev()
1460 dev->nr_poll_queues = nr_poll_queues; in init_dev()
1461 mutex_init(&dev->lock); in init_dev()
1462 refcount_set(&dev->refcount, 1); in init_dev()
1463 dev->dev_state = DEV_STATE_INIT; in init_dev()
1471 return dev; in init_dev()
1474 kfree(dev->hw_queues); in init_dev()
1476 kfree(dev); in init_dev()
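
init_dev() acquires its resources in order (device struct, hw_queues array, client device ID, pathname copy) and unwinds in reverse on any failure, which is why the error path above frees hw_queues before the device itself. A condensed sketch; the ida_alloc() step is inferred from the ida_free() in rnbd_clt_put_dev() at the top of this listing:

	/* Condensed sketch of init_dev(): allocate in order, unwind in reverse. */
	static struct rnbd_clt_dev *init_dev_sketch(struct rnbd_clt_session *sess,
						    const char *pathname,
						    u32 nr_poll_queues)
	{
		struct rnbd_clt_dev *dev;
		int ret;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return ERR_PTR(-ENOMEM);

		dev->hw_queues = kcalloc(nr_cpu_ids + nr_poll_queues,
					 sizeof(*dev->hw_queues), GFP_KERNEL);
		if (!dev->hw_queues) {
			ret = -ENOMEM;
			goto out_free_dev;
		}

		ret = ida_alloc(&index_ida, GFP_KERNEL);	/* inferred step */
		if (ret < 0)
			goto out_free_queues;
		dev->clt_device_id = ret;

		dev->pathname = kstrdup(pathname, GFP_KERNEL);
		if (!dev->pathname) {
			ret = -ENOMEM;
			goto out_free_ida;
		}

		dev->sess = sess;
		mutex_init(&dev->lock);
		refcount_set(&dev->refcount, 1);
		dev->dev_state = DEV_STATE_INIT;
		return dev;

	out_free_ida:
		ida_free(&index_ida, dev->clt_device_id);
	out_free_queues:
		kfree(dev->hw_queues);
	out_free_dev:
		kfree(dev);
		return ERR_PTR(ret);
	}
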
1483 struct rnbd_clt_dev *dev; in __exists_dev() local
1491 list_for_each_entry(dev, &sess->devs_list, list) { in __exists_dev()
1492 if (strlen(dev->pathname) == strlen(pathname) && in __exists_dev()
1493 !strcmp(dev->pathname, pathname)) { in __exists_dev()
1517 static bool insert_dev_if_not_exists_devpath(struct rnbd_clt_dev *dev) in insert_dev_if_not_exists_devpath() argument
1520 struct rnbd_clt_session *sess = dev->sess; in insert_dev_if_not_exists_devpath()
1523 found = __exists_dev(dev->pathname, sess->sessname); in insert_dev_if_not_exists_devpath()
1526 list_add_tail(&dev->list, &sess->devs_list); in insert_dev_if_not_exists_devpath()
1534 static void delete_dev(struct rnbd_clt_dev *dev) in delete_dev() argument
1536 struct rnbd_clt_session *sess = dev->sess; in delete_dev()
1539 list_del(&dev->list); in delete_dev()
1551 struct rnbd_clt_dev *dev; in rnbd_clt_map_device() local
1568 dev = init_dev(sess, access_mode, pathname, nr_poll_queues); in rnbd_clt_map_device()
1569 if (IS_ERR(dev)) { in rnbd_clt_map_device()
1571 pathname, sess->sessname, PTR_ERR(dev)); in rnbd_clt_map_device()
1572 ret = PTR_ERR(dev); in rnbd_clt_map_device()
1575 if (insert_dev_if_not_exists_devpath(dev)) { in rnbd_clt_map_device()
1593 iu->dev = dev; in rnbd_clt_map_device()
1597 msg.access_mode = dev->access_mode; in rnbd_clt_map_device()
1598 strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name)); in rnbd_clt_map_device()
1600 WARN_ON(!rnbd_clt_get_dev(dev)); in rnbd_clt_map_device()
1605 rnbd_clt_put_dev(dev); in rnbd_clt_map_device()
1611 rnbd_clt_err(dev, in rnbd_clt_map_device()
1616 mutex_lock(&dev->lock); in rnbd_clt_map_device()
1619 ret = rnbd_client_setup_device(dev, rsp); in rnbd_clt_map_device()
1621 rnbd_clt_err(dev, in rnbd_clt_map_device()
1624 mutex_unlock(&dev->lock); in rnbd_clt_map_device()
1628 rnbd_clt_info(dev, in rnbd_clt_map_device()
1630 dev->gd->disk_name, le64_to_cpu(rsp->nsectors), in rnbd_clt_map_device()
1641 mutex_unlock(&dev->lock); in rnbd_clt_map_device()
1646 return dev; in rnbd_clt_map_device()
1649 send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT); in rnbd_clt_map_device()
1654 delete_dev(dev); in rnbd_clt_map_device()
1656 rnbd_clt_put_dev(dev); in rnbd_clt_map_device()
1663 static void destroy_gen_disk(struct rnbd_clt_dev *dev) in destroy_gen_disk() argument
1665 del_gendisk(dev->gd); in destroy_gen_disk()
1666 put_disk(dev->gd); in destroy_gen_disk()
1669 static void destroy_sysfs(struct rnbd_clt_dev *dev, in destroy_sysfs() argument
1672 rnbd_clt_remove_dev_symlink(dev); in destroy_sysfs()
1673 if (dev->kobj.state_initialized) { in destroy_sysfs()
1676 sysfs_remove_file_self(&dev->kobj, sysfs_self); in destroy_sysfs()
1677 kobject_del(&dev->kobj); in destroy_sysfs()
1678 kobject_put(&dev->kobj); in destroy_sysfs()
1682 int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force, in rnbd_clt_unmap_device() argument
1685 struct rnbd_clt_session *sess = dev->sess; in rnbd_clt_unmap_device()
1689 mutex_lock(&dev->lock); in rnbd_clt_unmap_device()
1690 if (dev->dev_state == DEV_STATE_UNMAPPED) { in rnbd_clt_unmap_device()
1691 rnbd_clt_info(dev, "Device is already being unmapped\n"); in rnbd_clt_unmap_device()
1695 refcount = refcount_read(&dev->refcount); in rnbd_clt_unmap_device()
1697 rnbd_clt_err(dev, in rnbd_clt_unmap_device()
1703 was_mapped = (dev->dev_state == DEV_STATE_MAPPED); in rnbd_clt_unmap_device()
1704 dev->dev_state = DEV_STATE_UNMAPPED; in rnbd_clt_unmap_device()
1705 mutex_unlock(&dev->lock); in rnbd_clt_unmap_device()
1707 delete_dev(dev); in rnbd_clt_unmap_device()
1708 destroy_sysfs(dev, sysfs_self); in rnbd_clt_unmap_device()
1709 destroy_gen_disk(dev); in rnbd_clt_unmap_device()
1711 send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT); in rnbd_clt_unmap_device()
1713 rnbd_clt_info(dev, "Device is unmapped\n"); in rnbd_clt_unmap_device()
1716 rnbd_clt_put_dev(dev); in rnbd_clt_unmap_device()
1724 mutex_unlock(&dev->lock); in rnbd_clt_unmap_device()
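
rnbd_clt_unmap_device() only proceeds with a non-forced unmap when no extra references are held; an open file handle keeps one via rnbd_client_open(), so an in-use device is refused. The refcount gate, as a sketch (-EBUSY is an assumed errno, the conventional "still in use" answer):

	/* Sketch of the busy gate in rnbd_clt_unmap_device(). */
	static int unmap_gate_sketch(struct rnbd_clt_dev *dev, bool force)
	{
		unsigned int refcount;

		mutex_lock(&dev->lock);
		refcount = refcount_read(&dev->refcount);
		if (!force && refcount > 1) {
			/* e.g. an opener still holds a reference */
			mutex_unlock(&dev->lock);
			return -EBUSY;	/* assumed errno */
		}
		dev->dev_state = DEV_STATE_UNMAPPED;
		mutex_unlock(&dev->lock);
		return 0;
	}
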
1729 int rnbd_clt_remap_device(struct rnbd_clt_dev *dev) in rnbd_clt_remap_device() argument
1733 mutex_lock(&dev->lock); in rnbd_clt_remap_device()
1734 if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) in rnbd_clt_remap_device()
1736 else if (dev->dev_state == DEV_STATE_UNMAPPED) in rnbd_clt_remap_device()
1738 else if (dev->dev_state == DEV_STATE_MAPPED) in rnbd_clt_remap_device()
1742 mutex_unlock(&dev->lock); in rnbd_clt_remap_device()
1744 rnbd_clt_info(dev, "Remapping device.\n"); in rnbd_clt_remap_device()
1745 err = send_msg_open(dev, RTRS_PERMIT_WAIT); in rnbd_clt_remap_device()
1747 rnbd_clt_err(dev, "remap_device: %d\n", err); in rnbd_clt_remap_device()
1755 struct rnbd_clt_dev *dev; in unmap_device_work() local
1757 dev = container_of(work, typeof(*dev), unmap_on_rmmod_work); in unmap_device_work()
1758 rnbd_clt_unmap_device(dev, true, NULL); in unmap_device_work()
1764 struct rnbd_clt_dev *dev, *tn; in rnbd_destroy_sessions() local
1788 list_for_each_entry_safe(dev, tn, &sess->devs_list, list) { in rnbd_destroy_sessions()
1795 INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work); in rnbd_destroy_sessions()
1796 queue_work(rnbd_clt_wq, &dev->unmap_on_rmmod_work); in rnbd_destroy_sessions()
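
On module removal, rnbd_destroy_sessions() does not unmap devices inline while walking the list; each device gets its own work item on rnbd_clt_wq, so the per-device teardowns need not run serially. The handler recovers the device from the embedded work struct:

	/* Work handler: recover the device from its embedded work_struct
	 * and force-unmap it (no sysfs self-removal on the rmmod path). */
	static void unmap_device_work(struct work_struct *work)
	{
		struct rnbd_clt_dev *dev =
			container_of(work, typeof(*dev), unmap_on_rmmod_work);

		rnbd_clt_unmap_device(dev, true, NULL);
	}

	/* Per device, from the list walk: */
	INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
	queue_work(rnbd_clt_wq, &dev->unmap_on_rmmod_work);
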