--- nbd.c  (754451342fc5954061ede74b0a8485ec4a4c6eaa)
+++ nbd.c  (5657a819a8d94426c76be04dcedfad0f64cfff00)
 /*
  * Network block device - make block devices work over TCP
  *
  * Note that you can not swap over this thing, yet. Seems to work but
  * deadlocks sometimes - you can not swap over TCP in general.
  *
  * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
  * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>

--- 152 unchanged lines hidden ---

 {
         struct gendisk *disk = dev_to_disk(dev);
         struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

         return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
 }

 static const struct device_attribute pid_attr = {
-        .attr = { .name = "pid", .mode = S_IRUGO},
+        .attr = { .name = "pid", .mode = 0444},
         .show = pid_show,
 };

 static void nbd_dev_remove(struct nbd_device *nbd)
 {
         struct gendisk *disk = nbd->disk;
+        struct request_queue *q;
+
         if (disk) {
+                q = disk->queue;
                 del_gendisk(disk);
-                blk_cleanup_queue(disk->queue);
+                blk_cleanup_queue(q);
                 blk_mq_free_tag_set(&nbd->tag_set);
                 disk->private_data = NULL;
                 put_disk(disk);
         }
         kfree(nbd);
 }

 static void nbd_put(struct nbd_device *nbd)

--- 39 unchanged lines hidden ---

                 set_capacity(nbd->disk, 0);
                 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
         }
 }

 static void nbd_size_update(struct nbd_device *nbd)
 {
         struct nbd_config *config = nbd->config;
+        struct block_device *bdev = bdget_disk(nbd->disk, 0);
+
+        if (config->flags & NBD_FLAG_SEND_TRIM) {
+                nbd->disk->queue->limits.discard_granularity = config->blksize;
+                blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
+        }
         blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
         blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
         set_capacity(nbd->disk, config->bytesize >> 9);
+        if (bdev) {
+                if (bdev->bd_disk)
+                        bd_set_size(bdev, config->bytesize);
+                else
+                        bdev->bd_invalidated = 1;
+                bdput(bdev);
+        }
         kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
 }

 static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
                          loff_t nr_blocks)
 {
         struct nbd_config *config = nbd->config;
         config->blksize = blocksize;
         config->bytesize = blocksize * nr_blocks;
+        if (nbd->task_recv != NULL)
+                nbd_size_update(nbd);
 }

 static void nbd_complete_rq(struct request *req)
 {
         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

         dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
                 cmd->status ? "failed" : "done");

--- 691 unchanged lines hidden ---

         return -ENOSPC;
 }

 static void nbd_bdev_reset(struct block_device *bdev)
 {
         if (bdev->bd_openers > 1)
                 return;
         bd_set_size(bdev, 0);
-        if (max_part > 0) {
-                blkdev_reread_part(bdev);
-                bdev->bd_invalidated = 1;
-        }
 }

 static void nbd_parse_flags(struct nbd_device *nbd)
 {
         struct nbd_config *config = nbd->config;
         if (config->flags & NBD_FLAG_READ_ONLY)
                 set_disk_ro(nbd->disk, true);
         else

--- 70 unchanged lines hidden ---

                                 kfree(config->socks[i]);
                         }
                         kfree(config->socks);
                 }
                 kfree(nbd->config);
                 nbd->config = NULL;

                 nbd->tag_set.timeout = 0;
+                nbd->disk->queue->limits.discard_granularity = 0;
+                blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

                 mutex_unlock(&nbd->config_lock);
                 nbd_put(nbd);
                 module_put(THIS_MODULE);
         }
 }

--- 53 unchanged lines hidden ---

 {
         struct nbd_config *config = nbd->config;
         int ret;

         ret = nbd_start_device(nbd);
         if (ret)
                 return ret;

-        bd_set_size(bdev, config->bytesize);
         if (max_part)
                 bdev->bd_invalidated = 1;
         mutex_unlock(&nbd->config_lock);
         ret = wait_event_interruptible(config->recv_wq,
                                        atomic_read(&config->recv_threads) == 0);
         if (ret)
                 sock_shutdown(nbd);
         mutex_lock(&nbd->config_lock);
-        bd_set_size(bdev, 0);
+        nbd_bdev_reset(bdev);
         /* user requested, ignore socket errors */
         if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
                 ret = 0;
         if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
                 ret = -ETIMEDOUT;
         return ret;
 }

--- 134 unchanged lines hidden ---

                 if (!config) {
                         ret = -ENOMEM;
                         mutex_unlock(&nbd->config_lock);
                         goto out;
                 }
                 refcount_set(&nbd->config_refs, 1);
                 refcount_inc(&nbd->refs);
                 mutex_unlock(&nbd->config_lock);
+                bdev->bd_invalidated = 1;
+        } else if (nbd_disconnected(nbd->config)) {
+                bdev->bd_invalidated = 1;
         }
 out:
         mutex_unlock(&nbd_index_mutex);
         return ret;
 }

 static void nbd_release(struct gendisk *disk, fmode_t mode)
 {

--- 205 unchanged lines hidden ---

         }
         disk->queue = q;

         /*
          * Tell the block layer that we are not a rotational device
          */
         blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
-        disk->queue->limits.discard_granularity = 512;
-        blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
+        disk->queue->limits.discard_granularity = 0;
+        blk_queue_max_discard_sectors(disk->queue, 0);
         blk_queue_max_segment_size(disk->queue, UINT_MAX);
         blk_queue_max_segments(disk->queue, USHRT_MAX);
         blk_queue_max_hw_sectors(disk->queue, 65536);
         disk->queue->limits.max_sectors = 256;

         mutex_init(&nbd->config_lock);
         refcount_set(&nbd->config_refs, 0);
         refcount_set(&nbd->refs, 1);

--- 247 unchanged lines hidden ---

         }
         mutex_unlock(&nbd_index_mutex);
         if (!refcount_inc_not_zero(&nbd->config_refs)) {
                 nbd_put(nbd);
                 return 0;
         }
         mutex_lock(&nbd->config_lock);
         nbd_disconnect(nbd);
+        nbd_clear_sock(nbd);
         mutex_unlock(&nbd->config_lock);
         if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                                &nbd->config->runtime_flags))
                 nbd_config_put(nbd);
         nbd_config_put(nbd);
         nbd_put(nbd);
         return 0;
 }

--- 322 unchanged lines hidden ---

         }

         if ((1UL << part_shift) > DISK_MAX_PARTS)
                 return -EINVAL;

         if (nbds_max > 1UL << (MINORBITS - part_shift))
                 return -EINVAL;
         recv_workqueue = alloc_workqueue("knbd-recv",
-                                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+                                         WQ_MEM_RECLAIM | WQ_HIGHPRI |
+                                         WQ_UNBOUND, 0);
         if (!recv_workqueue)
                 return -ENOMEM;

         if (register_blkdev(NBD_MAJOR, "nbd")) {
                 destroy_workqueue(recv_workqueue);
                 return -EIO;
         }

--- 58 unchanged lines hidden ---
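Reading aid: the nbd_size_update()/nbd_size_set() change is scattered across several hunks above, so here are the two helpers as they read once this diff is applied, reassembled verbatim from the hunks; the inline comments are added for orientation only and are not part of either revision. Note also that, together with the nbd_dev_add() hunk that now starts the queue with discard_granularity = 0 and a zero max-discard limit, the diff leaves discard disabled on a freshly added device and only configures it here when the server has set NBD_FLAG_SEND_TRIM.

static void nbd_size_update(struct nbd_device *nbd)
{
        struct nbd_config *config = nbd->config;
        struct block_device *bdev = bdget_disk(nbd->disk, 0);

        /* Discard is only turned on when the server advertised TRIM support. */
        if (config->flags & NBD_FLAG_SEND_TRIM) {
                nbd->disk->queue->limits.discard_granularity = config->blksize;
                blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
        }
        blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
        blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
        set_capacity(nbd->disk, config->bytesize >> 9);
        /* Push the new size into an already set-up block device, or mark it
         * for revalidation on the next open, then drop the bdget_disk() ref. */
        if (bdev) {
                if (bdev->bd_disk)
                        bd_set_size(bdev, config->bytesize);
                else
                        bdev->bd_invalidated = 1;
                bdput(bdev);
        }
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
                         loff_t nr_blocks)
{
        struct nbd_config *config = nbd->config;
        config->blksize = blocksize;
        config->bytesize = blocksize * nr_blocks;
        /* Only propagate the geometry once a receive thread is attached. */
        if (nbd->task_recv != NULL)
                nbd_size_update(nbd);
}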