/*
 * Sharing QEMU block devices via vhost-user protocol
 *
 * Parts of the code based on nbd/server.c.
 *
 * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "block/block.h"
#include "contrib/libvhost-user/libvhost-user.h"
#include "standard-headers/linux/virtio_blk.h"
#include "qemu/vhost-user-server.h"
#include "vhost-user-blk-server.h"
#include "qapi/error.h"
#include "qom/object_interfaces.h"
#include "sysemu/block-backend.h"
#include "util/block-helpers.h"

enum {
    VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1,
};

struct virtio_blk_inhdr {
    unsigned char status;
};

typedef struct VuBlkReq {
    VuVirtqElement elem;
    int64_t sector_num;
    size_t size;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr out;
    VuServer *server;
    struct VuVirtq *vq;
} VuBlkReq;

/* vhost user block device */
typedef struct {
    BlockExport export;
    VuServer vu_server;
    uint32_t blk_size;
    QIOChannelSocket *sioc;
    struct virtio_blk_config blkcfg;
    bool writable;
} VuBlkExport;

static void vu_blk_req_complete(VuBlkReq *req)
{
    VuDev *vu_dev = &req->server->vu_dev;

    /* IO size with 1 extra status byte */
    vu_queue_push(vu_dev, req->vq, &req->elem, req->size + 1);
    vu_queue_notify(vu_dev, req->vq);

    free(req);
}

static int coroutine_fn
vu_blk_discard_write_zeroes(BlockBackend *blk, struct iovec *iov,
                            uint32_t iovcnt, uint32_t type)
{
    struct virtio_blk_discard_write_zeroes desc;
    ssize_t size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc));
    if (unlikely(size != sizeof(desc))) {
        error_report("Invalid size %zd, expect %zu", size, sizeof(desc));
        return -EINVAL;
    }

    uint64_t range[2] = { le64_to_cpu(desc.sector) << 9,
                          le32_to_cpu(desc.num_sectors) << 9 };
    if (type == VIRTIO_BLK_T_DISCARD) {
        if (blk_co_pdiscard(blk, range[0], range[1]) == 0) {
            return 0;
        }
    } else if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
        if (blk_co_pwrite_zeroes(blk, range[0], range[1], 0) == 0) {
            return 0;
        }
    }

    return -EINVAL;
}

static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
{
    VuBlkReq *req = opaque;
    VuServer *server = req->server;
    VuVirtqElement *elem = &req->elem;
    uint32_t type;

    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    BlockBackend *blk = vexp->export.blk;

    struct iovec *in_iov = elem->in_sg;
    struct iovec *out_iov = elem->out_sg;
    unsigned in_num = elem->in_num;
    unsigned out_num = elem->out_num;

    /* refer to hw/block/virtio_blk.c */
    if (elem->out_num < 1 || elem->in_num < 1) {
        error_report("virtio-blk request missing headers");
        goto err;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        error_report("virtio-blk request outhdr too short");
        goto err;
    }

    iov_discard_front(&out_iov, &out_num, sizeof(req->out));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        error_report("virtio-blk request inhdr too short");
        goto err;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    type = le32_to_cpu(req->out.type);
    switch (type & ~VIRTIO_BLK_T_BARRIER) {
    case VIRTIO_BLK_T_IN:
    case VIRTIO_BLK_T_OUT: {
        ssize_t ret = 0;
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = le64_to_cpu(req->out.sector);

        if (is_write && !vexp->writable) {
            req->in->status = VIRTIO_BLK_S_IOERR;
            break;
        }

        int64_t offset = req->sector_num * vexp->blk_size;
        QEMUIOVector qiov;
        if (is_write) {
            qemu_iovec_init_external(&qiov, out_iov, out_num);
            ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0);
        } else {
            qemu_iovec_init_external(&qiov, in_iov, in_num);
            ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0);
        }
        if (ret >= 0) {
            req->in->status = VIRTIO_BLK_S_OK;
        } else {
            req->in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        if (blk_co_flush(blk) == 0) {
            req->in->status = VIRTIO_BLK_S_OK;
        } else {
            req->in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    case VIRTIO_BLK_T_GET_ID: {
        size_t size = MIN(iov_size(&elem->in_sg[0], in_num),
                          VIRTIO_BLK_ID_BYTES);
        snprintf(elem->in_sg[0].iov_base, size, "%s", "vhost_user_blk");
        req->in->status = VIRTIO_BLK_S_OK;
        req->size = elem->in_sg[0].iov_len;
        break;
    }
    case VIRTIO_BLK_T_DISCARD:
    case VIRTIO_BLK_T_WRITE_ZEROES: {
        int rc;

        if (!vexp->writable) {
            req->in->status = VIRTIO_BLK_S_IOERR;
            break;
        }

        rc = vu_blk_discard_write_zeroes(blk, &elem->out_sg[1], out_num, type);
        if (rc == 0) {
            req->in->status = VIRTIO_BLK_S_OK;
        } else {
            req->in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    }
    default:
        req->in->status = VIRTIO_BLK_S_UNSUPP;
        break;
    }

    vu_blk_req_complete(req);
    return;

err:
    free(req);
}

static void vu_blk_process_vq(VuDev *vu_dev, int idx)
{
    VuServer *server = container_of(vu_dev, VuServer, vu_dev);
    VuVirtq *vq = vu_get_queue(vu_dev, idx);

    while (1) {
        VuBlkReq *req;

        req = vu_queue_pop(vu_dev, vq, sizeof(VuBlkReq));
        if (!req) {
            break;
        }

        req->server = server;
        req->vq = vq;

        Coroutine *co =
            qemu_coroutine_create(vu_blk_virtio_process_req, req);
        qemu_coroutine_enter(co);
    }
}

static void vu_blk_queue_set_started(VuDev *vu_dev, int idx, bool started)
{
    VuVirtq *vq;

    assert(vu_dev);

    vq = vu_get_queue(vu_dev, idx);
    vu_set_queue_handler(vu_dev, vq, started ? vu_blk_process_vq : NULL);
}

static uint64_t vu_blk_get_features(VuDev *dev)
{
    uint64_t features;
    VuServer *server = container_of(dev, VuServer, vu_dev);
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    features = 1ull << VIRTIO_BLK_F_SIZE_MAX |
               1ull << VIRTIO_BLK_F_SEG_MAX |
               1ull << VIRTIO_BLK_F_TOPOLOGY |
               1ull << VIRTIO_BLK_F_BLK_SIZE |
               1ull << VIRTIO_BLK_F_FLUSH |
               1ull << VIRTIO_BLK_F_DISCARD |
               1ull << VIRTIO_BLK_F_WRITE_ZEROES |
               1ull << VIRTIO_BLK_F_CONFIG_WCE |
               1ull << VIRTIO_BLK_F_MQ |
               1ull << VIRTIO_F_VERSION_1 |
               1ull << VIRTIO_RING_F_INDIRECT_DESC |
               1ull << VIRTIO_RING_F_EVENT_IDX |
               1ull << VHOST_USER_F_PROTOCOL_FEATURES;

    if (!vexp->writable) {
        features |= 1ull << VIRTIO_BLK_F_RO;
    }

    return features;
}

static uint64_t vu_blk_get_protocol_features(VuDev *dev)
{
    return 1ull << VHOST_USER_PROTOCOL_F_CONFIG |
           1ull << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD;
}

static int
vu_blk_get_config(VuDev *vu_dev, uint8_t *config, uint32_t len)
{
    /* TODO blkcfg must be little-endian for VIRTIO 1.0 */
    VuServer *server = container_of(vu_dev, VuServer, vu_dev);
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    memcpy(config, &vexp->blkcfg, len);
    return 0;
}

static int
vu_blk_set_config(VuDev *vu_dev, const uint8_t *data,
                  uint32_t offset, uint32_t size, uint32_t flags)
{
    VuServer *server = container_of(vu_dev, VuServer, vu_dev);
    VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
    uint8_t wce;

    /* don't support live migration */
    if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
        return -EINVAL;
    }

    if (offset != offsetof(struct virtio_blk_config, wce) ||
        size != 1) {
        return -EINVAL;
    }

    wce = *data;
    vexp->blkcfg.wce = wce;
    blk_set_enable_write_cache(vexp->export.blk, wce);
    return 0;
}

/*
 * When the client disconnects, it sends a VHOST_USER_NONE request,
 * and vu_process_message would simply call exit(), causing the VM
 * to exit abruptly.
 * To avoid this issue, process the VHOST_USER_NONE request ahead
 * of vu_process_message.
 */
static int vu_blk_process_msg(VuDev *dev, VhostUserMsg *vmsg, int *do_reply)
{
    if (vmsg->request == VHOST_USER_NONE) {
        dev->panic(dev, "disconnect");
        return true;
    }
    return false;
}

static const VuDevIface vu_blk_iface = {
    .get_features = vu_blk_get_features,
    .queue_set_started = vu_blk_queue_set_started,
    .get_protocol_features = vu_blk_get_protocol_features,
    .get_config = vu_blk_get_config,
    .set_config = vu_blk_set_config,
    .process_msg = vu_blk_process_msg,
};

static void blk_aio_attached(AioContext *ctx, void *opaque)
{
    VuBlkExport *vexp = opaque;

    vexp->export.ctx = ctx;
    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
}

static void blk_aio_detach(void *opaque)
{
    VuBlkExport *vexp = opaque;

    vhost_user_server_detach_aio_context(&vexp->vu_server);
    vexp->export.ctx = NULL;
}

static void
vu_blk_initialize_config(BlockDriverState *bs,
                         struct virtio_blk_config *config,
                         uint32_t blk_size,
                         uint16_t num_queues)
{
    config->capacity = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    config->blk_size = blk_size;
    config->size_max = 0;
    config->seg_max = 128 - 2;
    config->min_io_size = 1;
    config->opt_io_size = 1;
    config->num_queues = num_queues;
    config->max_discard_sectors = 32768;
    config->max_discard_seg = 1;
    config->discard_sector_alignment = config->blk_size >> 9;
    config->max_write_zeroes_sectors = 32768;
    config->max_write_zeroes_seg = 1;
}

static void vu_blk_exp_request_shutdown(BlockExport *exp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);

    vhost_user_server_stop(&vexp->vu_server);
}

static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
                             Error **errp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
    BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk;
    Error *local_err = NULL;
    uint64_t logical_block_size;
    uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT;

    vexp->writable = opts->writable;
    vexp->blkcfg.wce = 0;

    if (vu_opts->has_logical_block_size) {
        logical_block_size = vu_opts->logical_block_size;
    } else {
        logical_block_size = BDRV_SECTOR_SIZE;
    }
    check_block_size(exp->id, "logical-block-size", logical_block_size,
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    vexp->blk_size = logical_block_size;
    blk_set_guest_block_size(exp->blk, logical_block_size);

    if (vu_opts->has_num_queues) {
        num_queues = vu_opts->num_queues;
    }
    if (num_queues == 0) {
        error_setg(errp, "num-queues must be greater than 0");
        return -EINVAL;
    }

    vu_blk_initialize_config(blk_bs(exp->blk), &vexp->blkcfg,
                             logical_block_size, num_queues);

    blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
                                 vexp);

    if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx,
                                 num_queues, &vu_blk_iface, errp)) {
        blk_remove_aio_context_notifier(exp->blk, blk_aio_attached,
                                        blk_aio_detach, vexp);
        return -EADDRNOTAVAIL;
    }

    return 0;
}

static void vu_blk_exp_delete(BlockExport *exp)
{
    VuBlkExport *vexp = container_of(exp, VuBlkExport, export);

    blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
                                    vexp);
}
const BlockExportDriver blk_exp_vhost_user_blk = {
    .type = BLOCK_EXPORT_TYPE_VHOST_USER_BLK,
    .instance_size = sizeof(VuBlkExport),
    .create = vu_blk_exp_create,
    .delete = vu_blk_exp_delete,
    .request_shutdown = vu_blk_exp_request_shutdown,
};
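/*
 * Usage sketch (not part of the build): this export is normally started via
 * the block-export-add QAPI command or the qemu-storage-daemon --export
 * option.  The option names below follow the BlockExportOptionsVhostUserBlk
 * schema consumed by vu_blk_exp_create() above, but the exact spelling can
 * differ between QEMU versions, so treat the invocation as an illustration
 * only:
 *
 *   qemu-storage-daemon \
 *       --blockdev driver=file,node-name=file0,filename=disk.img \
 *       --export type=vhost-user-blk,id=exp0,node-name=file0,writable=on,addr.type=unix,addr.path=/tmp/vhost-user-blk.sock
 *
 * A vhost-user-blk front-end (for example a QEMU guest device) then connects
 * to the UNIX socket, and its virtio-blk requests are handled by
 * vu_blk_virtio_process_req().
 */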