1 // SPDX-License-Identifier: GPL-2.0+ 2 /******************************************************************************* 3 * Vhost kernel TCM fabric driver for virtio SCSI initiators 4 * 5 * (C) Copyright 2010-2013 Datera, Inc. 6 * (C) Copyright 2010-2012 IBM Corp. 7 * 8 * Authors: Nicholas A. Bellinger <nab@daterainc.com> 9 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> 10 ****************************************************************************/ 11 12 #include <linux/module.h> 13 #include <linux/moduleparam.h> 14 #include <generated/utsrelease.h> 15 #include <linux/utsname.h> 16 #include <linux/init.h> 17 #include <linux/slab.h> 18 #include <linux/kthread.h> 19 #include <linux/types.h> 20 #include <linux/string.h> 21 #include <linux/configfs.h> 22 #include <linux/ctype.h> 23 #include <linux/compat.h> 24 #include <linux/eventfd.h> 25 #include <linux/fs.h> 26 #include <linux/vmalloc.h> 27 #include <linux/miscdevice.h> 28 #include <asm/unaligned.h> 29 #include <scsi/scsi_common.h> 30 #include <scsi/scsi_proto.h> 31 #include <target/target_core_base.h> 32 #include <target/target_core_fabric.h> 33 #include <linux/vhost.h> 34 #include <linux/virtio_scsi.h> 35 #include <linux/llist.h> 36 #include <linux/bitmap.h> 37 38 #include "vhost.h" 39 40 #define VHOST_SCSI_VERSION "v0.1" 41 #define VHOST_SCSI_NAMELEN 256 42 #define VHOST_SCSI_MAX_CDB_SIZE 32 43 #define VHOST_SCSI_PREALLOC_SGLS 2048 44 #define VHOST_SCSI_PREALLOC_UPAGES 2048 45 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048 46 47 /* Max number of requests before requeueing the job. 48 * Using this limit prevents one virtqueue from starving others with 49 * request. 50 */ 51 #define VHOST_SCSI_WEIGHT 256 52 53 struct vhost_scsi_inflight { 54 /* Wait for the flush operation to finish */ 55 struct completion comp; 56 /* Refcount for the inflight reqs */ 57 struct kref kref; 58 }; 59 60 struct vhost_scsi_cmd { 61 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ 62 int tvc_vq_desc; 63 /* virtio-scsi initiator task attribute */ 64 int tvc_task_attr; 65 /* virtio-scsi response incoming iovecs */ 66 int tvc_in_iovs; 67 /* virtio-scsi initiator data direction */ 68 enum dma_data_direction tvc_data_direction; 69 /* Expected data transfer length from virtio-scsi header */ 70 u32 tvc_exp_data_len; 71 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ 72 u64 tvc_tag; 73 /* The number of scatterlists associated with this cmd */ 74 u32 tvc_sgl_count; 75 u32 tvc_prot_sgl_count; 76 /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */ 77 u32 tvc_lun; 78 /* Pointer to the SGL formatted memory from virtio-scsi */ 79 struct scatterlist *tvc_sgl; 80 struct scatterlist *tvc_prot_sgl; 81 struct page **tvc_upages; 82 /* Pointer to response header iovec */ 83 struct iovec tvc_resp_iov; 84 /* Pointer to vhost_scsi for our device */ 85 struct vhost_scsi *tvc_vhost; 86 /* Pointer to vhost_virtqueue for the cmd */ 87 struct vhost_virtqueue *tvc_vq; 88 /* Pointer to vhost nexus memory */ 89 struct vhost_scsi_nexus *tvc_nexus; 90 /* The TCM I/O descriptor that is accessed via container_of() */ 91 struct se_cmd tvc_se_cmd; 92 /* Copy of the incoming SCSI command descriptor block (CDB) */ 93 unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE]; 94 /* Sense buffer that will be mapped into outgoing status */ 95 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; 96 /* Completed commands list, serviced from vhost worker thread */ 97 struct llist_node tvc_completion_list; 98 /* Used to track inflight cmd */ 99 struct vhost_scsi_inflight *inflight; 100 }; 101 102 struct vhost_scsi_nexus { 103 /* Pointer to TCM session for I_T Nexus */ 104 struct se_session *tvn_se_sess; 105 }; 106 107 struct vhost_scsi_tpg { 108 /* Vhost port target portal group tag for TCM */ 109 u16 tport_tpgt; 110 /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ 111 int tv_tpg_port_count; 112 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ 113 int tv_tpg_vhost_count; 114 /* Used for enabling T10-PI with legacy devices */ 115 int tv_fabric_prot_type; 116 /* list for vhost_scsi_list */ 117 struct list_head tv_tpg_list; 118 /* Used to protect access for tpg_nexus */ 119 struct mutex tv_tpg_mutex; 120 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ 121 struct vhost_scsi_nexus *tpg_nexus; 122 /* Pointer back to vhost_scsi_tport */ 123 struct vhost_scsi_tport *tport; 124 /* Returned by vhost_scsi_make_tpg() */ 125 struct se_portal_group se_tpg; 126 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ 127 struct vhost_scsi *vhost_scsi; 128 struct list_head tmf_queue; 129 }; 130 131 struct vhost_scsi_tport { 132 /* SCSI protocol the tport is providing */ 133 u8 tport_proto_id; 134 /* Binary World Wide unique Port Name for Vhost Target port */ 135 u64 tport_wwpn; 136 /* ASCII formatted WWPN for Vhost Target port */ 137 char tport_name[VHOST_SCSI_NAMELEN]; 138 /* Returned by vhost_scsi_make_tport() */ 139 struct se_wwn tport_wwn; 140 }; 141 142 struct vhost_scsi_evt { 143 /* event to be sent to guest */ 144 struct virtio_scsi_event event; 145 /* event list, serviced from vhost worker thread */ 146 struct llist_node list; 147 }; 148 149 enum { 150 VHOST_SCSI_VQ_CTL = 0, 151 VHOST_SCSI_VQ_EVT = 1, 152 VHOST_SCSI_VQ_IO = 2, 153 }; 154 155 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ 156 enum { 157 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | 158 (1ULL << VIRTIO_SCSI_F_T10_PI) 159 }; 160 161 #define VHOST_SCSI_MAX_TARGET 256 162 #define VHOST_SCSI_MAX_VQ 128 163 #define VHOST_SCSI_MAX_EVENT 128 164 165 struct vhost_scsi_virtqueue { 166 struct vhost_virtqueue vq; 167 /* 168 * Reference counting for inflight reqs, used for flush operation. At 169 * each time, one reference tracks new commands submitted, while we 170 * wait for another one to reach 0. 171 */ 172 struct vhost_scsi_inflight inflights[2]; 173 /* 174 * Indicate current inflight in use, protected by vq->mutex. 175 * Writers must also take dev mutex and flush under it. 176 */ 177 int inflight_idx; 178 struct vhost_scsi_cmd *scsi_cmds; 179 struct sbitmap scsi_tags; 180 int max_cmds; 181 }; 182 183 struct vhost_scsi { 184 /* Protected by vhost_scsi->dev.mutex */ 185 struct vhost_scsi_tpg **vs_tpg; 186 char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; 187 188 struct vhost_dev dev; 189 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ]; 190 191 struct vhost_work vs_completion_work; /* cmd completion work item */ 192 struct llist_head vs_completion_list; /* cmd completion queue */ 193 194 struct vhost_work vs_event_work; /* evt injection work item */ 195 struct llist_head vs_event_list; /* evt injection queue */ 196 197 bool vs_events_missed; /* any missed events, protected by vq->mutex */ 198 int vs_events_nr; /* num of pending events, protected by vq->mutex */ 199 }; 200 201 struct vhost_scsi_tmf { 202 struct vhost_work vwork; 203 struct vhost_scsi_tpg *tpg; 204 struct vhost_scsi *vhost; 205 struct vhost_scsi_virtqueue *svq; 206 struct list_head queue_entry; 207 208 struct se_cmd se_cmd; 209 u8 scsi_resp; 210 struct vhost_scsi_inflight *inflight; 211 struct iovec resp_iov; 212 int in_iovs; 213 int vq_desc; 214 }; 215 216 /* 217 * Context for processing request and control queue operations. 218 */ 219 struct vhost_scsi_ctx { 220 int head; 221 unsigned int out, in; 222 size_t req_size, rsp_size; 223 size_t out_size, in_size; 224 u8 *target, *lunp; 225 void *req; 226 struct iov_iter out_iter; 227 }; 228 229 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ 230 static DEFINE_MUTEX(vhost_scsi_mutex); 231 static LIST_HEAD(vhost_scsi_list); 232 233 static void vhost_scsi_done_inflight(struct kref *kref) 234 { 235 struct vhost_scsi_inflight *inflight; 236 237 inflight = container_of(kref, struct vhost_scsi_inflight, kref); 238 complete(&inflight->comp); 239 } 240 241 static void vhost_scsi_init_inflight(struct vhost_scsi *vs, 242 struct vhost_scsi_inflight *old_inflight[]) 243 { 244 struct vhost_scsi_inflight *new_inflight; 245 struct vhost_virtqueue *vq; 246 int idx, i; 247 248 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 249 vq = &vs->vqs[i].vq; 250 251 mutex_lock(&vq->mutex); 252 253 /* store old infight */ 254 idx = vs->vqs[i].inflight_idx; 255 if (old_inflight) 256 old_inflight[i] = &vs->vqs[i].inflights[idx]; 257 258 /* setup new infight */ 259 vs->vqs[i].inflight_idx = idx ^ 1; 260 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; 261 kref_init(&new_inflight->kref); 262 init_completion(&new_inflight->comp); 263 264 mutex_unlock(&vq->mutex); 265 } 266 } 267 268 static struct vhost_scsi_inflight * 269 vhost_scsi_get_inflight(struct vhost_virtqueue *vq) 270 { 271 struct vhost_scsi_inflight *inflight; 272 struct vhost_scsi_virtqueue *svq; 273 274 svq = container_of(vq, struct vhost_scsi_virtqueue, vq); 275 inflight = &svq->inflights[svq->inflight_idx]; 276 kref_get(&inflight->kref); 277 278 return inflight; 279 } 280 281 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight) 282 { 283 kref_put(&inflight->kref, vhost_scsi_done_inflight); 284 } 285 286 static int vhost_scsi_check_true(struct se_portal_group *se_tpg) 287 { 288 return 1; 289 } 290 291 static int vhost_scsi_check_false(struct se_portal_group *se_tpg) 292 { 293 return 0; 294 } 295 296 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) 297 { 298 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 299 struct vhost_scsi_tpg, se_tpg); 300 struct vhost_scsi_tport *tport = tpg->tport; 301 302 return &tport->tport_name[0]; 303 } 304 305 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg) 306 { 307 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 308 struct vhost_scsi_tpg, se_tpg); 309 return tpg->tport_tpgt; 310 } 311 312 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) 313 { 314 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 315 struct vhost_scsi_tpg, se_tpg); 316 317 return tpg->tv_fabric_prot_type; 318 } 319 320 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) 321 { 322 return 1; 323 } 324 325 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd) 326 { 327 struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd, 328 struct vhost_scsi_cmd, tvc_se_cmd); 329 struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq, 330 struct vhost_scsi_virtqueue, vq); 331 struct vhost_scsi_inflight *inflight = tv_cmd->inflight; 332 int i; 333 334 if (tv_cmd->tvc_sgl_count) { 335 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 336 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 337 } 338 if (tv_cmd->tvc_prot_sgl_count) { 339 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++) 340 put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); 341 } 342 343 sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag); 344 vhost_scsi_put_inflight(inflight); 345 } 346 347 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf) 348 { 349 struct vhost_scsi_tpg *tpg = tmf->tpg; 350 struct vhost_scsi_inflight *inflight = tmf->inflight; 351 352 mutex_lock(&tpg->tv_tpg_mutex); 353 list_add_tail(&tpg->tmf_queue, &tmf->queue_entry); 354 mutex_unlock(&tpg->tv_tpg_mutex); 355 vhost_scsi_put_inflight(inflight); 356 } 357 358 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) 359 { 360 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { 361 struct vhost_scsi_tmf *tmf = container_of(se_cmd, 362 struct vhost_scsi_tmf, se_cmd); 363 364 vhost_work_queue(&tmf->vhost->dev, &tmf->vwork); 365 } else { 366 struct vhost_scsi_cmd *cmd = container_of(se_cmd, 367 struct vhost_scsi_cmd, tvc_se_cmd); 368 struct vhost_scsi *vs = cmd->tvc_vhost; 369 370 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); 371 vhost_work_queue(&vs->dev, &vs->vs_completion_work); 372 } 373 } 374 375 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) 376 { 377 return 0; 378 } 379 380 static int vhost_scsi_write_pending(struct se_cmd *se_cmd) 381 { 382 /* Go ahead and process the write immediately */ 383 target_execute_cmd(se_cmd); 384 return 0; 385 } 386 387 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl) 388 { 389 return; 390 } 391 392 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) 393 { 394 return 0; 395 } 396 397 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd) 398 { 399 transport_generic_free_cmd(se_cmd, 0); 400 return 0; 401 } 402 403 static int vhost_scsi_queue_status(struct se_cmd *se_cmd) 404 { 405 transport_generic_free_cmd(se_cmd, 0); 406 return 0; 407 } 408 409 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd) 410 { 411 struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf, 412 se_cmd); 413 414 tmf->scsi_resp = se_cmd->se_tmr_req->response; 415 transport_generic_free_cmd(&tmf->se_cmd, 0); 416 } 417 418 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd) 419 { 420 return; 421 } 422 423 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) 424 { 425 vs->vs_events_nr--; 426 kfree(evt); 427 } 428 429 static struct vhost_scsi_evt * 430 vhost_scsi_allocate_evt(struct vhost_scsi *vs, 431 u32 event, u32 reason) 432 { 433 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 434 struct vhost_scsi_evt *evt; 435 436 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { 437 vs->vs_events_missed = true; 438 return NULL; 439 } 440 441 evt = kzalloc(sizeof(*evt), GFP_KERNEL); 442 if (!evt) { 443 vq_err(vq, "Failed to allocate vhost_scsi_evt\n"); 444 vs->vs_events_missed = true; 445 return NULL; 446 } 447 448 evt->event.event = cpu_to_vhost32(vq, event); 449 evt->event.reason = cpu_to_vhost32(vq, reason); 450 vs->vs_events_nr++; 451 452 return evt; 453 } 454 455 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd) 456 { 457 return target_put_sess_cmd(se_cmd); 458 } 459 460 static void 461 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) 462 { 463 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 464 struct virtio_scsi_event *event = &evt->event; 465 struct virtio_scsi_event __user *eventp; 466 unsigned out, in; 467 int head, ret; 468 469 if (!vhost_vq_get_backend(vq)) { 470 vs->vs_events_missed = true; 471 return; 472 } 473 474 again: 475 vhost_disable_notify(&vs->dev, vq); 476 head = vhost_get_vq_desc(vq, vq->iov, 477 ARRAY_SIZE(vq->iov), &out, &in, 478 NULL, NULL); 479 if (head < 0) { 480 vs->vs_events_missed = true; 481 return; 482 } 483 if (head == vq->num) { 484 if (vhost_enable_notify(&vs->dev, vq)) 485 goto again; 486 vs->vs_events_missed = true; 487 return; 488 } 489 490 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) { 491 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n", 492 vq->iov[out].iov_len); 493 vs->vs_events_missed = true; 494 return; 495 } 496 497 if (vs->vs_events_missed) { 498 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED); 499 vs->vs_events_missed = false; 500 } 501 502 eventp = vq->iov[out].iov_base; 503 ret = __copy_to_user(eventp, event, sizeof(*event)); 504 if (!ret) 505 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 506 else 507 vq_err(vq, "Faulted on vhost_scsi_send_event\n"); 508 } 509 510 static void vhost_scsi_evt_work(struct vhost_work *work) 511 { 512 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 513 vs_event_work); 514 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 515 struct vhost_scsi_evt *evt, *t; 516 struct llist_node *llnode; 517 518 mutex_lock(&vq->mutex); 519 llnode = llist_del_all(&vs->vs_event_list); 520 llist_for_each_entry_safe(evt, t, llnode, list) { 521 vhost_scsi_do_evt_work(vs, evt); 522 vhost_scsi_free_evt(vs, evt); 523 } 524 mutex_unlock(&vq->mutex); 525 } 526 527 /* Fill in status and signal that we are done processing this command 528 * 529 * This is scheduled in the vhost work queue so we are called with the owner 530 * process mm and can access the vring. 531 */ 532 static void vhost_scsi_complete_cmd_work(struct vhost_work *work) 533 { 534 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 535 vs_completion_work); 536 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); 537 struct virtio_scsi_cmd_resp v_rsp; 538 struct vhost_scsi_cmd *cmd, *t; 539 struct llist_node *llnode; 540 struct se_cmd *se_cmd; 541 struct iov_iter iov_iter; 542 int ret, vq; 543 544 bitmap_zero(signal, VHOST_SCSI_MAX_VQ); 545 llnode = llist_del_all(&vs->vs_completion_list); 546 llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) { 547 se_cmd = &cmd->tvc_se_cmd; 548 549 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, 550 cmd, se_cmd->residual_count, se_cmd->scsi_status); 551 552 memset(&v_rsp, 0, sizeof(v_rsp)); 553 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count); 554 /* TODO is status_qualifier field needed? */ 555 v_rsp.status = se_cmd->scsi_status; 556 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, 557 se_cmd->scsi_sense_length); 558 memcpy(v_rsp.sense, cmd->tvc_sense_buf, 559 se_cmd->scsi_sense_length); 560 561 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov, 562 cmd->tvc_in_iovs, sizeof(v_rsp)); 563 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); 564 if (likely(ret == sizeof(v_rsp))) { 565 struct vhost_scsi_virtqueue *q; 566 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); 567 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); 568 vq = q - vs->vqs; 569 __set_bit(vq, signal); 570 } else 571 pr_err("Faulted on virtio_scsi_cmd_resp\n"); 572 573 vhost_scsi_release_cmd_res(se_cmd); 574 } 575 576 vq = -1; 577 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1)) 578 < VHOST_SCSI_MAX_VQ) 579 vhost_signal(&vs->dev, &vs->vqs[vq].vq); 580 } 581 582 static struct vhost_scsi_cmd * 583 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, 584 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, 585 u32 exp_data_len, int data_direction) 586 { 587 struct vhost_scsi_virtqueue *svq = container_of(vq, 588 struct vhost_scsi_virtqueue, vq); 589 struct vhost_scsi_cmd *cmd; 590 struct vhost_scsi_nexus *tv_nexus; 591 struct scatterlist *sg, *prot_sg; 592 struct page **pages; 593 int tag; 594 595 tv_nexus = tpg->tpg_nexus; 596 if (!tv_nexus) { 597 pr_err("Unable to locate active struct vhost_scsi_nexus\n"); 598 return ERR_PTR(-EIO); 599 } 600 601 tag = sbitmap_get(&svq->scsi_tags); 602 if (tag < 0) { 603 pr_err("Unable to obtain tag for vhost_scsi_cmd\n"); 604 return ERR_PTR(-ENOMEM); 605 } 606 607 cmd = &svq->scsi_cmds[tag]; 608 sg = cmd->tvc_sgl; 609 prot_sg = cmd->tvc_prot_sgl; 610 pages = cmd->tvc_upages; 611 memset(cmd, 0, sizeof(*cmd)); 612 cmd->tvc_sgl = sg; 613 cmd->tvc_prot_sgl = prot_sg; 614 cmd->tvc_upages = pages; 615 cmd->tvc_se_cmd.map_tag = tag; 616 cmd->tvc_tag = scsi_tag; 617 cmd->tvc_lun = lun; 618 cmd->tvc_task_attr = task_attr; 619 cmd->tvc_exp_data_len = exp_data_len; 620 cmd->tvc_data_direction = data_direction; 621 cmd->tvc_nexus = tv_nexus; 622 cmd->inflight = vhost_scsi_get_inflight(vq); 623 624 memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE); 625 626 return cmd; 627 } 628 629 /* 630 * Map a user memory range into a scatterlist 631 * 632 * Returns the number of scatterlist entries used or -errno on error. 633 */ 634 static int 635 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, 636 struct iov_iter *iter, 637 struct scatterlist *sgl, 638 bool write) 639 { 640 struct page **pages = cmd->tvc_upages; 641 struct scatterlist *sg = sgl; 642 ssize_t bytes; 643 size_t offset; 644 unsigned int npages = 0; 645 646 bytes = iov_iter_get_pages2(iter, pages, LONG_MAX, 647 VHOST_SCSI_PREALLOC_UPAGES, &offset); 648 /* No pages were pinned */ 649 if (bytes <= 0) 650 return bytes < 0 ? bytes : -EFAULT; 651 652 while (bytes) { 653 unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes); 654 sg_set_page(sg++, pages[npages++], n, offset); 655 bytes -= n; 656 offset = 0; 657 } 658 return npages; 659 } 660 661 static int 662 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) 663 { 664 int sgl_count = 0; 665 666 if (!iter || !iter->iov) { 667 pr_err("%s: iter->iov is NULL, but expected bytes: %zu" 668 " present\n", __func__, bytes); 669 return -EINVAL; 670 } 671 672 sgl_count = iov_iter_npages(iter, 0xffff); 673 if (sgl_count > max_sgls) { 674 pr_err("%s: requested sgl_count: %d exceeds pre-allocated" 675 " max_sgls: %d\n", __func__, sgl_count, max_sgls); 676 return -EINVAL; 677 } 678 return sgl_count; 679 } 680 681 static int 682 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, 683 struct iov_iter *iter, 684 struct scatterlist *sg, int sg_count) 685 { 686 struct scatterlist *p = sg; 687 int ret; 688 689 while (iov_iter_count(iter)) { 690 ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write); 691 if (ret < 0) { 692 while (p < sg) { 693 struct page *page = sg_page(p++); 694 if (page) 695 put_page(page); 696 } 697 return ret; 698 } 699 sg += ret; 700 } 701 return 0; 702 } 703 704 static int 705 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, 706 size_t prot_bytes, struct iov_iter *prot_iter, 707 size_t data_bytes, struct iov_iter *data_iter) 708 { 709 int sgl_count, ret; 710 bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE); 711 712 if (prot_bytes) { 713 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes, 714 VHOST_SCSI_PREALLOC_PROT_SGLS); 715 if (sgl_count < 0) 716 return sgl_count; 717 718 sg_init_table(cmd->tvc_prot_sgl, sgl_count); 719 cmd->tvc_prot_sgl_count = sgl_count; 720 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, 721 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count); 722 723 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter, 724 cmd->tvc_prot_sgl, 725 cmd->tvc_prot_sgl_count); 726 if (ret < 0) { 727 cmd->tvc_prot_sgl_count = 0; 728 return ret; 729 } 730 } 731 sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes, 732 VHOST_SCSI_PREALLOC_SGLS); 733 if (sgl_count < 0) 734 return sgl_count; 735 736 sg_init_table(cmd->tvc_sgl, sgl_count); 737 cmd->tvc_sgl_count = sgl_count; 738 pr_debug("%s data_sg %p data_sgl_count %u\n", __func__, 739 cmd->tvc_sgl, cmd->tvc_sgl_count); 740 741 ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter, 742 cmd->tvc_sgl, cmd->tvc_sgl_count); 743 if (ret < 0) { 744 cmd->tvc_sgl_count = 0; 745 return ret; 746 } 747 return 0; 748 } 749 750 static int vhost_scsi_to_tcm_attr(int attr) 751 { 752 switch (attr) { 753 case VIRTIO_SCSI_S_SIMPLE: 754 return TCM_SIMPLE_TAG; 755 case VIRTIO_SCSI_S_ORDERED: 756 return TCM_ORDERED_TAG; 757 case VIRTIO_SCSI_S_HEAD: 758 return TCM_HEAD_TAG; 759 case VIRTIO_SCSI_S_ACA: 760 return TCM_ACA_TAG; 761 default: 762 break; 763 } 764 return TCM_SIMPLE_TAG; 765 } 766 767 static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd) 768 { 769 struct se_cmd *se_cmd = &cmd->tvc_se_cmd; 770 struct vhost_scsi_nexus *tv_nexus; 771 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; 772 773 /* FIXME: BIDI operation */ 774 if (cmd->tvc_sgl_count) { 775 sg_ptr = cmd->tvc_sgl; 776 777 if (cmd->tvc_prot_sgl_count) 778 sg_prot_ptr = cmd->tvc_prot_sgl; 779 else 780 se_cmd->prot_pto = true; 781 } else { 782 sg_ptr = NULL; 783 } 784 tv_nexus = cmd->tvc_nexus; 785 786 se_cmd->tag = 0; 787 target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0], 788 cmd->tvc_lun, cmd->tvc_exp_data_len, 789 vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), 790 cmd->tvc_data_direction, TARGET_SCF_ACK_KREF); 791 792 if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr, 793 cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, 794 cmd->tvc_prot_sgl_count, GFP_KERNEL)) 795 return; 796 797 target_queue_submission(se_cmd); 798 } 799 800 static void 801 vhost_scsi_send_bad_target(struct vhost_scsi *vs, 802 struct vhost_virtqueue *vq, 803 int head, unsigned out) 804 { 805 struct virtio_scsi_cmd_resp __user *resp; 806 struct virtio_scsi_cmd_resp rsp; 807 int ret; 808 809 memset(&rsp, 0, sizeof(rsp)); 810 rsp.response = VIRTIO_SCSI_S_BAD_TARGET; 811 resp = vq->iov[out].iov_base; 812 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 813 if (!ret) 814 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 815 else 816 pr_err("Faulted on virtio_scsi_cmd_resp\n"); 817 } 818 819 static int 820 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq, 821 struct vhost_scsi_ctx *vc) 822 { 823 int ret = -ENXIO; 824 825 vc->head = vhost_get_vq_desc(vq, vq->iov, 826 ARRAY_SIZE(vq->iov), &vc->out, &vc->in, 827 NULL, NULL); 828 829 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", 830 vc->head, vc->out, vc->in); 831 832 /* On error, stop handling until the next kick. */ 833 if (unlikely(vc->head < 0)) 834 goto done; 835 836 /* Nothing new? Wait for eventfd to tell us they refilled. */ 837 if (vc->head == vq->num) { 838 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { 839 vhost_disable_notify(&vs->dev, vq); 840 ret = -EAGAIN; 841 } 842 goto done; 843 } 844 845 /* 846 * Get the size of request and response buffers. 847 * FIXME: Not correct for BIDI operation 848 */ 849 vc->out_size = iov_length(vq->iov, vc->out); 850 vc->in_size = iov_length(&vq->iov[vc->out], vc->in); 851 852 /* 853 * Copy over the virtio-scsi request header, which for a 854 * ANY_LAYOUT enabled guest may span multiple iovecs, or a 855 * single iovec may contain both the header + outgoing 856 * WRITE payloads. 857 * 858 * copy_from_iter() will advance out_iter, so that it will 859 * point at the start of the outgoing WRITE payload, if 860 * DMA_TO_DEVICE is set. 861 */ 862 iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size); 863 ret = 0; 864 865 done: 866 return ret; 867 } 868 869 static int 870 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) 871 { 872 if (unlikely(vc->in_size < vc->rsp_size)) { 873 vq_err(vq, 874 "Response buf too small, need min %zu bytes got %zu", 875 vc->rsp_size, vc->in_size); 876 return -EINVAL; 877 } else if (unlikely(vc->out_size < vc->req_size)) { 878 vq_err(vq, 879 "Request buf too small, need min %zu bytes got %zu", 880 vc->req_size, vc->out_size); 881 return -EIO; 882 } 883 884 return 0; 885 } 886 887 static int 888 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, 889 struct vhost_scsi_tpg **tpgp) 890 { 891 int ret = -EIO; 892 893 if (unlikely(!copy_from_iter_full(vc->req, vc->req_size, 894 &vc->out_iter))) { 895 vq_err(vq, "Faulted on copy_from_iter_full\n"); 896 } else if (unlikely(*vc->lunp != 1)) { 897 /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 898 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp); 899 } else { 900 struct vhost_scsi_tpg **vs_tpg, *tpg; 901 902 vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */ 903 904 tpg = READ_ONCE(vs_tpg[*vc->target]); 905 if (unlikely(!tpg)) { 906 vq_err(vq, "Target 0x%x does not exist\n", *vc->target); 907 } else { 908 if (tpgp) 909 *tpgp = tpg; 910 ret = 0; 911 } 912 } 913 914 return ret; 915 } 916 917 static u16 vhost_buf_to_lun(u8 *lun_buf) 918 { 919 return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF; 920 } 921 922 static void 923 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) 924 { 925 struct vhost_scsi_tpg **vs_tpg, *tpg; 926 struct virtio_scsi_cmd_req v_req; 927 struct virtio_scsi_cmd_req_pi v_req_pi; 928 struct vhost_scsi_ctx vc; 929 struct vhost_scsi_cmd *cmd; 930 struct iov_iter in_iter, prot_iter, data_iter; 931 u64 tag; 932 u32 exp_data_len, data_direction; 933 int ret, prot_bytes, c = 0; 934 u16 lun; 935 u8 task_attr; 936 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); 937 void *cdb; 938 939 mutex_lock(&vq->mutex); 940 /* 941 * We can handle the vq only after the endpoint is setup by calling the 942 * VHOST_SCSI_SET_ENDPOINT ioctl. 943 */ 944 vs_tpg = vhost_vq_get_backend(vq); 945 if (!vs_tpg) 946 goto out; 947 948 memset(&vc, 0, sizeof(vc)); 949 vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp); 950 951 vhost_disable_notify(&vs->dev, vq); 952 953 do { 954 ret = vhost_scsi_get_desc(vs, vq, &vc); 955 if (ret) 956 goto err; 957 958 /* 959 * Setup pointers and values based upon different virtio-scsi 960 * request header if T10_PI is enabled in KVM guest. 961 */ 962 if (t10_pi) { 963 vc.req = &v_req_pi; 964 vc.req_size = sizeof(v_req_pi); 965 vc.lunp = &v_req_pi.lun[0]; 966 vc.target = &v_req_pi.lun[1]; 967 } else { 968 vc.req = &v_req; 969 vc.req_size = sizeof(v_req); 970 vc.lunp = &v_req.lun[0]; 971 vc.target = &v_req.lun[1]; 972 } 973 974 /* 975 * Validate the size of request and response buffers. 976 * Check for a sane response buffer so we can report 977 * early errors back to the guest. 978 */ 979 ret = vhost_scsi_chk_size(vq, &vc); 980 if (ret) 981 goto err; 982 983 ret = vhost_scsi_get_req(vq, &vc, &tpg); 984 if (ret) 985 goto err; 986 987 ret = -EIO; /* bad target on any error from here on */ 988 989 /* 990 * Determine data_direction by calculating the total outgoing 991 * iovec sizes + incoming iovec sizes vs. virtio-scsi request + 992 * response headers respectively. 993 * 994 * For DMA_TO_DEVICE this is out_iter, which is already pointing 995 * to the right place. 996 * 997 * For DMA_FROM_DEVICE, the iovec will be just past the end 998 * of the virtio-scsi response header in either the same 999 * or immediately following iovec. 1000 * 1001 * Any associated T10_PI bytes for the outgoing / incoming 1002 * payloads are included in calculation of exp_data_len here. 1003 */ 1004 prot_bytes = 0; 1005 1006 if (vc.out_size > vc.req_size) { 1007 data_direction = DMA_TO_DEVICE; 1008 exp_data_len = vc.out_size - vc.req_size; 1009 data_iter = vc.out_iter; 1010 } else if (vc.in_size > vc.rsp_size) { 1011 data_direction = DMA_FROM_DEVICE; 1012 exp_data_len = vc.in_size - vc.rsp_size; 1013 1014 iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in, 1015 vc.rsp_size + exp_data_len); 1016 iov_iter_advance(&in_iter, vc.rsp_size); 1017 data_iter = in_iter; 1018 } else { 1019 data_direction = DMA_NONE; 1020 exp_data_len = 0; 1021 } 1022 /* 1023 * If T10_PI header + payload is present, setup prot_iter values 1024 * and recalculate data_iter for vhost_scsi_mapal() mapping to 1025 * host scatterlists via get_user_pages_fast(). 1026 */ 1027 if (t10_pi) { 1028 if (v_req_pi.pi_bytesout) { 1029 if (data_direction != DMA_TO_DEVICE) { 1030 vq_err(vq, "Received non zero pi_bytesout," 1031 " but wrong data_direction\n"); 1032 goto err; 1033 } 1034 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); 1035 } else if (v_req_pi.pi_bytesin) { 1036 if (data_direction != DMA_FROM_DEVICE) { 1037 vq_err(vq, "Received non zero pi_bytesin," 1038 " but wrong data_direction\n"); 1039 goto err; 1040 } 1041 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); 1042 } 1043 /* 1044 * Set prot_iter to data_iter and truncate it to 1045 * prot_bytes, and advance data_iter past any 1046 * preceeding prot_bytes that may be present. 1047 * 1048 * Also fix up the exp_data_len to reflect only the 1049 * actual data payload length. 1050 */ 1051 if (prot_bytes) { 1052 exp_data_len -= prot_bytes; 1053 prot_iter = data_iter; 1054 iov_iter_truncate(&prot_iter, prot_bytes); 1055 iov_iter_advance(&data_iter, prot_bytes); 1056 } 1057 tag = vhost64_to_cpu(vq, v_req_pi.tag); 1058 task_attr = v_req_pi.task_attr; 1059 cdb = &v_req_pi.cdb[0]; 1060 lun = vhost_buf_to_lun(v_req_pi.lun); 1061 } else { 1062 tag = vhost64_to_cpu(vq, v_req.tag); 1063 task_attr = v_req.task_attr; 1064 cdb = &v_req.cdb[0]; 1065 lun = vhost_buf_to_lun(v_req.lun); 1066 } 1067 /* 1068 * Check that the received CDB size does not exceeded our 1069 * hardcoded max for vhost-scsi, then get a pre-allocated 1070 * cmd descriptor for the new virtio-scsi tag. 1071 * 1072 * TODO what if cdb was too small for varlen cdb header? 1073 */ 1074 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) { 1075 vq_err(vq, "Received SCSI CDB with command_size: %d that" 1076 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1077 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); 1078 goto err; 1079 } 1080 cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr, 1081 exp_data_len + prot_bytes, 1082 data_direction); 1083 if (IS_ERR(cmd)) { 1084 vq_err(vq, "vhost_scsi_get_cmd failed %ld\n", 1085 PTR_ERR(cmd)); 1086 goto err; 1087 } 1088 cmd->tvc_vhost = vs; 1089 cmd->tvc_vq = vq; 1090 cmd->tvc_resp_iov = vq->iov[vc.out]; 1091 cmd->tvc_in_iovs = vc.in; 1092 1093 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", 1094 cmd->tvc_cdb[0], cmd->tvc_lun); 1095 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:" 1096 " %d\n", cmd, exp_data_len, prot_bytes, data_direction); 1097 1098 if (data_direction != DMA_NONE) { 1099 if (unlikely(vhost_scsi_mapal(cmd, prot_bytes, 1100 &prot_iter, exp_data_len, 1101 &data_iter))) { 1102 vq_err(vq, "Failed to map iov to sgl\n"); 1103 vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); 1104 goto err; 1105 } 1106 } 1107 /* 1108 * Save the descriptor from vhost_get_vq_desc() to be used to 1109 * complete the virtio-scsi request in TCM callback context via 1110 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() 1111 */ 1112 cmd->tvc_vq_desc = vc.head; 1113 vhost_scsi_target_queue_cmd(cmd); 1114 ret = 0; 1115 err: 1116 /* 1117 * ENXIO: No more requests, or read error, wait for next kick 1118 * EINVAL: Invalid response buffer, drop the request 1119 * EIO: Respond with bad target 1120 * EAGAIN: Pending request 1121 */ 1122 if (ret == -ENXIO) 1123 break; 1124 else if (ret == -EIO) 1125 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); 1126 } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); 1127 out: 1128 mutex_unlock(&vq->mutex); 1129 } 1130 1131 static void 1132 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq, 1133 int in_iovs, int vq_desc, struct iovec *resp_iov, 1134 int tmf_resp_code) 1135 { 1136 struct virtio_scsi_ctrl_tmf_resp rsp; 1137 struct iov_iter iov_iter; 1138 int ret; 1139 1140 pr_debug("%s\n", __func__); 1141 memset(&rsp, 0, sizeof(rsp)); 1142 rsp.response = tmf_resp_code; 1143 1144 iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp)); 1145 1146 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); 1147 if (likely(ret == sizeof(rsp))) 1148 vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0); 1149 else 1150 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); 1151 } 1152 1153 static void vhost_scsi_tmf_resp_work(struct vhost_work *work) 1154 { 1155 struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf, 1156 vwork); 1157 int resp_code; 1158 1159 if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) 1160 resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; 1161 else 1162 resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED; 1163 1164 vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs, 1165 tmf->vq_desc, &tmf->resp_iov, resp_code); 1166 vhost_scsi_release_tmf_res(tmf); 1167 } 1168 1169 static void 1170 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, 1171 struct vhost_virtqueue *vq, 1172 struct virtio_scsi_ctrl_tmf_req *vtmf, 1173 struct vhost_scsi_ctx *vc) 1174 { 1175 struct vhost_scsi_virtqueue *svq = container_of(vq, 1176 struct vhost_scsi_virtqueue, vq); 1177 struct vhost_scsi_tmf *tmf; 1178 1179 if (vhost32_to_cpu(vq, vtmf->subtype) != 1180 VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET) 1181 goto send_reject; 1182 1183 if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) { 1184 pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n"); 1185 goto send_reject; 1186 } 1187 1188 mutex_lock(&tpg->tv_tpg_mutex); 1189 if (list_empty(&tpg->tmf_queue)) { 1190 pr_err("Missing reserve TMF. Could not handle LUN RESET.\n"); 1191 mutex_unlock(&tpg->tv_tpg_mutex); 1192 goto send_reject; 1193 } 1194 1195 tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf, 1196 queue_entry); 1197 list_del_init(&tmf->queue_entry); 1198 mutex_unlock(&tpg->tv_tpg_mutex); 1199 1200 tmf->tpg = tpg; 1201 tmf->vhost = vs; 1202 tmf->svq = svq; 1203 tmf->resp_iov = vq->iov[vc->out]; 1204 tmf->vq_desc = vc->head; 1205 tmf->in_iovs = vc->in; 1206 tmf->inflight = vhost_scsi_get_inflight(vq); 1207 1208 if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL, 1209 vhost_buf_to_lun(vtmf->lun), NULL, 1210 TMR_LUN_RESET, GFP_KERNEL, 0, 1211 TARGET_SCF_ACK_KREF) < 0) { 1212 vhost_scsi_release_tmf_res(tmf); 1213 goto send_reject; 1214 } 1215 1216 return; 1217 1218 send_reject: 1219 vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out], 1220 VIRTIO_SCSI_S_FUNCTION_REJECTED); 1221 } 1222 1223 static void 1224 vhost_scsi_send_an_resp(struct vhost_scsi *vs, 1225 struct vhost_virtqueue *vq, 1226 struct vhost_scsi_ctx *vc) 1227 { 1228 struct virtio_scsi_ctrl_an_resp rsp; 1229 struct iov_iter iov_iter; 1230 int ret; 1231 1232 pr_debug("%s\n", __func__); 1233 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ 1234 rsp.response = VIRTIO_SCSI_S_OK; 1235 1236 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); 1237 1238 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); 1239 if (likely(ret == sizeof(rsp))) 1240 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1241 else 1242 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); 1243 } 1244 1245 static void 1246 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) 1247 { 1248 struct vhost_scsi_tpg *tpg; 1249 union { 1250 __virtio32 type; 1251 struct virtio_scsi_ctrl_an_req an; 1252 struct virtio_scsi_ctrl_tmf_req tmf; 1253 } v_req; 1254 struct vhost_scsi_ctx vc; 1255 size_t typ_size; 1256 int ret, c = 0; 1257 1258 mutex_lock(&vq->mutex); 1259 /* 1260 * We can handle the vq only after the endpoint is setup by calling the 1261 * VHOST_SCSI_SET_ENDPOINT ioctl. 1262 */ 1263 if (!vhost_vq_get_backend(vq)) 1264 goto out; 1265 1266 memset(&vc, 0, sizeof(vc)); 1267 1268 vhost_disable_notify(&vs->dev, vq); 1269 1270 do { 1271 ret = vhost_scsi_get_desc(vs, vq, &vc); 1272 if (ret) 1273 goto err; 1274 1275 /* 1276 * Get the request type first in order to setup 1277 * other parameters dependent on the type. 1278 */ 1279 vc.req = &v_req.type; 1280 typ_size = sizeof(v_req.type); 1281 1282 if (unlikely(!copy_from_iter_full(vc.req, typ_size, 1283 &vc.out_iter))) { 1284 vq_err(vq, "Faulted on copy_from_iter tmf type\n"); 1285 /* 1286 * The size of the response buffer depends on the 1287 * request type and must be validated against it. 1288 * Since the request type is not known, don't send 1289 * a response. 1290 */ 1291 continue; 1292 } 1293 1294 switch (vhost32_to_cpu(vq, v_req.type)) { 1295 case VIRTIO_SCSI_T_TMF: 1296 vc.req = &v_req.tmf; 1297 vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); 1298 vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp); 1299 vc.lunp = &v_req.tmf.lun[0]; 1300 vc.target = &v_req.tmf.lun[1]; 1301 break; 1302 case VIRTIO_SCSI_T_AN_QUERY: 1303 case VIRTIO_SCSI_T_AN_SUBSCRIBE: 1304 vc.req = &v_req.an; 1305 vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req); 1306 vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp); 1307 vc.lunp = &v_req.an.lun[0]; 1308 vc.target = NULL; 1309 break; 1310 default: 1311 vq_err(vq, "Unknown control request %d", v_req.type); 1312 continue; 1313 } 1314 1315 /* 1316 * Validate the size of request and response buffers. 1317 * Check for a sane response buffer so we can report 1318 * early errors back to the guest. 1319 */ 1320 ret = vhost_scsi_chk_size(vq, &vc); 1321 if (ret) 1322 goto err; 1323 1324 /* 1325 * Get the rest of the request now that its size is known. 1326 */ 1327 vc.req += typ_size; 1328 vc.req_size -= typ_size; 1329 1330 ret = vhost_scsi_get_req(vq, &vc, &tpg); 1331 if (ret) 1332 goto err; 1333 1334 if (v_req.type == VIRTIO_SCSI_T_TMF) 1335 vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc); 1336 else 1337 vhost_scsi_send_an_resp(vs, vq, &vc); 1338 err: 1339 /* 1340 * ENXIO: No more requests, or read error, wait for next kick 1341 * EINVAL: Invalid response buffer, drop the request 1342 * EIO: Respond with bad target 1343 * EAGAIN: Pending request 1344 */ 1345 if (ret == -ENXIO) 1346 break; 1347 else if (ret == -EIO) 1348 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); 1349 } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); 1350 out: 1351 mutex_unlock(&vq->mutex); 1352 } 1353 1354 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) 1355 { 1356 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 1357 poll.work); 1358 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); 1359 1360 pr_debug("%s: The handling func for control queue.\n", __func__); 1361 vhost_scsi_ctl_handle_vq(vs, vq); 1362 } 1363 1364 static void 1365 vhost_scsi_send_evt(struct vhost_scsi *vs, 1366 struct vhost_scsi_tpg *tpg, 1367 struct se_lun *lun, 1368 u32 event, 1369 u32 reason) 1370 { 1371 struct vhost_scsi_evt *evt; 1372 1373 evt = vhost_scsi_allocate_evt(vs, event, reason); 1374 if (!evt) 1375 return; 1376 1377 if (tpg && lun) { 1378 /* TODO: share lun setup code with virtio-scsi.ko */ 1379 /* 1380 * Note: evt->event is zeroed when we allocate it and 1381 * lun[4-7] need to be zero according to virtio-scsi spec. 1382 */ 1383 evt->event.lun[0] = 0x01; 1384 evt->event.lun[1] = tpg->tport_tpgt; 1385 if (lun->unpacked_lun >= 256) 1386 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; 1387 evt->event.lun[3] = lun->unpacked_lun & 0xFF; 1388 } 1389 1390 llist_add(&evt->list, &vs->vs_event_list); 1391 vhost_work_queue(&vs->dev, &vs->vs_event_work); 1392 } 1393 1394 static void vhost_scsi_evt_handle_kick(struct vhost_work *work) 1395 { 1396 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 1397 poll.work); 1398 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); 1399 1400 mutex_lock(&vq->mutex); 1401 if (!vhost_vq_get_backend(vq)) 1402 goto out; 1403 1404 if (vs->vs_events_missed) 1405 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); 1406 out: 1407 mutex_unlock(&vq->mutex); 1408 } 1409 1410 static void vhost_scsi_handle_kick(struct vhost_work *work) 1411 { 1412 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 1413 poll.work); 1414 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); 1415 1416 vhost_scsi_handle_vq(vs, vq); 1417 } 1418 1419 /* Callers must hold dev mutex */ 1420 static void vhost_scsi_flush(struct vhost_scsi *vs) 1421 { 1422 struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ]; 1423 int i; 1424 1425 /* Init new inflight and remember the old inflight */ 1426 vhost_scsi_init_inflight(vs, old_inflight); 1427 1428 /* 1429 * The inflight->kref was initialized to 1. We decrement it here to 1430 * indicate the start of the flush operation so that it will reach 0 1431 * when all the reqs are finished. 1432 */ 1433 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1434 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight); 1435 1436 /* Flush both the vhost poll and vhost work */ 1437 vhost_dev_flush(&vs->dev); 1438 1439 /* Wait for all reqs issued before the flush to be finished */ 1440 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1441 wait_for_completion(&old_inflight[i]->comp); 1442 } 1443 1444 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq) 1445 { 1446 struct vhost_scsi_virtqueue *svq = container_of(vq, 1447 struct vhost_scsi_virtqueue, vq); 1448 struct vhost_scsi_cmd *tv_cmd; 1449 unsigned int i; 1450 1451 if (!svq->scsi_cmds) 1452 return; 1453 1454 for (i = 0; i < svq->max_cmds; i++) { 1455 tv_cmd = &svq->scsi_cmds[i]; 1456 1457 kfree(tv_cmd->tvc_sgl); 1458 kfree(tv_cmd->tvc_prot_sgl); 1459 kfree(tv_cmd->tvc_upages); 1460 } 1461 1462 sbitmap_free(&svq->scsi_tags); 1463 kfree(svq->scsi_cmds); 1464 svq->scsi_cmds = NULL; 1465 } 1466 1467 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds) 1468 { 1469 struct vhost_scsi_virtqueue *svq = container_of(vq, 1470 struct vhost_scsi_virtqueue, vq); 1471 struct vhost_scsi_cmd *tv_cmd; 1472 unsigned int i; 1473 1474 if (svq->scsi_cmds) 1475 return 0; 1476 1477 if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL, 1478 NUMA_NO_NODE, false, true)) 1479 return -ENOMEM; 1480 svq->max_cmds = max_cmds; 1481 1482 svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL); 1483 if (!svq->scsi_cmds) { 1484 sbitmap_free(&svq->scsi_tags); 1485 return -ENOMEM; 1486 } 1487 1488 for (i = 0; i < max_cmds; i++) { 1489 tv_cmd = &svq->scsi_cmds[i]; 1490 1491 tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS, 1492 sizeof(struct scatterlist), 1493 GFP_KERNEL); 1494 if (!tv_cmd->tvc_sgl) { 1495 pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); 1496 goto out; 1497 } 1498 1499 tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, 1500 sizeof(struct page *), 1501 GFP_KERNEL); 1502 if (!tv_cmd->tvc_upages) { 1503 pr_err("Unable to allocate tv_cmd->tvc_upages\n"); 1504 goto out; 1505 } 1506 1507 tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS, 1508 sizeof(struct scatterlist), 1509 GFP_KERNEL); 1510 if (!tv_cmd->tvc_prot_sgl) { 1511 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); 1512 goto out; 1513 } 1514 } 1515 return 0; 1516 out: 1517 vhost_scsi_destroy_vq_cmds(vq); 1518 return -ENOMEM; 1519 } 1520 1521 /* 1522 * Called from vhost_scsi_ioctl() context to walk the list of available 1523 * vhost_scsi_tpg with an active struct vhost_scsi_nexus 1524 * 1525 * The lock nesting rule is: 1526 * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex 1527 */ 1528 static int 1529 vhost_scsi_set_endpoint(struct vhost_scsi *vs, 1530 struct vhost_scsi_target *t) 1531 { 1532 struct se_portal_group *se_tpg; 1533 struct vhost_scsi_tport *tv_tport; 1534 struct vhost_scsi_tpg *tpg; 1535 struct vhost_scsi_tpg **vs_tpg; 1536 struct vhost_virtqueue *vq; 1537 int index, ret, i, len; 1538 bool match = false; 1539 1540 mutex_lock(&vhost_scsi_mutex); 1541 mutex_lock(&vs->dev.mutex); 1542 1543 /* Verify that ring has been setup correctly. */ 1544 for (index = 0; index < vs->dev.nvqs; ++index) { 1545 /* Verify that ring has been setup correctly. */ 1546 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { 1547 ret = -EFAULT; 1548 goto out; 1549 } 1550 } 1551 1552 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; 1553 vs_tpg = kzalloc(len, GFP_KERNEL); 1554 if (!vs_tpg) { 1555 ret = -ENOMEM; 1556 goto out; 1557 } 1558 if (vs->vs_tpg) 1559 memcpy(vs_tpg, vs->vs_tpg, len); 1560 1561 list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { 1562 mutex_lock(&tpg->tv_tpg_mutex); 1563 if (!tpg->tpg_nexus) { 1564 mutex_unlock(&tpg->tv_tpg_mutex); 1565 continue; 1566 } 1567 if (tpg->tv_tpg_vhost_count != 0) { 1568 mutex_unlock(&tpg->tv_tpg_mutex); 1569 continue; 1570 } 1571 tv_tport = tpg->tport; 1572 1573 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 1574 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { 1575 mutex_unlock(&tpg->tv_tpg_mutex); 1576 ret = -EEXIST; 1577 goto undepend; 1578 } 1579 /* 1580 * In order to ensure individual vhost-scsi configfs 1581 * groups cannot be removed while in use by vhost ioctl, 1582 * go ahead and take an explicit se_tpg->tpg_group.cg_item 1583 * dependency now. 1584 */ 1585 se_tpg = &tpg->se_tpg; 1586 ret = target_depend_item(&se_tpg->tpg_group.cg_item); 1587 if (ret) { 1588 pr_warn("target_depend_item() failed: %d\n", ret); 1589 mutex_unlock(&tpg->tv_tpg_mutex); 1590 goto undepend; 1591 } 1592 tpg->tv_tpg_vhost_count++; 1593 tpg->vhost_scsi = vs; 1594 vs_tpg[tpg->tport_tpgt] = tpg; 1595 match = true; 1596 } 1597 mutex_unlock(&tpg->tv_tpg_mutex); 1598 } 1599 1600 if (match) { 1601 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, 1602 sizeof(vs->vs_vhost_wwpn)); 1603 1604 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) { 1605 vq = &vs->vqs[i].vq; 1606 if (!vhost_vq_is_setup(vq)) 1607 continue; 1608 1609 ret = vhost_scsi_setup_vq_cmds(vq, vq->num); 1610 if (ret) 1611 goto destroy_vq_cmds; 1612 } 1613 1614 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1615 vq = &vs->vqs[i].vq; 1616 mutex_lock(&vq->mutex); 1617 vhost_vq_set_backend(vq, vs_tpg); 1618 vhost_vq_init_access(vq); 1619 mutex_unlock(&vq->mutex); 1620 } 1621 ret = 0; 1622 } else { 1623 ret = -EEXIST; 1624 } 1625 1626 /* 1627 * Act as synchronize_rcu to make sure access to 1628 * old vs->vs_tpg is finished. 1629 */ 1630 vhost_scsi_flush(vs); 1631 kfree(vs->vs_tpg); 1632 vs->vs_tpg = vs_tpg; 1633 goto out; 1634 1635 destroy_vq_cmds: 1636 for (i--; i >= VHOST_SCSI_VQ_IO; i--) { 1637 if (!vhost_vq_get_backend(&vs->vqs[i].vq)) 1638 vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq); 1639 } 1640 undepend: 1641 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { 1642 tpg = vs_tpg[i]; 1643 if (tpg) { 1644 tpg->tv_tpg_vhost_count--; 1645 target_undepend_item(&tpg->se_tpg.tpg_group.cg_item); 1646 } 1647 } 1648 kfree(vs_tpg); 1649 out: 1650 mutex_unlock(&vs->dev.mutex); 1651 mutex_unlock(&vhost_scsi_mutex); 1652 return ret; 1653 } 1654 1655 static int 1656 vhost_scsi_clear_endpoint(struct vhost_scsi *vs, 1657 struct vhost_scsi_target *t) 1658 { 1659 struct se_portal_group *se_tpg; 1660 struct vhost_scsi_tport *tv_tport; 1661 struct vhost_scsi_tpg *tpg; 1662 struct vhost_virtqueue *vq; 1663 bool match = false; 1664 int index, ret, i; 1665 u8 target; 1666 1667 mutex_lock(&vhost_scsi_mutex); 1668 mutex_lock(&vs->dev.mutex); 1669 /* Verify that ring has been setup correctly. */ 1670 for (index = 0; index < vs->dev.nvqs; ++index) { 1671 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { 1672 ret = -EFAULT; 1673 goto err_dev; 1674 } 1675 } 1676 1677 if (!vs->vs_tpg) { 1678 ret = 0; 1679 goto err_dev; 1680 } 1681 1682 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { 1683 target = i; 1684 tpg = vs->vs_tpg[target]; 1685 if (!tpg) 1686 continue; 1687 1688 mutex_lock(&tpg->tv_tpg_mutex); 1689 tv_tport = tpg->tport; 1690 if (!tv_tport) { 1691 ret = -ENODEV; 1692 goto err_tpg; 1693 } 1694 1695 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 1696 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu" 1697 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", 1698 tv_tport->tport_name, tpg->tport_tpgt, 1699 t->vhost_wwpn, t->vhost_tpgt); 1700 ret = -EINVAL; 1701 goto err_tpg; 1702 } 1703 tpg->tv_tpg_vhost_count--; 1704 tpg->vhost_scsi = NULL; 1705 vs->vs_tpg[target] = NULL; 1706 match = true; 1707 mutex_unlock(&tpg->tv_tpg_mutex); 1708 /* 1709 * Release se_tpg->tpg_group.cg_item configfs dependency now 1710 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. 1711 */ 1712 se_tpg = &tpg->se_tpg; 1713 target_undepend_item(&se_tpg->tpg_group.cg_item); 1714 } 1715 if (match) { 1716 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1717 vq = &vs->vqs[i].vq; 1718 mutex_lock(&vq->mutex); 1719 vhost_vq_set_backend(vq, NULL); 1720 mutex_unlock(&vq->mutex); 1721 } 1722 /* Make sure cmds are not running before tearing them down. */ 1723 vhost_scsi_flush(vs); 1724 1725 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1726 vq = &vs->vqs[i].vq; 1727 vhost_scsi_destroy_vq_cmds(vq); 1728 } 1729 } 1730 /* 1731 * Act as synchronize_rcu to make sure access to 1732 * old vs->vs_tpg is finished. 1733 */ 1734 vhost_scsi_flush(vs); 1735 kfree(vs->vs_tpg); 1736 vs->vs_tpg = NULL; 1737 WARN_ON(vs->vs_events_nr); 1738 mutex_unlock(&vs->dev.mutex); 1739 mutex_unlock(&vhost_scsi_mutex); 1740 return 0; 1741 1742 err_tpg: 1743 mutex_unlock(&tpg->tv_tpg_mutex); 1744 err_dev: 1745 mutex_unlock(&vs->dev.mutex); 1746 mutex_unlock(&vhost_scsi_mutex); 1747 return ret; 1748 } 1749 1750 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) 1751 { 1752 struct vhost_virtqueue *vq; 1753 int i; 1754 1755 if (features & ~VHOST_SCSI_FEATURES) 1756 return -EOPNOTSUPP; 1757 1758 mutex_lock(&vs->dev.mutex); 1759 if ((features & (1 << VHOST_F_LOG_ALL)) && 1760 !vhost_log_access_ok(&vs->dev)) { 1761 mutex_unlock(&vs->dev.mutex); 1762 return -EFAULT; 1763 } 1764 1765 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1766 vq = &vs->vqs[i].vq; 1767 mutex_lock(&vq->mutex); 1768 vq->acked_features = features; 1769 mutex_unlock(&vq->mutex); 1770 } 1771 mutex_unlock(&vs->dev.mutex); 1772 return 0; 1773 } 1774 1775 static int vhost_scsi_open(struct inode *inode, struct file *f) 1776 { 1777 struct vhost_scsi *vs; 1778 struct vhost_virtqueue **vqs; 1779 int r = -ENOMEM, i; 1780 1781 vs = kvzalloc(sizeof(*vs), GFP_KERNEL); 1782 if (!vs) 1783 goto err_vs; 1784 1785 vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL); 1786 if (!vqs) 1787 goto err_vqs; 1788 1789 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); 1790 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); 1791 1792 vs->vs_events_nr = 0; 1793 vs->vs_events_missed = false; 1794 1795 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; 1796 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1797 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; 1798 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; 1799 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) { 1800 vqs[i] = &vs->vqs[i].vq; 1801 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; 1802 } 1803 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV, 1804 VHOST_SCSI_WEIGHT, 0, true, NULL); 1805 1806 vhost_scsi_init_inflight(vs, NULL); 1807 1808 f->private_data = vs; 1809 return 0; 1810 1811 err_vqs: 1812 kvfree(vs); 1813 err_vs: 1814 return r; 1815 } 1816 1817 static int vhost_scsi_release(struct inode *inode, struct file *f) 1818 { 1819 struct vhost_scsi *vs = f->private_data; 1820 struct vhost_scsi_target t; 1821 1822 mutex_lock(&vs->dev.mutex); 1823 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); 1824 mutex_unlock(&vs->dev.mutex); 1825 vhost_scsi_clear_endpoint(vs, &t); 1826 vhost_dev_stop(&vs->dev); 1827 vhost_dev_cleanup(&vs->dev); 1828 kfree(vs->dev.vqs); 1829 kvfree(vs); 1830 return 0; 1831 } 1832 1833 static long 1834 vhost_scsi_ioctl(struct file *f, 1835 unsigned int ioctl, 1836 unsigned long arg) 1837 { 1838 struct vhost_scsi *vs = f->private_data; 1839 struct vhost_scsi_target backend; 1840 void __user *argp = (void __user *)arg; 1841 u64 __user *featurep = argp; 1842 u32 __user *eventsp = argp; 1843 u32 events_missed; 1844 u64 features; 1845 int r, abi_version = VHOST_SCSI_ABI_VERSION; 1846 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1847 1848 switch (ioctl) { 1849 case VHOST_SCSI_SET_ENDPOINT: 1850 if (copy_from_user(&backend, argp, sizeof backend)) 1851 return -EFAULT; 1852 if (backend.reserved != 0) 1853 return -EOPNOTSUPP; 1854 1855 return vhost_scsi_set_endpoint(vs, &backend); 1856 case VHOST_SCSI_CLEAR_ENDPOINT: 1857 if (copy_from_user(&backend, argp, sizeof backend)) 1858 return -EFAULT; 1859 if (backend.reserved != 0) 1860 return -EOPNOTSUPP; 1861 1862 return vhost_scsi_clear_endpoint(vs, &backend); 1863 case VHOST_SCSI_GET_ABI_VERSION: 1864 if (copy_to_user(argp, &abi_version, sizeof abi_version)) 1865 return -EFAULT; 1866 return 0; 1867 case VHOST_SCSI_SET_EVENTS_MISSED: 1868 if (get_user(events_missed, eventsp)) 1869 return -EFAULT; 1870 mutex_lock(&vq->mutex); 1871 vs->vs_events_missed = events_missed; 1872 mutex_unlock(&vq->mutex); 1873 return 0; 1874 case VHOST_SCSI_GET_EVENTS_MISSED: 1875 mutex_lock(&vq->mutex); 1876 events_missed = vs->vs_events_missed; 1877 mutex_unlock(&vq->mutex); 1878 if (put_user(events_missed, eventsp)) 1879 return -EFAULT; 1880 return 0; 1881 case VHOST_GET_FEATURES: 1882 features = VHOST_SCSI_FEATURES; 1883 if (copy_to_user(featurep, &features, sizeof features)) 1884 return -EFAULT; 1885 return 0; 1886 case VHOST_SET_FEATURES: 1887 if (copy_from_user(&features, featurep, sizeof features)) 1888 return -EFAULT; 1889 return vhost_scsi_set_features(vs, features); 1890 default: 1891 mutex_lock(&vs->dev.mutex); 1892 r = vhost_dev_ioctl(&vs->dev, ioctl, argp); 1893 /* TODO: flush backend after dev ioctl. */ 1894 if (r == -ENOIOCTLCMD) 1895 r = vhost_vring_ioctl(&vs->dev, ioctl, argp); 1896 mutex_unlock(&vs->dev.mutex); 1897 return r; 1898 } 1899 } 1900 1901 static const struct file_operations vhost_scsi_fops = { 1902 .owner = THIS_MODULE, 1903 .release = vhost_scsi_release, 1904 .unlocked_ioctl = vhost_scsi_ioctl, 1905 .compat_ioctl = compat_ptr_ioctl, 1906 .open = vhost_scsi_open, 1907 .llseek = noop_llseek, 1908 }; 1909 1910 static struct miscdevice vhost_scsi_misc = { 1911 MISC_DYNAMIC_MINOR, 1912 "vhost-scsi", 1913 &vhost_scsi_fops, 1914 }; 1915 1916 static int __init vhost_scsi_register(void) 1917 { 1918 return misc_register(&vhost_scsi_misc); 1919 } 1920 1921 static void vhost_scsi_deregister(void) 1922 { 1923 misc_deregister(&vhost_scsi_misc); 1924 } 1925 1926 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport) 1927 { 1928 switch (tport->tport_proto_id) { 1929 case SCSI_PROTOCOL_SAS: 1930 return "SAS"; 1931 case SCSI_PROTOCOL_FCP: 1932 return "FCP"; 1933 case SCSI_PROTOCOL_ISCSI: 1934 return "iSCSI"; 1935 default: 1936 break; 1937 } 1938 1939 return "Unknown"; 1940 } 1941 1942 static void 1943 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg, 1944 struct se_lun *lun, bool plug) 1945 { 1946 1947 struct vhost_scsi *vs = tpg->vhost_scsi; 1948 struct vhost_virtqueue *vq; 1949 u32 reason; 1950 1951 if (!vs) 1952 return; 1953 1954 mutex_lock(&vs->dev.mutex); 1955 1956 if (plug) 1957 reason = VIRTIO_SCSI_EVT_RESET_RESCAN; 1958 else 1959 reason = VIRTIO_SCSI_EVT_RESET_REMOVED; 1960 1961 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1962 mutex_lock(&vq->mutex); 1963 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) 1964 vhost_scsi_send_evt(vs, tpg, lun, 1965 VIRTIO_SCSI_T_TRANSPORT_RESET, reason); 1966 mutex_unlock(&vq->mutex); 1967 mutex_unlock(&vs->dev.mutex); 1968 } 1969 1970 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) 1971 { 1972 vhost_scsi_do_plug(tpg, lun, true); 1973 } 1974 1975 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) 1976 { 1977 vhost_scsi_do_plug(tpg, lun, false); 1978 } 1979 1980 static int vhost_scsi_port_link(struct se_portal_group *se_tpg, 1981 struct se_lun *lun) 1982 { 1983 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 1984 struct vhost_scsi_tpg, se_tpg); 1985 struct vhost_scsi_tmf *tmf; 1986 1987 tmf = kzalloc(sizeof(*tmf), GFP_KERNEL); 1988 if (!tmf) 1989 return -ENOMEM; 1990 INIT_LIST_HEAD(&tmf->queue_entry); 1991 vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work); 1992 1993 mutex_lock(&vhost_scsi_mutex); 1994 1995 mutex_lock(&tpg->tv_tpg_mutex); 1996 tpg->tv_tpg_port_count++; 1997 list_add_tail(&tmf->queue_entry, &tpg->tmf_queue); 1998 mutex_unlock(&tpg->tv_tpg_mutex); 1999 2000 vhost_scsi_hotplug(tpg, lun); 2001 2002 mutex_unlock(&vhost_scsi_mutex); 2003 2004 return 0; 2005 } 2006 2007 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg, 2008 struct se_lun *lun) 2009 { 2010 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 2011 struct vhost_scsi_tpg, se_tpg); 2012 struct vhost_scsi_tmf *tmf; 2013 2014 mutex_lock(&vhost_scsi_mutex); 2015 2016 mutex_lock(&tpg->tv_tpg_mutex); 2017 tpg->tv_tpg_port_count--; 2018 tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf, 2019 queue_entry); 2020 list_del(&tmf->queue_entry); 2021 kfree(tmf); 2022 mutex_unlock(&tpg->tv_tpg_mutex); 2023 2024 vhost_scsi_hotunplug(tpg, lun); 2025 2026 mutex_unlock(&vhost_scsi_mutex); 2027 } 2028 2029 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store( 2030 struct config_item *item, const char *page, size_t count) 2031 { 2032 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2033 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 2034 struct vhost_scsi_tpg, se_tpg); 2035 unsigned long val; 2036 int ret = kstrtoul(page, 0, &val); 2037 2038 if (ret) { 2039 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); 2040 return ret; 2041 } 2042 if (val != 0 && val != 1 && val != 3) { 2043 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val); 2044 return -EINVAL; 2045 } 2046 tpg->tv_fabric_prot_type = val; 2047 2048 return count; 2049 } 2050 2051 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show( 2052 struct config_item *item, char *page) 2053 { 2054 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2055 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 2056 struct vhost_scsi_tpg, se_tpg); 2057 2058 return sprintf(page, "%d\n", tpg->tv_fabric_prot_type); 2059 } 2060 2061 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type); 2062 2063 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = { 2064 &vhost_scsi_tpg_attrib_attr_fabric_prot_type, 2065 NULL, 2066 }; 2067 2068 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, 2069 const char *name) 2070 { 2071 struct vhost_scsi_nexus *tv_nexus; 2072 2073 mutex_lock(&tpg->tv_tpg_mutex); 2074 if (tpg->tpg_nexus) { 2075 mutex_unlock(&tpg->tv_tpg_mutex); 2076 pr_debug("tpg->tpg_nexus already exists\n"); 2077 return -EEXIST; 2078 } 2079 2080 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL); 2081 if (!tv_nexus) { 2082 mutex_unlock(&tpg->tv_tpg_mutex); 2083 pr_err("Unable to allocate struct vhost_scsi_nexus\n"); 2084 return -ENOMEM; 2085 } 2086 /* 2087 * Since we are running in 'demo mode' this call with generate a 2088 * struct se_node_acl for the vhost_scsi struct se_portal_group with 2089 * the SCSI Initiator port name of the passed configfs group 'name'. 2090 */ 2091 tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0, 2092 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, 2093 (unsigned char *)name, tv_nexus, NULL); 2094 if (IS_ERR(tv_nexus->tvn_se_sess)) { 2095 mutex_unlock(&tpg->tv_tpg_mutex); 2096 kfree(tv_nexus); 2097 return -ENOMEM; 2098 } 2099 tpg->tpg_nexus = tv_nexus; 2100 2101 mutex_unlock(&tpg->tv_tpg_mutex); 2102 return 0; 2103 } 2104 2105 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg) 2106 { 2107 struct se_session *se_sess; 2108 struct vhost_scsi_nexus *tv_nexus; 2109 2110 mutex_lock(&tpg->tv_tpg_mutex); 2111 tv_nexus = tpg->tpg_nexus; 2112 if (!tv_nexus) { 2113 mutex_unlock(&tpg->tv_tpg_mutex); 2114 return -ENODEV; 2115 } 2116 2117 se_sess = tv_nexus->tvn_se_sess; 2118 if (!se_sess) { 2119 mutex_unlock(&tpg->tv_tpg_mutex); 2120 return -ENODEV; 2121 } 2122 2123 if (tpg->tv_tpg_port_count != 0) { 2124 mutex_unlock(&tpg->tv_tpg_mutex); 2125 pr_err("Unable to remove TCM_vhost I_T Nexus with" 2126 " active TPG port count: %d\n", 2127 tpg->tv_tpg_port_count); 2128 return -EBUSY; 2129 } 2130 2131 if (tpg->tv_tpg_vhost_count != 0) { 2132 mutex_unlock(&tpg->tv_tpg_mutex); 2133 pr_err("Unable to remove TCM_vhost I_T Nexus with" 2134 " active TPG vhost count: %d\n", 2135 tpg->tv_tpg_vhost_count); 2136 return -EBUSY; 2137 } 2138 2139 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" 2140 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport), 2141 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 2142 2143 /* 2144 * Release the SCSI I_T Nexus to the emulated vhost Target Port 2145 */ 2146 target_remove_session(se_sess); 2147 tpg->tpg_nexus = NULL; 2148 mutex_unlock(&tpg->tv_tpg_mutex); 2149 2150 kfree(tv_nexus); 2151 return 0; 2152 } 2153 2154 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page) 2155 { 2156 struct se_portal_group *se_tpg = to_tpg(item); 2157 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 2158 struct vhost_scsi_tpg, se_tpg); 2159 struct vhost_scsi_nexus *tv_nexus; 2160 ssize_t ret; 2161 2162 mutex_lock(&tpg->tv_tpg_mutex); 2163 tv_nexus = tpg->tpg_nexus; 2164 if (!tv_nexus) { 2165 mutex_unlock(&tpg->tv_tpg_mutex); 2166 return -ENODEV; 2167 } 2168 ret = snprintf(page, PAGE_SIZE, "%s\n", 2169 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 2170 mutex_unlock(&tpg->tv_tpg_mutex); 2171 2172 return ret; 2173 } 2174 2175 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item, 2176 const char *page, size_t count) 2177 { 2178 struct se_portal_group *se_tpg = to_tpg(item); 2179 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 2180 struct vhost_scsi_tpg, se_tpg); 2181 struct vhost_scsi_tport *tport_wwn = tpg->tport; 2182 unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr; 2183 int ret; 2184 /* 2185 * Shutdown the active I_T nexus if 'NULL' is passed.. 2186 */ 2187 if (!strncmp(page, "NULL", 4)) { 2188 ret = vhost_scsi_drop_nexus(tpg); 2189 return (!ret) ? count : ret; 2190 } 2191 /* 2192 * Otherwise make sure the passed virtual Initiator port WWN matches 2193 * the fabric protocol_id set in vhost_scsi_make_tport(), and call 2194 * vhost_scsi_make_nexus(). 2195 */ 2196 if (strlen(page) >= VHOST_SCSI_NAMELEN) { 2197 pr_err("Emulated NAA Sas Address: %s, exceeds" 2198 " max: %d\n", page, VHOST_SCSI_NAMELEN); 2199 return -EINVAL; 2200 } 2201 snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page); 2202 2203 ptr = strstr(i_port, "naa."); 2204 if (ptr) { 2205 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { 2206 pr_err("Passed SAS Initiator Port %s does not" 2207 " match target port protoid: %s\n", i_port, 2208 vhost_scsi_dump_proto_id(tport_wwn)); 2209 return -EINVAL; 2210 } 2211 port_ptr = &i_port[0]; 2212 goto check_newline; 2213 } 2214 ptr = strstr(i_port, "fc."); 2215 if (ptr) { 2216 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { 2217 pr_err("Passed FCP Initiator Port %s does not" 2218 " match target port protoid: %s\n", i_port, 2219 vhost_scsi_dump_proto_id(tport_wwn)); 2220 return -EINVAL; 2221 } 2222 port_ptr = &i_port[3]; /* Skip over "fc." */ 2223 goto check_newline; 2224 } 2225 ptr = strstr(i_port, "iqn."); 2226 if (ptr) { 2227 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { 2228 pr_err("Passed iSCSI Initiator Port %s does not" 2229 " match target port protoid: %s\n", i_port, 2230 vhost_scsi_dump_proto_id(tport_wwn)); 2231 return -EINVAL; 2232 } 2233 port_ptr = &i_port[0]; 2234 goto check_newline; 2235 } 2236 pr_err("Unable to locate prefix for emulated Initiator Port:" 2237 " %s\n", i_port); 2238 return -EINVAL; 2239 /* 2240 * Clear any trailing newline for the NAA WWN 2241 */ 2242 check_newline: 2243 if (i_port[strlen(i_port)-1] == '\n') 2244 i_port[strlen(i_port)-1] = '\0'; 2245 2246 ret = vhost_scsi_make_nexus(tpg, port_ptr); 2247 if (ret < 0) 2248 return ret; 2249 2250 return count; 2251 } 2252 2253 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus); 2254 2255 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = { 2256 &vhost_scsi_tpg_attr_nexus, 2257 NULL, 2258 }; 2259 2260 static struct se_portal_group * 2261 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name) 2262 { 2263 struct vhost_scsi_tport *tport = container_of(wwn, 2264 struct vhost_scsi_tport, tport_wwn); 2265 2266 struct vhost_scsi_tpg *tpg; 2267 u16 tpgt; 2268 int ret; 2269 2270 if (strstr(name, "tpgt_") != name) 2271 return ERR_PTR(-EINVAL); 2272 if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET) 2273 return ERR_PTR(-EINVAL); 2274 2275 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); 2276 if (!tpg) { 2277 pr_err("Unable to allocate struct vhost_scsi_tpg"); 2278 return ERR_PTR(-ENOMEM); 2279 } 2280 mutex_init(&tpg->tv_tpg_mutex); 2281 INIT_LIST_HEAD(&tpg->tv_tpg_list); 2282 INIT_LIST_HEAD(&tpg->tmf_queue); 2283 tpg->tport = tport; 2284 tpg->tport_tpgt = tpgt; 2285 2286 ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id); 2287 if (ret < 0) { 2288 kfree(tpg); 2289 return NULL; 2290 } 2291 mutex_lock(&vhost_scsi_mutex); 2292 list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list); 2293 mutex_unlock(&vhost_scsi_mutex); 2294 2295 return &tpg->se_tpg; 2296 } 2297 2298 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg) 2299 { 2300 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 2301 struct vhost_scsi_tpg, se_tpg); 2302 2303 mutex_lock(&vhost_scsi_mutex); 2304 list_del(&tpg->tv_tpg_list); 2305 mutex_unlock(&vhost_scsi_mutex); 2306 /* 2307 * Release the virtual I_T Nexus for this vhost TPG 2308 */ 2309 vhost_scsi_drop_nexus(tpg); 2310 /* 2311 * Deregister the se_tpg from TCM.. 2312 */ 2313 core_tpg_deregister(se_tpg); 2314 kfree(tpg); 2315 } 2316 2317 static struct se_wwn * 2318 vhost_scsi_make_tport(struct target_fabric_configfs *tf, 2319 struct config_group *group, 2320 const char *name) 2321 { 2322 struct vhost_scsi_tport *tport; 2323 char *ptr; 2324 u64 wwpn = 0; 2325 int off = 0; 2326 2327 /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) 2328 return ERR_PTR(-EINVAL); */ 2329 2330 tport = kzalloc(sizeof(*tport), GFP_KERNEL); 2331 if (!tport) { 2332 pr_err("Unable to allocate struct vhost_scsi_tport"); 2333 return ERR_PTR(-ENOMEM); 2334 } 2335 tport->tport_wwpn = wwpn; 2336 /* 2337 * Determine the emulated Protocol Identifier and Target Port Name 2338 * based on the incoming configfs directory name. 2339 */ 2340 ptr = strstr(name, "naa."); 2341 if (ptr) { 2342 tport->tport_proto_id = SCSI_PROTOCOL_SAS; 2343 goto check_len; 2344 } 2345 ptr = strstr(name, "fc."); 2346 if (ptr) { 2347 tport->tport_proto_id = SCSI_PROTOCOL_FCP; 2348 off = 3; /* Skip over "fc." */ 2349 goto check_len; 2350 } 2351 ptr = strstr(name, "iqn."); 2352 if (ptr) { 2353 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI; 2354 goto check_len; 2355 } 2356 2357 pr_err("Unable to locate prefix for emulated Target Port:" 2358 " %s\n", name); 2359 kfree(tport); 2360 return ERR_PTR(-EINVAL); 2361 2362 check_len: 2363 if (strlen(name) >= VHOST_SCSI_NAMELEN) { 2364 pr_err("Emulated %s Address: %s, exceeds" 2365 " max: %d\n", name, vhost_scsi_dump_proto_id(tport), 2366 VHOST_SCSI_NAMELEN); 2367 kfree(tport); 2368 return ERR_PTR(-EINVAL); 2369 } 2370 snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]); 2371 2372 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" 2373 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name); 2374 2375 return &tport->tport_wwn; 2376 } 2377 2378 static void vhost_scsi_drop_tport(struct se_wwn *wwn) 2379 { 2380 struct vhost_scsi_tport *tport = container_of(wwn, 2381 struct vhost_scsi_tport, tport_wwn); 2382 2383 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" 2384 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), 2385 tport->tport_name); 2386 2387 kfree(tport); 2388 } 2389 2390 static ssize_t 2391 vhost_scsi_wwn_version_show(struct config_item *item, char *page) 2392 { 2393 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" 2394 "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, 2395 utsname()->machine); 2396 } 2397 2398 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version); 2399 2400 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { 2401 &vhost_scsi_wwn_attr_version, 2402 NULL, 2403 }; 2404 2405 static const struct target_core_fabric_ops vhost_scsi_ops = { 2406 .module = THIS_MODULE, 2407 .fabric_name = "vhost", 2408 .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS, 2409 .tpg_get_wwn = vhost_scsi_get_fabric_wwn, 2410 .tpg_get_tag = vhost_scsi_get_tpgt, 2411 .tpg_check_demo_mode = vhost_scsi_check_true, 2412 .tpg_check_demo_mode_cache = vhost_scsi_check_true, 2413 .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, 2414 .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, 2415 .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only, 2416 .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, 2417 .release_cmd = vhost_scsi_release_cmd, 2418 .check_stop_free = vhost_scsi_check_stop_free, 2419 .sess_get_index = vhost_scsi_sess_get_index, 2420 .sess_get_initiator_sid = NULL, 2421 .write_pending = vhost_scsi_write_pending, 2422 .set_default_node_attributes = vhost_scsi_set_default_node_attrs, 2423 .get_cmd_state = vhost_scsi_get_cmd_state, 2424 .queue_data_in = vhost_scsi_queue_data_in, 2425 .queue_status = vhost_scsi_queue_status, 2426 .queue_tm_rsp = vhost_scsi_queue_tm_rsp, 2427 .aborted_task = vhost_scsi_aborted_task, 2428 /* 2429 * Setup callers for generic logic in target_core_fabric_configfs.c 2430 */ 2431 .fabric_make_wwn = vhost_scsi_make_tport, 2432 .fabric_drop_wwn = vhost_scsi_drop_tport, 2433 .fabric_make_tpg = vhost_scsi_make_tpg, 2434 .fabric_drop_tpg = vhost_scsi_drop_tpg, 2435 .fabric_post_link = vhost_scsi_port_link, 2436 .fabric_pre_unlink = vhost_scsi_port_unlink, 2437 2438 .tfc_wwn_attrs = vhost_scsi_wwn_attrs, 2439 .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, 2440 .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs, 2441 }; 2442 2443 static int __init vhost_scsi_init(void) 2444 { 2445 int ret = -ENOMEM; 2446 2447 pr_debug("TCM_VHOST fabric module %s on %s/%s" 2448 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, 2449 utsname()->machine); 2450 2451 ret = vhost_scsi_register(); 2452 if (ret < 0) 2453 goto out; 2454 2455 ret = target_register_template(&vhost_scsi_ops); 2456 if (ret < 0) 2457 goto out_vhost_scsi_deregister; 2458 2459 return 0; 2460 2461 out_vhost_scsi_deregister: 2462 vhost_scsi_deregister(); 2463 out: 2464 return ret; 2465 }; 2466 2467 static void vhost_scsi_exit(void) 2468 { 2469 target_unregister_template(&vhost_scsi_ops); 2470 vhost_scsi_deregister(); 2471 }; 2472 2473 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); 2474 MODULE_ALIAS("tcm_vhost"); 2475 MODULE_LICENSE("GPL"); 2476 module_init(vhost_scsi_init); 2477 module_exit(vhost_scsi_exit); 2478