/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define TCM_VHOST_VERSION		"v0.1"
#define TCM_VHOST_NAMELEN		256
#define TCM_VHOST_MAX_CDB_SIZE		32
#define TCM_VHOST_DEFAULT_TAGS		256
#define TCM_VHOST_PREALLOC_SGLS		2048
#define TCM_VHOST_PREALLOC_UPAGES	2048
#define TCM_VHOST_PREALLOC_PROT_SGLS	512

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct tcm_vhost_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response */
	struct virtio_scsi_cmd_resp __user *tvc_resp;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct tcm_vhost_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
	/* Binary World Wide unique Port Name for Vhost Initiator port */
	u64 iport_wwpn;
	/* ASCII formatted WWPN for SAS Initiator port */
	char iport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_nodeacl() */
	struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* list for tcm_vhost_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct tcm_vhost_nexus *tpg_nexus;
	/* Pointer back to tcm_vhost_tport */
	struct tcm_vhost_tport *tport;
	/* Returned by tcm_vhost_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_tport() */
	struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};
struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct tcm_vhost_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

static int iov_num_pages(struct iovec *iov)
{
	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

static void tcm_vhost_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, tcm_vhost_done_inflight);
}
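
/*
 * Sketch of how the two inflight slots above cooperate with a flush
 * (an illustrative summary of the code in this file, not extra rules):
 *
 *   cmd->inflight = tcm_vhost_get_inflight(vq);   kref_get() on the
 *                                                  current inflights[idx]
 *   ... flush flips inflight_idx, so new commands land on the other slot ...
 *   tcm_vhost_release_cmd()
 *     -> tcm_vhost_put_inflight(cmd->inflight)    kref_put()
 *
 * Once every command issued against the old slot has dropped its
 * reference, the kref hits zero and tcm_vhost_done_inflight() completes
 * old_inflight->comp, which vhost_scsi_flush() waits on.
 */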
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
	return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
			      struct se_node_acl *se_nacl,
			      struct t10_pr_registration *pr_reg,
			      int *format_code,
			      unsigned char *buf)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
				  struct se_node_acl *se_nacl,
				  struct t10_pr_registration *pr_reg,
				  int *format_code)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
				    const char *buf,
				    u32 *out_tid_len,
				    char **port_nexus_ptr)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}
static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
			     struct se_node_acl *se_nacl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
			struct tcm_vhost_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	struct se_session *se_sess = se_cmd->se_sess;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	tcm_vhost_put_inflight(tv_cmd->inflight);
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
	return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
	struct vhost_scsi *vs = cmd->tvc_vhost;

	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}

static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}
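
/*
 * Event allocation below is throttled at VHOST_SCSI_MAX_EVENT pending
 * events. When allocation is refused (or kzalloc() fails),
 * vs_events_missed is set so that the next event that does reach the
 * guest carries the VIRTIO_SCSI_T_EVENTS_MISSED flag (see
 * tcm_vhost_do_evt_work()), letting the initiator know that individual
 * events were dropped.
 */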
static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
		llnode = llist_next(llnode);
		tcm_vhost_do_evt_work(vs, evt);
		tcm_vhost_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}
/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct tcm_vhost_cmd *cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				     tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);
		ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
		if (likely(ret == 0)) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct tcm_vhost_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct tcm_vhost_cmd *cmd;
	struct tcm_vhost_nexus *tv_nexus;
	struct se_session *se_sess;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
		return ERR_PTR(-EIO);
	}
	se_sess = tv_nexus->tvn_se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0) {
		pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(struct tcm_vhost_cmd));

	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = tcm_vhost_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);

	return cmd;
}
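
/*
 * Note on the save/memset/restore dance in vhost_scsi_get_tag() above:
 * the per-tag tcm_vhost_cmd slots in se_sess->sess_cmd_map are recycled,
 * and their tvc_sgl/tvc_prot_sgl/tvc_upages arrays were preallocated in
 * tcm_vhost_make_nexus(). The memset() wipes per-request state, so the
 * three array pointers are stashed first and written back afterwards
 * rather than being reallocated per command; map_tag is then re-set from
 * the freshly allocated percpu_ida tag.
 */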
/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
		      struct scatterlist *sgl,
		      unsigned int sgl_count,
		      struct iovec *iov,
		      struct page **pages,
		      bool write)
{
	unsigned int npages = 0, pages_nr, offset, nbytes;
	struct scatterlist *sg = sgl;
	void __user *ptr = iov->iov_base;
	size_t len = iov->iov_len;
	int ret, i;

	pages_nr = iov_num_pages(iov);
	if (pages_nr > sgl_count) {
		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
		       " sgl_count: %u\n", pages_nr, sgl_count);
		return -ENOBUFS;
	}
	if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
		       " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
			pages_nr, TCM_VHOST_PREALLOC_UPAGES);
		return -ENOBUFS;
	}

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	return ret;
}

static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
			  struct iovec *iov,
			  int niov,
			  bool write)
{
	struct scatterlist *sg = cmd->tvc_sgl;
	unsigned int sgl_count = 0;
	int ret, i;

	for (i = 0; i < niov; i++)
		sgl_count += iov_num_pages(&iov[i]);

	if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
		pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
		       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
			sgl_count, TCM_VHOST_PREALLOC_SGLS);
		return -ENOBUFS;
	}

	pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
	sg_init_table(sg, sgl_count);
	cmd->tvc_sgl_count = sgl_count;

	pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);

	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
					    cmd->tvc_upages, write);
		if (ret < 0) {
			/* Only unwind entries that were actually populated */
			for (i = 0; i < cmd->tvc_sgl_count; i++) {
				struct page *page = sg_page(&cmd->tvc_sgl[i]);
				if (page)
					put_page(page);
			}
			cmd->tvc_sgl_count = 0;
			return ret;
		}
		sg += ret;
		sgl_count -= ret;
	}
	return 0;
}
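
/*
 * Pinning semantics, as implemented above: vhost_scsi_map_to_sgl() either
 * maps all pages_nr pages of one iovec into the scatterlist or releases
 * everything it pinned itself and fails, so on error the callers only
 * need to undo *previous* successful iovecs. That is what the unwind
 * loops in vhost_scsi_map_iov_to_sgl() and vhost_scsi_map_iov_to_prot()
 * do via put_page() (skipping never-populated entries, which sg_init_table()
 * left zeroed) before clearing the sgl counts; release of a fully mapped
 * command happens later in tcm_vhost_release_cmd().
 */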
static int
vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
			   struct iovec *iov,
			   int niov,
			   bool write)
{
	struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
	unsigned int prot_sgl_count = 0;
	int ret, i;

	for (i = 0; i < niov; i++)
		prot_sgl_count += iov_num_pages(&iov[i]);

	if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
		pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
		       " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
			prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
		return -ENOBUFS;
	}

	pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
		 prot_sg, prot_sgl_count);
	sg_init_table(prot_sg, prot_sgl_count);
	cmd->tvc_prot_sgl_count = prot_sgl_count;

	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
					    cmd->tvc_upages, write);
		if (ret < 0) {
			/* Only unwind entries that were actually populated */
			for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
				struct page *page = sg_page(&cmd->tvc_prot_sgl[i]);
				if (page)
					put_page(page);
			}
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
		prot_sg += ret;
		prot_sgl_count -= ret;
	}
	return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void tcm_vhost_submission_work(struct work_struct *work)
{
	struct tcm_vhost_cmd *cmd =
		container_of(work, struct tcm_vhost_cmd, work);
	struct tcm_vhost_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
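
/*
 * Request parsing below assumes the pre-ANY_LAYOUT virtio-scsi descriptor
 * convention: one request header iovec, optional data-out iovecs, then one
 * response iovec followed by optional data-in iovecs. Hence the (out, in)
 * counts map to the data direction like so:
 *
 *   out == 1, in == 1  ->  DMA_NONE         (header + response only)
 *   out == 1, in  > 1  ->  DMA_FROM_DEVICE  (read:  data follows the resp)
 *   out  > 1, in == 1  ->  DMA_TO_DEVICE    (write: data follows the req)
 */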
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **vs_tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_cmd *cmd;
	u64 tag;
	u32 exp_data_len, data_first, data_num, data_direction, prot_first;
	unsigned out, in, i;
	int head, ret, data_niov, prot_niov, prot_bytes;
	size_t req_size;
	u16 lun;
	u8 *target, *lunp, task_attr;
	bool hdr_pi;
	void *req, *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vq->private_data;
	if (!vs_tpg)
		goto out;

	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					ARRAY_SIZE(vq->iov), &out, &in,
					NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
					head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}

		/* FIXME: BIDI operation */
		if (out == 1 && in == 1) {
			data_direction = DMA_NONE;
			data_first = 0;
			data_num = 0;
		} else if (out == 1 && in > 1) {
			data_direction = DMA_FROM_DEVICE;
			data_first = out + 1;
			data_num = in - 1;
		} else if (out > 1 && in == 1) {
			data_direction = DMA_TO_DEVICE;
			data_first = 1;
			data_num = out - 1;
		} else {
			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
					out, in);
			break;
		}

		/*
		 * Check for a sane resp buffer so we can report errors to
		 * the guest.
		 */
		if (unlikely(vq->iov[out].iov_len !=
					sizeof(struct virtio_scsi_cmd_resp))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
				" bytes\n", vq->iov[out].iov_len);
			break;
		}

		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
			req = &v_req_pi;
			lunp = &v_req_pi.lun[0];
			target = &v_req_pi.lun[1];
			req_size = sizeof(v_req_pi);
			hdr_pi = true;
		} else {
			req = &v_req;
			lunp = &v_req.lun[0];
			target = &v_req.lun[1];
			req_size = sizeof(v_req);
			hdr_pi = false;
		}

		if (unlikely(vq->iov[0].iov_len < req_size)) {
			pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
			       req_size, vq->iov[0].iov_len);
			break;
		}
		ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
		if (unlikely(ret)) {
			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
			break;
		}

		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		if (unlikely(*lunp != 1)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		tpg = ACCESS_ONCE(vs_tpg[*target]);

		/* Target does not exist, fail the request */
		if (unlikely(!tpg)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		data_niov = data_num;
		prot_niov = prot_first = prot_bytes = 0;
		/*
		 * Determine if any protection information iovecs are preceding
		 * the actual data payload, and adjust data_first + data_niov
		 * values accordingly for vhost_scsi_map_iov_to_sgl() below.
		 *
		 * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
		 */
		if (hdr_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero do_pi_niov"
						", but wrong data_direction\n");
					goto err_cmd;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero di_pi_niov"
						", but wrong data_direction\n");
					goto err_cmd;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			if (prot_bytes) {
				int tmp = 0;

				for (i = 0; i < data_num; i++) {
					tmp += vq->iov[data_first + i].iov_len;
					prot_niov++;
					if (tmp >= prot_bytes)
						break;
				}
				prot_first = data_first;
				data_first += prot_niov;
				data_niov = data_num - prot_niov;
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
		}
		exp_data_len = 0;
		for (i = 0; i < data_niov; i++)
			exp_data_len += vq->iov[data_first + i].iov_len;
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
			goto err_cmd;
		}

		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
					PTR_ERR(cmd));
			goto err_cmd;
		}

		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
			": %d\n", cmd, exp_data_len, data_direction);

		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp = vq->iov[out].iov_base;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			cmd->tvc_cdb[0], cmd->tvc_lun);

		if (prot_niov) {
			ret = vhost_scsi_map_iov_to_prot(cmd,
					&vq->iov[prot_first], prot_niov,
					data_direction == DMA_FROM_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to"
					" prot_sgl\n");
				goto err_free;
			}
		}
		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_map_iov_to_sgl(cmd,
					&vq->iov[data_first], data_niov,
					data_direction == DMA_FROM_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				goto err_free;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
		 */
		cmd->tvc_vq_desc = head;
		/*
		 * Dispatch tv_cmd descriptor for cmwq execution in process
		 * context provided by tcm_vhost_workqueue.  This also ensures
		 * tv_cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
		queue_work(tcm_vhost_workqueue, &cmd->work);
	}

	mutex_unlock(&vq->mutex);
	return;

err_free:
	vhost_scsi_free_cmd(cmd);
err_cmd:
	vhost_scsi_send_bad_target(vs, vq, head, out);
out:
	mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
		   struct tcm_vhost_tpg *tpg,
		   struct se_lun *lun,
		   u32 event,
		   u32 reason)
{
	struct tcm_vhost_evt *evt;

	evt = tcm_vhost_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
					poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}
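
/*
 * Flush sequence (see also the inflight comments near the top of this
 * file): tcm_vhost_init_inflight() flips every vq to a fresh inflight
 * slot, the initial kref reference of each old slot is dropped, the vq
 * polls and work items are flushed so nothing can still queue commands
 * against the old slots, and finally we sleep on each old slot's
 * completion, i.e. until every command issued before the flush has been
 * released.
 */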
static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	tcm_vhost_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 * The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
						   &se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("configfs_depend_item() failed: %d\n", ret);
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				goto out;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			smp_mb__after_atomic();
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = vs_tpg;
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
				       &se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = NULL;
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}
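
/*
 * Feature negotiation: the acked feature bits are mirrored into every
 * virtqueue under vq->mutex so the I/O paths can test them via
 * vhost_has_feature() without taking the device mutex, e.g.
 * VIRTIO_SCSI_F_T10_PI selects the virtio_scsi_cmd_req_pi header layout
 * in vhost_scsi_handle_vq() and VIRTIO_SCSI_F_HOTPLUG gates event
 * delivery in tcm_vhost_do_plug().
 */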
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vs) {
		vs = vzalloc(sizeof(*vs));
		if (!vs)
			goto err_vs;
	}

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);

	tcm_vhost_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_vqs:
	kvfree(vs);
err_vs:
	return r;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kvfree(vs);
	return 0;
}
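
/*
 * Typical userspace setup sequence for this ioctl interface (illustrative
 * sketch only, roughly what a vhost-scsi capable VMM such as QEMU does;
 * error handling omitted, the wwpn value is a placeholder):
 *
 *   int fd = open("/dev/vhost-scsi", O_RDWR);
 *   ioctl(fd, VHOST_SET_OWNER, NULL);
 *   ioctl(fd, VHOST_GET_FEATURES, &features);
 *   ioctl(fd, VHOST_SET_FEATURES, &features);
 *   ... VHOST_SET_VRING_* calls for each virtqueue ...
 *   struct vhost_scsi_target t = { .vhost_wwpn = "naa.600140554cf3a18e" };
 *   ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 *
 * VHOST_SCSI_CLEAR_ENDPOINT with the same target detaches again.
 */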
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner		= THIS_MODULE,
	.release	= vhost_scsi_release,
	.unlocked_ioctl	= vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vhost_scsi_compat_ioctl,
#endif
	.open		= vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static void
tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
		  struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		tcm_vhost_send_evt(vs, tpg, lun,
				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}

static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}
static struct se_node_acl *
tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
		       struct config_group *group,
		       const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
				       struct se_session *se_sess)
{
	struct tcm_vhost_cmd *tv_cmd;
	unsigned int i;

	if (!se_sess->sess_cmd_map)
		return;

	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
	}
}
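
/*
 * Rough footprint of the per-nexus preallocation done below, assuming a
 * 32-byte struct scatterlist and 8-byte pointers (both are arch and
 * config dependent): each of the TCM_VHOST_DEFAULT_TAGS (256) command
 * slots gets 2048 data SGL entries, 512 protection SGL entries and 2048
 * page pointers, i.e. roughly
 *
 *   256 * (2048*32 + 512*32 + 2048*8) bytes  ~=  24 MB per I_T nexus
 *
 * The upside of paying this up front is that the I/O fast path never has
 * to allocate memory.
 */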
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
				const char *name)
{
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;
	struct tcm_vhost_cmd *tv_cmd;
	unsigned int i;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer and setup tagpool
	 * for struct tcm_vhost_cmd descriptors
	 */
	tv_nexus->tvn_se_sess = transport_init_session_tags(
					TCM_VHOST_DEFAULT_TAGS,
					sizeof(struct tcm_vhost_cmd),
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	se_sess = tv_nexus->tvn_se_sess;
	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];

		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
					TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
					TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
					TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		goto out;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;

out:
	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
	transport_free_session(se_sess);
	kfree(tv_nexus);
	return -ENOMEM;
}

static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
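
/*
 * The nexus attribute below is what a configfs (e.g. targetcli) user
 * writes to bring a TPG live. An illustrative provisioning flow, with
 * placeholder WWPNs and the usual target configfs layout assumed:
 *
 *   mkdir -p /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *   echo -n naa.60014053226f0388 > \
 *       /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * Writing "NULL" to the same file tears the nexus down again via
 * tcm_vhost_drop_nexus().
 */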
2044 */ 2045 if (!strncmp(page, "NULL", 4)) { 2046 ret = tcm_vhost_drop_nexus(tpg); 2047 return (!ret) ? count : ret; 2048 } 2049 /* 2050 * Otherwise make sure the passed virtual Initiator port WWN matches 2051 * the fabric protocol_id set in tcm_vhost_make_tport(), and call 2052 * tcm_vhost_make_nexus(). 2053 */ 2054 if (strlen(page) >= TCM_VHOST_NAMELEN) { 2055 pr_err("Emulated NAA Sas Address: %s, exceeds" 2056 " max: %d\n", page, TCM_VHOST_NAMELEN); 2057 return -EINVAL; 2058 } 2059 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); 2060 2061 ptr = strstr(i_port, "naa."); 2062 if (ptr) { 2063 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { 2064 pr_err("Passed SAS Initiator Port %s does not" 2065 " match target port protoid: %s\n", i_port, 2066 tcm_vhost_dump_proto_id(tport_wwn)); 2067 return -EINVAL; 2068 } 2069 port_ptr = &i_port[0]; 2070 goto check_newline; 2071 } 2072 ptr = strstr(i_port, "fc."); 2073 if (ptr) { 2074 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { 2075 pr_err("Passed FCP Initiator Port %s does not" 2076 " match target port protoid: %s\n", i_port, 2077 tcm_vhost_dump_proto_id(tport_wwn)); 2078 return -EINVAL; 2079 } 2080 port_ptr = &i_port[3]; /* Skip over "fc." */ 2081 goto check_newline; 2082 } 2083 ptr = strstr(i_port, "iqn."); 2084 if (ptr) { 2085 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { 2086 pr_err("Passed iSCSI Initiator Port %s does not" 2087 " match target port protoid: %s\n", i_port, 2088 tcm_vhost_dump_proto_id(tport_wwn)); 2089 return -EINVAL; 2090 } 2091 port_ptr = &i_port[0]; 2092 goto check_newline; 2093 } 2094 pr_err("Unable to locate prefix for emulated Initiator Port:" 2095 " %s\n", i_port); 2096 return -EINVAL; 2097 /* 2098 * Clear any trailing newline for the NAA WWN 2099 */ 2100 check_newline: 2101 if (i_port[strlen(i_port)-1] == '\n') 2102 i_port[strlen(i_port)-1] = '\0'; 2103 2104 ret = tcm_vhost_make_nexus(tpg, port_ptr); 2105 if (ret < 0) 2106 return ret; 2107 2108 return count; 2109 } 2110 2111 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); 2112 2113 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { 2114 &tcm_vhost_tpg_nexus.attr, 2115 NULL, 2116 }; 2117 2118 static struct se_portal_group * 2119 tcm_vhost_make_tpg(struct se_wwn *wwn, 2120 struct config_group *group, 2121 const char *name) 2122 { 2123 struct tcm_vhost_tport *tport = container_of(wwn, 2124 struct tcm_vhost_tport, tport_wwn); 2125 2126 struct tcm_vhost_tpg *tpg; 2127 unsigned long tpgt; 2128 int ret; 2129 2130 if (strstr(name, "tpgt_") != name) 2131 return ERR_PTR(-EINVAL); 2132 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) 2133 return ERR_PTR(-EINVAL); 2134 2135 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); 2136 if (!tpg) { 2137 pr_err("Unable to allocate struct tcm_vhost_tpg"); 2138 return ERR_PTR(-ENOMEM); 2139 } 2140 mutex_init(&tpg->tv_tpg_mutex); 2141 INIT_LIST_HEAD(&tpg->tv_tpg_list); 2142 tpg->tport = tport; 2143 tpg->tport_tpgt = tpgt; 2144 2145 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, 2146 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 2147 if (ret < 0) { 2148 kfree(tpg); 2149 return NULL; 2150 } 2151 mutex_lock(&tcm_vhost_mutex); 2152 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); 2153 mutex_unlock(&tcm_vhost_mutex); 2154 2155 return &tpg->se_tpg; 2156 } 2157 2158 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) 2159 { 2160 struct tcm_vhost_tpg *tpg = container_of(se_tpg, 2161 struct tcm_vhost_tpg, se_tpg); 2162 2163 mutex_lock(&tcm_vhost_mutex); 2164 
static struct se_portal_group *
tcm_vhost_make_tpg(struct se_wwn *wwn,
		   struct config_group *group,
		   const char *name)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
			struct tcm_vhost_tport, tport_wwn);
	struct tcm_vhost_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_vhost_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&tcm_vhost_mutex);
	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
	mutex_unlock(&tcm_vhost_mutex);

	return &tpg->se_tpg;
}

static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&tcm_vhost_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	tcm_vhost_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static struct se_wwn *
tcm_vhost_make_tport(struct target_fabric_configfs *tf,
		     struct config_group *group,
		     const char *name)
{
	struct tcm_vhost_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct tcm_vhost_tport\n");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
		" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
			TCM_VHOST_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void tcm_vhost_drop_tport(struct se_wwn *wwn)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
				struct tcm_vhost_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}
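/*
 * The WWN directory name chosen by userspace selects the emulated
 * protocol in tcm_vhost_make_tport() above, e.g. (names illustrative):
 *
 *   mkdir .../target/vhost/naa.600140554cf3a18e        -> SCSI_PROTOCOL_SAS
 *   mkdir .../target/vhost/fc.20000000c9a1b2c3         -> SCSI_PROTOCOL_FCP
 *   mkdir .../target/vhost/iqn.2013-01.org.example:t1  -> SCSI_PROTOCOL_ISCSI
 */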
static ssize_t
tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
				char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_vhost, version);

static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
	&tcm_vhost_wwn_version.attr,
	NULL,
};

static struct target_core_fabric_ops tcm_vhost_ops = {
	.get_fabric_name = tcm_vhost_get_fabric_name,
	.get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
	.tpg_get_wwn = tcm_vhost_get_fabric_wwn,
	.tpg_get_tag = tcm_vhost_get_tag,
	.tpg_get_default_depth = tcm_vhost_get_default_depth,
	.tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
	.tpg_check_demo_mode = tcm_vhost_check_true,
	.tpg_check_demo_mode_cache = tcm_vhost_check_true,
	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
	.tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
	.tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
	.tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
	.release_cmd = tcm_vhost_release_cmd,
	.check_stop_free = vhost_scsi_check_stop_free,
	.shutdown_session = tcm_vhost_shutdown_session,
	.close_session = tcm_vhost_close_session,
	.sess_get_index = tcm_vhost_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_vhost_write_pending,
	.write_pending_status = tcm_vhost_write_pending_status,
	.set_default_node_attributes = tcm_vhost_set_default_node_attrs,
	.get_task_tag = tcm_vhost_get_task_tag,
	.get_cmd_state = tcm_vhost_get_cmd_state,
	.queue_data_in = tcm_vhost_queue_data_in,
	.queue_status = tcm_vhost_queue_status,
	.queue_tm_rsp = tcm_vhost_queue_tm_rsp,
	.aborted_task = tcm_vhost_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_vhost_make_tport,
	.fabric_drop_wwn = tcm_vhost_drop_tport,
	.fabric_make_tpg = tcm_vhost_make_tpg,
	.fabric_drop_tpg = tcm_vhost_drop_tpg,
	.fabric_post_link = tcm_vhost_port_link,
	.fabric_pre_unlink = tcm_vhost_port_unlink,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = tcm_vhost_make_nodeacl,
	.fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
};

static int tcm_vhost_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_vhost_ops
	 */
	fabric->tf_ops = tcm_vhost_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed"
				" for TCM_VHOST\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_vhost_fabric_configfs = fabric;
	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
	return 0;
}
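/*
 * Once registration succeeds, the fabric appears under configfs roughly
 * as follows (layout sketch; the entries are created later by userspace):
 *
 *   /sys/kernel/config/target/vhost/
 *       $WWPN/                created via tcm_vhost_make_tport()
 *           tpgt_$TPGT/       created via tcm_vhost_make_tpg()
 *               nexus         tcm_vhost_tpg_attrs[] above
 *               lun/          LUN links handled by the target core
 */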
static void tcm_vhost_deregister_configfs(void)
{
	if (!tcm_vhost_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
	tcm_vhost_fabric_configfs = NULL;
	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
}

static int __init tcm_vhost_init(void)
{
	int ret = -ENOMEM;
	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
	if (!tcm_vhost_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = tcm_vhost_register_configfs();
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(tcm_vhost_workqueue);
out:
	return ret;
}

static void tcm_vhost_exit(void)
{
	tcm_vhost_deregister_configfs();
	vhost_scsi_deregister();
	destroy_workqueue(tcm_vhost_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);
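/*
 * Illustrative end-to-end flow (a sketch, not taken from this file):
 * load the module, build the configfs layout described above on top of
 * a target core backstore, then hand the WWPN to a guest, e.g. via
 * QEMU's vhost-scsi device:
 *
 *   modprobe tcm_vhost
 *   ...create backstore + /sys/kernel/config/target/vhost/$WWPN/tpgt_1...
 *   qemu-system-x86_64 ... -device vhost-scsi-pci,wwpn=$WWPN
 *
 * The guest's virtio-scsi initiator then submits commands that reach
 * this driver through the vhost virtqueues handled earlier in the file.
 */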