/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 * Adaptation to kernel target core infrastructure taken from vhost/scsi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-pvscsi: " fmt

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>

#include <generated/utsrelease.h>

#include <scsi/scsi_host.h> /* SG_ALL */

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hypervisor.h>

#include <xen/xen.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>

#define VSCSI_VERSION	"v0.1"
#define VSCSI_NAMELEN	32

struct ids_tuple {
	unsigned int hst;		/* host    */
	unsigned int chn;		/* channel */
	unsigned int tgt;		/* target  */
	unsigned int lun;		/* LUN     */
};

struct v2p_entry {
	struct ids_tuple v;		/* translate from */
	struct scsiback_tpg *tpg;	/* translate to   */
	unsigned int lun;
	struct kref kref;
	struct list_head l;
};

struct vscsibk_info {
	struct xenbus_device *dev;

	domid_t domid;
	unsigned int irq;

	struct vscsiif_back_ring ring;

	spinlock_t ring_lock;
	atomic_t nr_unreplied_reqs;

	spinlock_t v2p_lock;
	struct list_head v2p_entry_lists;

	wait_queue_head_t waiting_to_free;

	struct gnttab_page_cache free_pages;
};

/* theoretical maximum of grants for one request */
#define VSCSI_MAX_GRANTS	(SG_ALL + VSCSIIF_SG_TABLESIZE)

/*
 * VSCSI_GRANT_BATCH is the maximum number of grants to be processed in one
 * call to map/unmap grants. Don't choose it too large, as there are arrays
 * with VSCSI_GRANT_BATCH elements allocated on the stack.
 */
#define VSCSI_GRANT_BATCH	16
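/*
 * State of one in-flight frontend request, allocated from the TCM
 * session's tag pool. The grant handle and page arrays cover both the
 * SG data pages and, for indirect requests, the pages holding the SG
 * lists themselves.
 */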
struct vscsibk_pend {
	uint16_t rqid;

	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
	uint8_t cmd_len;

	uint8_t sc_data_direction;
	uint16_t n_sg;		/* real length of SG list */
	uint16_t n_grants;	/* SG pages and potentially SG list */
	uint32_t data_len;
	uint32_t result;

	struct vscsibk_info *info;
	struct v2p_entry *v2p;
	struct scatterlist *sgl;

	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];

	grant_handle_t grant_handles[VSCSI_MAX_GRANTS];
	struct page *pages[VSCSI_MAX_GRANTS];

	struct se_cmd se_cmd;

	struct completion tmr_done;
};

#define VSCSI_DEFAULT_SESSION_TAGS	128

struct scsiback_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct scsiback_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for pvscsi Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for pvscsi Target port */
	char tport_name[VSCSI_NAMELEN];
	/* Returned by scsiback_make_tport() */
	struct se_wwn tport_wwn;
};

struct scsiback_tpg {
	/* scsiback port target portal group tag for TCM */
	u16 tport_tpgt;
	/* track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* xen-pvscsi references to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_fe_count;
	/* list for scsiback_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM pvscsi I_T Nexus for this TPG endpoint */
	struct scsiback_nexus *tpg_nexus;
	/* Pointer back to scsiback_tport */
	struct scsiback_tport *tport;
	/* Returned by scsiback_make_tpg() */
	struct se_portal_group se_tpg;
	/* alias used in xenstore */
	char param_alias[VSCSI_NAMELEN];
	/* list of info structures related to this target portal group */
	struct list_head info_list;
};

#define SCSIBACK_INVALID_HANDLE (~0)

static bool log_print_stat;
module_param(log_print_stat, bool, 0644);

static int scsiback_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
		 "Maximum number of free pages to keep in backend buffer");

/* Global mutex to protect the scsiback TPG list */
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);

static void scsiback_get(struct vscsibk_info *info)
{
	atomic_inc(&info->nr_unreplied_reqs);
}

static void scsiback_put(struct vscsibk_info *info)
{
	if (atomic_dec_and_test(&info->nr_unreplied_reqs))
		wake_up(&info->waiting_to_free);
}

static unsigned long vaddr_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	return (unsigned long)pfn_to_kaddr(pfn);
}

static unsigned long vaddr(struct vscsibk_pend *req, int seg)
{
	return vaddr_page(req->pages[seg]);
}

static void scsiback_print_status(char *sense_buffer, int errors,
				  struct vscsibk_pend *pending_req)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;

	pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x\n",
	       tpg->tport->tport_name, pending_req->v2p->lun,
	       pending_req->cmnd[0], errors & 0xff, COMMAND_COMPLETE,
	       host_byte(errors));
}
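/*
 * Unmap all grants of a finished request in batches of VSCSI_GRANT_BATCH
 * (the unmap op and page arrays live on the stack) and hand the backing
 * pages back to the per-backend free page cache.
 */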
static void scsiback_fast_flush_area(struct vscsibk_pend *req)
{
	struct gnttab_unmap_grant_ref unmap[VSCSI_GRANT_BATCH];
	struct page *pages[VSCSI_GRANT_BATCH];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int err;

	kfree(req->sgl);
	req->sgl = NULL;
	req->n_sg = 0;

	if (!req->n_grants)
		return;

	for (i = 0; i < req->n_grants; i++) {
		handle = req->grant_handles[i];
		if (handle == SCSIBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
		pages[invcount] = req->pages[i];
		put_page(pages[invcount]);
		invcount++;
		if (invcount < VSCSI_GRANT_BATCH)
			continue;
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
		invcount = 0;
	}

	if (invcount) {
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
	}

	gnttab_page_cache_put(&req->info->free_pages, req->pages,
			      req->n_grants);
	req->n_grants = 0;
}

static void scsiback_free_translation_entry(struct kref *kref)
{
	struct v2p_entry *entry = container_of(kref, struct v2p_entry, kref);
	struct scsiback_tpg *tpg = entry->tpg;

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_fe_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(entry);
}

static void scsiback_send_response(struct vscsibk_info *info,
			char *sense_buffer, int32_t result, uint32_t resid,
			uint16_t rqid)
{
	struct vscsiif_response *ring_res;
	int notify;
	struct scsi_sense_hdr sshdr;
	unsigned long flags;
	unsigned len;

	spin_lock_irqsave(&info->ring_lock, flags);

	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
	info->ring.rsp_prod_pvt++;

	ring_res->rslt = result;
	ring_res->rqid = rqid;

	if (sense_buffer != NULL &&
	    scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
				 &sshdr)) {
		len = min_t(unsigned, 8 + sense_buffer[7],
			    VSCSIIF_SENSE_BUFFERSIZE);
		memcpy(ring_res->sense_buffer, sense_buffer, len);
		ring_res->sense_len = len;
	} else {
		ring_res->sense_len = 0;
	}

	ring_res->residual_len = resid;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
	spin_unlock_irqrestore(&info->ring_lock, flags);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
			uint32_t resid, struct vscsibk_pend *pending_req)
{
	scsiback_send_response(pending_req->info, sense_buffer, result,
			       resid, pending_req->rqid);

	if (pending_req->v2p)
		kref_put(&pending_req->v2p->kref,
			 scsiback_free_translation_entry);
}

static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
{
	struct vscsibk_info *info = pending_req->info;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = pending_req->sense_buffer;
	resid = pending_req->se_cmd.residual_count;
	errors = pending_req->result;

	if (errors && log_print_stat)
		scsiback_print_status(sense_buffer, errors, pending_req);

	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	scsiback_put(info);
	/*
	 * Drop the extra KREF_ACK reference taken via TARGET_SCF_ACK_KREF
	 * in target_init_cmd() ahead of scsiback_check_stop_free() ->
	 * transport_generic_free_cmd() doing the final se_cmd->cmd_kref put.
	 */
	target_put_sess_cmd(&pending_req->se_cmd);
}
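/*
 * Hand a parsed request to the target core: initialize the se_cmd against
 * the TPG's I_T nexus session, attach the CDB and the mapped scatterlist,
 * then submit. The KREF_ACK reference taken here is dropped again in
 * scsiback_cmd_done().
 */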
static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
{
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;

	scsiback_get(pending_req->info);
	se_cmd->tag = pending_req->rqid;
	target_init_cmd(se_cmd, sess, pending_req->sense_buffer,
			pending_req->v2p->lun, pending_req->data_len, 0,
			pending_req->sc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, pending_req->cmnd, pending_req->sgl,
			       pending_req->n_sg, NULL, 0, NULL, 0, GFP_KERNEL))
		return;

	target_submit(se_cmd);
}

static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
			struct page **pg, grant_handle_t *grant, int cnt)
{
	int err, i;

	if (!cnt)
		return 0;

	err = gnttab_map_refs(map, NULL, pg, cnt);
	for (i = 0; i < cnt; i++) {
		if (unlikely(map[i].status != GNTST_okay)) {
			pr_err("invalid buffer -- could not remap it\n");
			map[i].handle = SCSIBACK_INVALID_HANDLE;
			if (!err)
				err = -ENOMEM;
		} else {
			get_page(pg[i]);
		}
		grant[i] = map[i].handle;
	}
	return err;
}

static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
			struct scsiif_request_segment *seg, struct page **pg,
			grant_handle_t *grant, int cnt, u32 flags)
{
	int mapcount = 0, i, err = 0;
	struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH];
	struct vscsibk_info *info = pending_req->info;

	for (i = 0; i < cnt; i++) {
		if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
			gnttab_page_cache_put(&info->free_pages, pg, mapcount);
			pr_err("no grant page\n");
			return -ENOMEM;
		}
		gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
				  flags, seg[i].gref, info->domid);
		mapcount++;
		if (mapcount < VSCSI_GRANT_BATCH)
			continue;
		err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
		pg += mapcount;
		grant += mapcount;
		pending_req->n_grants += mapcount;
		if (err)
			return err;
		mapcount = 0;
	}
	err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
	pending_req->n_grants += mapcount;
	return err;
}
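/*
 * Map the data pages of a request. With VSCSIIF_SG_GRANT set in
 * nr_segments the frontend passes grants referencing pages that hold
 * further scsiif_request_segment arrays (up to SG_ALL data segments in
 * total); those indirect pages are mapped read-only first, then the data
 * pages they describe. Everything mapped here is torn down again by
 * scsiback_fast_flush_area().
 */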
static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
					struct vscsibk_pend *pending_req)
{
	u32 flags;
	int i, err, n_segs, i_seg = 0;
	struct page **pg;
	struct scsiif_request_segment *seg;
	unsigned long end_seg = 0;
	unsigned int nr_segments = (unsigned int)ring_req->nr_segments;
	unsigned int nr_sgl = 0;
	struct scatterlist *sg;
	grant_handle_t *grant;

	pending_req->n_sg = 0;
	pending_req->n_grants = 0;
	pending_req->data_len = 0;

	nr_segments &= ~VSCSIIF_SG_GRANT;
	if (!nr_segments)
		return 0;

	if (nr_segments > VSCSIIF_SG_TABLESIZE) {
		pr_debug("invalid parameter nr_seg = %d\n",
			 ring_req->nr_segments);
		return -EINVAL;
	}

	if (ring_req->nr_segments & VSCSIIF_SG_GRANT) {
		err = scsiback_gnttab_data_map_list(pending_req, ring_req->seg,
			pending_req->pages, pending_req->grant_handles,
			nr_segments, GNTMAP_host_map | GNTMAP_readonly);
		if (err)
			return err;
		nr_sgl = nr_segments;
		nr_segments = 0;
		for (i = 0; i < nr_sgl; i++) {
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			if ((unsigned)ring_req->seg[i].offset +
			    (unsigned)ring_req->seg[i].length > PAGE_SIZE ||
			    n_segs * sizeof(struct scsiif_request_segment) !=
			    ring_req->seg[i].length)
				return -EINVAL;
			nr_segments += n_segs;
		}
		if (nr_segments > SG_ALL) {
			pr_debug("invalid nr_seg = %d\n", nr_segments);
			return -EINVAL;
		}
	}

	/* pending_req->sgl is freed in scsiback_fast_flush_area() */
	pending_req->sgl = kmalloc_array(nr_segments,
					 sizeof(struct scatterlist), GFP_KERNEL);
	if (!pending_req->sgl)
		return -ENOMEM;

	sg_init_table(pending_req->sgl, nr_segments);
	pending_req->n_sg = nr_segments;

	flags = GNTMAP_host_map;
	if (pending_req->sc_data_direction == DMA_TO_DEVICE)
		flags |= GNTMAP_readonly;

	pg = pending_req->pages + nr_sgl;
	grant = pending_req->grant_handles + nr_sgl;
	if (!nr_sgl) {
		seg = ring_req->seg;
		err = scsiback_gnttab_data_map_list(pending_req, seg,
			pg, grant, nr_segments, flags);
		if (err)
			return err;
	} else {
		for (i = 0; i < nr_sgl; i++) {
			seg = (struct scsiif_request_segment *)(
			      vaddr(pending_req, i) + ring_req->seg[i].offset);
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			err = scsiback_gnttab_data_map_list(pending_req, seg,
				pg, grant, n_segs, flags);
			if (err)
				return err;
			pg += n_segs;
			grant += n_segs;
		}
		end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset;
		seg = (struct scsiif_request_segment *)end_seg;
		end_seg += ring_req->seg[0].length;
		pg = pending_req->pages + nr_sgl;
	}

	for_each_sg(pending_req->sgl, sg, nr_segments, i) {
		sg_set_page(sg, pg[i], seg->length, seg->offset);
		pending_req->data_len += seg->length;
		seg++;
		if (nr_sgl && (unsigned long)seg >= end_seg) {
			i_seg++;
			end_seg = vaddr(pending_req, i_seg) +
				  ring_req->seg[i_seg].offset;
			seg = (struct scsiif_request_segment *)end_seg;
			end_seg += ring_req->seg[i_seg].length;
		}
		if (sg->offset >= PAGE_SIZE ||
		    sg->length > PAGE_SIZE ||
		    sg->offset + sg->length > PAGE_SIZE)
			return -EINVAL;
	}

	return 0;
}

static void scsiback_disconnect(struct vscsibk_info *info)
{
	wait_event(info->waiting_to_free,
		   atomic_read(&info->nr_unreplied_reqs) == 0);

	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
	xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
}
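/*
 * Execute a task management request (ABORT_TASK or LUN_RESET) through the
 * target core. scsiback_queue_tm_rsp() completes tmr_done once the core
 * has finished; the result is then translated into a ring response.
 */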
static void scsiback_device_action(struct vscsibk_pend *pending_req,
	enum tcm_tmreq_table act, int tag)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	u64 unpacked_lun = pending_req->v2p->lun;
	int rc, err = FAILED;

	init_completion(&pending_req->tmr_done);

	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
			       &pending_req->sense_buffer[0],
			       unpacked_lun, NULL, act, GFP_KERNEL,
			       tag, TARGET_SCF_ACK_KREF);
	if (rc)
		goto err;

	wait_for_completion(&pending_req->tmr_done);

	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
		SUCCESS : FAILED;

	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
	transport_generic_free_cmd(&pending_req->se_cmd, 0);
	return;

err:
	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
}

/*
 * Perform virtual to physical translation
 */
static struct v2p_entry *scsiback_do_translation(struct vscsibk_info *info,
			struct ids_tuple *v)
{
	struct v2p_entry *entry;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);
	list_for_each_entry(entry, head, l) {
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun)) {
			kref_get(&entry->kref);
			goto out;
		}
	}
	entry = NULL;

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return entry;
}

static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
			struct v2p_entry *v2p)
{
	struct scsiback_tpg *tpg = v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_session *se_sess = nexus->tvn_se_sess;
	struct vscsibk_pend *req;
	int tag, cpu, i;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vscsiif_request\n");
		return ERR_PTR(-ENOMEM);
	}

	req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;

	for (i = 0; i < VSCSI_MAX_GRANTS; i++)
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;

	return req;
}

static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
			struct vscsiif_back_ring *ring,
			struct vscsiif_request *ring_req)
{
	struct vscsibk_pend *pending_req;
	struct v2p_entry *v2p;
	struct ids_tuple vir;

	/* request range check from frontend */
	if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
	    (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
	    (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
	    (ring_req->sc_data_direction != DMA_NONE)) {
		pr_debug("invalid parameter data_dir = %d\n",
			 ring_req->sc_data_direction);
		return ERR_PTR(-EINVAL);
	}
	if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		pr_debug("invalid parameter cmd_len = %d\n",
			 ring_req->cmd_len);
		return ERR_PTR(-EINVAL);
	}

	vir.chn = ring_req->channel;
	vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	v2p = scsiback_do_translation(info, &vir);
	if (!v2p) {
		pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
			 vir.chn, vir.tgt, vir.lun);
		return ERR_PTR(-ENODEV);
	}

	pending_req = scsiback_get_pend_req(ring, v2p);
	if (IS_ERR(pending_req)) {
		kref_put(&v2p->kref, scsiback_free_translation_entry);
		return ERR_PTR(-ENOMEM);
	}
	pending_req->rqid = ring_req->rqid;
	pending_req->info = info;
	pending_req->v2p = v2p;
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	pending_req->cmd_len = ring_req->cmd_len;
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	return pending_req;
}
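/*
 * Consume requests from the shared ring. Returns a negative value when
 * the frontend provided a bogus producer index (the event channel then
 * stays masked, see scsiback_irq_fn()), 0 when the ring is empty, and a
 * positive value when more work may be pending.
 */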
static int scsiback_do_cmd_fn(struct vscsibk_info *info,
			      unsigned int *eoi_flags)
{
	struct vscsiif_back_ring *ring = &info->ring;
	struct vscsiif_request ring_req;
	struct vscsibk_pend *pending_req;
	RING_IDX rc, rp;
	int more_to_do;
	uint32_t result;

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();	/* guest system is accessing ring, too */

	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
		rc = ring->rsp_prod_pvt;
		pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
			info->domid, rp, rc, rp - rc);
		return -EINVAL;
	}

	while (rc != rp) {
		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;

		RING_COPY_REQUEST(ring, rc, &ring_req);
		ring->req_cons = ++rc;

		pending_req = prepare_pending_reqs(info, ring, &ring_req);
		if (IS_ERR(pending_req)) {
			switch (PTR_ERR(pending_req)) {
			case -ENODEV:
				result = DID_NO_CONNECT;
				break;
			default:
				result = DID_ERROR;
				break;
			}
			scsiback_send_response(info, NULL, result << 16, 0,
					       ring_req.rqid);
			return 1;
		}

		switch (ring_req.act) {
		case VSCSIIF_ACT_SCSI_CDB:
			if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
				scsiback_fast_flush_area(pending_req);
				scsiback_do_resp_with_sense(NULL,
						DID_ERROR << 16, 0, pending_req);
				transport_generic_free_cmd(&pending_req->se_cmd, 0);
			} else {
				scsiback_cmd_exec(pending_req);
			}
			break;
		case VSCSIIF_ACT_SCSI_ABORT:
			scsiback_device_action(pending_req, TMR_ABORT_TASK,
					       ring_req.ref_rqid);
			break;
		case VSCSIIF_ACT_SCSI_RESET:
			scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
			break;
		default:
			pr_err_ratelimited("invalid request\n");
			scsiback_do_resp_with_sense(NULL, DID_ERROR << 16, 0,
						    pending_req);
			transport_generic_free_cmd(&pending_req->se_cmd, 0);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);

	RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
	return more_to_do;
}

static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
	struct vscsibk_info *info = dev_id;
	int rc;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
		cond_resched();

	/* In case of a ring error we keep the event channel masked. */
	if (!rc)
		xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}
static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
			evtchn_port_t evtchn)
{
	void *area;
	struct vscsiif_sring *sring;
	int err;

	if (info->irq)
		return -1;

	err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area);
	if (err)
		return err;

	sring = (struct vscsiif_sring *)area;
	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq_lateeoi(info->dev, evtchn);
	if (err < 0)
		goto unmap_page;

	info->irq = err;

	err = request_threaded_irq(info->irq, NULL, scsiback_irq_fn,
				   IRQF_ONESHOT, "vscsiif-backend", info);
	if (err)
		goto free_irq;

	return 0;

free_irq:
	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
unmap_page:
	xenbus_unmap_ring_vfree(info->dev, area);

	return err;
}

static int scsiback_map(struct vscsibk_info *info)
{
	struct xenbus_device *dev = info->dev;
	unsigned int ring_ref;
	evtchn_port_t evtchn;
	int err;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "ring-ref", "%u", &ring_ref,
			    "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
		return err;
	}

	return scsiback_init_sring(info, ring_ref, evtchn);
}

/*
 * Check for a translation entry being present
 */
static struct v2p_entry *scsiback_chk_translation_entry(
	struct vscsibk_info *info, struct ids_tuple *v)
{
	struct list_head *head = &(info->v2p_entry_lists);
	struct v2p_entry *entry;

	list_for_each_entry(entry, head, l)
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun))
			return entry;

	return NULL;
}
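/*
 * The physical device ("p-dev") is given as "<wwn-or-alias>:<lun>": the
 * part before the last ':' must name a configured target port (or its
 * param_alias), the part after it an existing LUN below that TPG.
 */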
/*
 * Add a new translation entry
 */
static int scsiback_add_translation_entry(struct vscsibk_info *info,
					  char *phy, struct ids_tuple *v)
{
	int err = 0;
	struct v2p_entry *new;
	unsigned long flags;
	char *lunp;
	unsigned long long unpacked_lun;
	struct se_lun *se_lun;
	struct scsiback_tpg *tpg_entry, *tpg = NULL;
	char *error = "doesn't exist";

	lunp = strrchr(phy, ':');
	if (!lunp) {
		pr_err("illegal format of physical device %s\n", phy);
		return -EINVAL;
	}
	*lunp = 0;
	lunp++;
	err = kstrtoull(lunp, 10, &unpacked_lun);
	if (err < 0) {
		pr_err("lun number not valid: %s\n", lunp);
		return err;
	}

	mutex_lock(&scsiback_mutex);
	list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) {
		if (!strcmp(phy, tpg_entry->tport->tport_name) ||
		    !strcmp(phy, tpg_entry->param_alias)) {
			mutex_lock(&tpg_entry->se_tpg.tpg_lun_mutex);
			hlist_for_each_entry(se_lun, &tpg_entry->se_tpg.tpg_lun_hlist, link) {
				if (se_lun->unpacked_lun == unpacked_lun) {
					if (!tpg_entry->tpg_nexus)
						error = "nexus undefined";
					else
						tpg = tpg_entry;
					break;
				}
			}
			mutex_unlock(&tpg_entry->se_tpg.tpg_lun_mutex);
			break;
		}
	}
	if (tpg) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count++;
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&scsiback_mutex);

	if (!tpg) {
		pr_err("%s:%llu %s\n", phy, unpacked_lun, error);
		return -ENODEV;
	}

	new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL);
	if (new == NULL) {
		err = -ENOMEM;
		goto out_free;
	}

	spin_lock_irqsave(&info->v2p_lock, flags);

	/* Check double assignment to identical virtual ID */
	if (scsiback_chk_translation_entry(info, v)) {
		pr_warn("Virtual ID is already used. Assignment was not performed.\n");
		err = -EEXIST;
		goto out;
	}

	/* Create a new translation entry and add to the list */
	kref_init(&new->kref);
	new->v = *v;
	new->tpg = tpg;
	new->lun = unpacked_lun;
	list_add_tail(&new->l, &info->v2p_entry_lists);

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);

out_free:
	if (err) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count--;
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(new);
	}

	return err;
}

static void __scsiback_del_translation_entry(struct v2p_entry *entry)
{
	list_del(&entry->l);
	kref_put(&entry->kref, scsiback_free_translation_entry);
}

/*
 * Delete the translation entry specified
 */
static int scsiback_del_translation_entry(struct vscsibk_info *info,
					  struct ids_tuple *v)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&info->v2p_lock, flags);
	/* Find out the translation entry specified */
	entry = scsiback_chk_translation_entry(info, v);
	if (entry)
		__scsiback_del_translation_entry(entry);
	else
		ret = -ENOENT;

	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return ret;
}

static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
				char *phy, struct ids_tuple *vir, int try)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int err;

	if (try) {
		spin_lock_irqsave(&info->v2p_lock, flags);
		entry = scsiback_chk_translation_entry(info, vir);
		spin_unlock_irqrestore(&info->v2p_lock, flags);
		if (entry)
			return;
	}
	if (!scsiback_add_translation_entry(info, phy, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateInitialised)) {
			pr_err("xenbus_printf error %s\n", state);
			scsiback_del_translation_entry(info, vir);
		}
	} else if (!try) {
		err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
					 "%s: writing %s", __func__, state);
	}
}

static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state,
				struct ids_tuple *vir)
{
	if (!scsiback_del_translation_entry(info, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateClosed))
			pr_err("xenbus_printf error %s\n", state);
	}
}

#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
#define VSCSIBACK_OP_UPDATEDEV_STATE	2
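/*
 * Handle a single vscsi-devs/dev-<N> entry. The relevant xenstore nodes
 * below the backend's nodename are, e.g.:
 *   vscsi-devs/dev-0/p-dev  = "naa.<wwn>:<lun>" (or a configured alias)
 *   vscsi-devs/dev-0/v-dev  = "0:0:0:0"  (host:channel:target:lun)
 *   vscsi-devs/dev-0/state  = XenbusState value driving add/remove
 */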
static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
				     char *ent)
{
	int err;
	struct ids_tuple vir;
	char *val;
	int device_state;
	char phy[VSCSI_NAMELEN];
	char str[64];
	char state[64];
	struct xenbus_device *dev = info->dev;

	/* read status */
	snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state);
	if (XENBUS_EXIST_ERR(err))
		return;

	/* physical SCSI device */
	snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
	val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
	if (IS_ERR(val)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
					 "%s: writing %s", __func__, state);
		return;
	}
	strlcpy(phy, val, VSCSI_NAMELEN);
	kfree(val);

	/* virtual SCSI device */
	snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
	if (XENBUS_EXIST_ERR(err)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
					 "%s: writing %s", __func__, state);
		return;
	}

	switch (op) {
	case VSCSIBACK_OP_ADD_OR_DEL_LUN:
		switch (device_state) {
		case XenbusStateInitialising:
			scsiback_do_add_lun(info, state, phy, &vir, 0);
			break;
		case XenbusStateConnected:
			scsiback_do_add_lun(info, state, phy, &vir, 1);
			break;
		case XenbusStateClosing:
			scsiback_do_del_lun(info, state, &vir);
			break;
		default:
			break;
		}
		break;

	case VSCSIBACK_OP_UPDATEDEV_STATE:
		if (device_state == XenbusStateInitialised) {
			/* modify vscsi-devs/dev-x/state */
			if (xenbus_printf(XBT_NIL, dev->nodename, state,
					  "%d", XenbusStateConnected)) {
				pr_err("xenbus_printf error %s\n", str);
				scsiback_del_translation_entry(info, &vir);
				xenbus_printf(XBT_NIL, dev->nodename, state,
					      "%d", XenbusStateClosed);
			}
		}
		break;
	/* Add further processing here if it becomes necessary. */
	default:
		break;
	}
}

static void scsiback_do_lun_hotplug(struct vscsibk_info *info, int op)
{
	int i;
	char **dir;
	unsigned int ndir = 0;

	dir = xenbus_directory(XBT_NIL, info->dev->nodename, "vscsi-devs",
			       &ndir);
	if (IS_ERR(dir))
		return;

	for (i = 0; i < ndir; i++)
		scsiback_do_1lun_hotplug(info, op, dir[i]);

	kfree(dir);
}

static void scsiback_frontend_changed(struct xenbus_device *dev,
					enum xenbus_state frontend_state)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;

	case XenbusStateInitialised:
		if (scsiback_map(info))
			break;

		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_UPDATEDEV_STATE);

		if (dev->state == XenbusStateConnected)
			break;

		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		if (info->irq)
			scsiback_disconnect(info);

		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;	/* if not online */
	case XenbusStateUnknown:
		device_unregister(&dev->dev);
		break;

	case XenbusStateReconfiguring:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfigured);

		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/*
 * Release the translation entry specified
 */
static void scsiback_release_translation_entry(struct vscsibk_info *info)
{
	struct v2p_entry *entry, *tmp;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);

	list_for_each_entry_safe(entry, tmp, head, l)
		__scsiback_del_translation_entry(entry);

	spin_unlock_irqrestore(&info->v2p_lock, flags);
}

static int scsiback_remove(struct xenbus_device *dev)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	if (info->irq)
		scsiback_disconnect(info);

	scsiback_release_translation_entry(info);

	gnttab_page_cache_shrink(&info->free_pages, 0);

	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
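/*
 * Allocate the per-backend state for a new xenbus device and advertise
 * the feature-sg-grant capability (up to SG_ALL data segments via
 * indirect segment pages) to the frontend.
 */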
static int scsiback_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;

	struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info),
					    GFP_KERNEL);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure");
		return -ENOMEM;
	}
	info->dev = dev;
	dev_set_drvdata(&dev->dev, info);

	info->domid = dev->otherend_id;
	spin_lock_init(&info->ring_lock);
	atomic_set(&info->nr_unreplied_reqs, 0);
	init_waitqueue_head(&info->waiting_to_free);
	info->irq = 0;
	INIT_LIST_HEAD(&info->v2p_entry_lists);
	spin_lock_init(&info->v2p_lock);
	gnttab_page_cache_init(&info->free_pages);

	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
			    SG_ALL);
	if (err)
		xenbus_dev_error(dev, err, "writing feature-sg-grant");

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	scsiback_remove(dev);

	return err;
}

static char *scsiback_dump_proto_id(struct scsiback_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 scsiback_get_tag(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	return tpg->tport_tpgt;
}
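/*
 * configfs: create a target port. The emulated protocol is derived from
 * the directory name prefix ("naa." -> SAS, "fc." -> FCP, "iqn." ->
 * iSCSI); for FCP the prefix is stripped from the stored port name.
 */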
static struct se_wwn *
scsiback_make_tport(struct target_fabric_configfs *tf,
		    struct config_group *group,
		    const char *name)
{
	struct scsiback_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	tport = kzalloc(sizeof(struct scsiback_tport), GFP_KERNEL);
	if (!tport)
		return ERR_PTR(-ENOMEM);

	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port: %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VSCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds max: %d\n",
		       scsiback_dump_proto_id(tport), name, VSCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]);

	pr_debug("Allocated emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void scsiback_drop_tport(struct se_wwn *wwn)
{
	struct scsiback_tport *tport = container_of(wwn,
				struct scsiback_tport, tport_wwn);

	pr_debug("Deallocating emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), tport->tport_name);

	kfree(tport);
}

static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
	target_free_tag(se_cmd->se_sess, se_cmd);
}

static u32 scsiback_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int scsiback_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);

	return 0;
}

static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int scsiback_queue_data_in(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	pending_req->result = SAM_STAT_GOOD;
	scsiback_cmd_done(pending_req);
	return 0;
}

static int scsiback_queue_status(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
		pending_req->result = SAM_STAT_CHECK_CONDITION;
	else
		pending_req->result = se_cmd->scsi_status;

	scsiback_cmd_done(pending_req);
	return 0;
}

static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	complete(&pending_req->tmr_done);
}

static void scsiback_aborted_task(struct se_cmd *se_cmd)
{
}

static ssize_t scsiback_tpg_param_alias_show(struct config_item *item,
					     char *page)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	ssize_t rb;

	mutex_lock(&tpg->tv_tpg_mutex);
	rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return rb;
}

static ssize_t scsiback_tpg_param_alias_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	int len;

	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("param alias: %s, exceeds max: %d\n", page,
		       VSCSI_NAMELEN);
		return -EINVAL;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	len = snprintf(tpg->param_alias, VSCSI_NAMELEN, "%s", page);
	if (tpg->param_alias[len - 1] == '\n')
		tpg->param_alias[len - 1] = '\0';
	mutex_unlock(&tpg->tv_tpg_mutex);

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_param_, alias);

static struct configfs_attribute *scsiback_param_attrs[] = {
	&scsiback_tpg_param_attr_alias,
	NULL,
};

static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	tpg->tpg_nexus = p;
	return 0;
}
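/*
 * Create the I_T nexus for a TPG: set up a TCM session whose tag pool
 * (VSCSI_DEFAULT_SESSION_TAGS entries of struct vscsibk_pend) also
 * provides the per-request state used by the ring processing code.
 */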
static int scsiback_make_nexus(struct scsiback_tpg *tpg,
			       const char *name)
{
	struct scsiback_nexus *tv_nexus;
	int ret = 0;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		pr_debug("tpg->tpg_nexus already exists\n");
		ret = -EEXIST;
		goto out_unlock;
	}

	tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
						     VSCSI_DEFAULT_SESSION_TAGS,
						     sizeof(struct vscsibk_pend),
						     TARGET_PROT_NORMAL, name,
						     tv_nexus, scsiback_alloc_sess_cb);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		kfree(tv_nexus);
		ret = -ENOMEM;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&tpg->tv_tpg_mutex);
	return ret;
}

static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
{
	struct se_session *se_sess;
	struct scsiback_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG port count: %d\n",
		       tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_fe_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG frontend count: %d\n",
		       tpg->tv_tpg_fe_count);
		return -EBUSY;
	}

	pr_debug("Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 scsiback_dump_proto_id(tpg->tport),
		 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t scsiback_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}
static ssize_t scsiback_tpg_nexus_store(struct config_item *item,
					const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = scsiback_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in scsiback_make_tport(), and call
	 * scsiback_make_nexus().
	 */
	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
		       page, VSCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VSCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = scsiback_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_, nexus);

static struct configfs_attribute *scsiback_tpg_attrs[] = {
	&scsiback_tpg_attr_nexus,
	NULL,
};

static ssize_t
scsiback_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "xen-pvscsi fabric module %s on %s/%s on "
		       UTS_RELEASE"\n",
		       VSCSI_VERSION, utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(scsiback_wwn_, version);

static struct configfs_attribute *scsiback_wwn_attrs[] = {
	&scsiback_wwn_attr_version,
	NULL,
};

static int scsiback_port_link(struct se_portal_group *se_tpg,
			      struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	return 0;
}

static void scsiback_port_unlink(struct se_portal_group *se_tpg,
				 struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);
}
static struct se_portal_group *
scsiback_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct scsiback_tport *tport = container_of(wwn,
			struct scsiback_tport, tport_wwn);

	struct scsiback_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	ret = kstrtou16(name + 5, 10, &tpgt);
	if (ret)
		return ERR_PTR(ret);

	tpg = kzalloc(sizeof(struct scsiback_tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	INIT_LIST_HEAD(&tpg->info_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return ERR_PTR(ret);
	}
	mutex_lock(&scsiback_mutex);
	list_add_tail(&tpg->tv_tpg_list, &scsiback_list);
	mutex_unlock(&scsiback_mutex);

	return &tpg->se_tpg;
}

static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&scsiback_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&scsiback_mutex);
	/*
	 * Release the virtual I_T Nexus for this xen-pvscsi TPG
	 */
	scsiback_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static int scsiback_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
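/*
 * Fabric template hooking xen-pvscsi into the target core; demo mode is
 * enabled, so LUNs are reachable without explicit initiator node ACLs.
 */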
static const struct target_core_fabric_ops scsiback_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "xen-pvscsi",
	.tpg_get_wwn			= scsiback_get_fabric_wwn,
	.tpg_get_tag			= scsiback_get_tag,
	.tpg_check_demo_mode		= scsiback_check_true,
	.tpg_check_demo_mode_cache	= scsiback_check_true,
	.tpg_check_demo_mode_write_protect = scsiback_check_false,
	.tpg_check_prod_mode_write_protect = scsiback_check_false,
	.tpg_get_inst_index		= scsiback_tpg_get_inst_index,
	.check_stop_free		= scsiback_check_stop_free,
	.release_cmd			= scsiback_release_cmd,
	.sess_get_index			= scsiback_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= scsiback_write_pending,
	.set_default_node_attributes	= scsiback_set_default_node_attrs,
	.get_cmd_state			= scsiback_get_cmd_state,
	.queue_data_in			= scsiback_queue_data_in,
	.queue_status			= scsiback_queue_status,
	.queue_tm_rsp			= scsiback_queue_tm_rsp,
	.aborted_task			= scsiback_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= scsiback_make_tport,
	.fabric_drop_wwn		= scsiback_drop_tport,
	.fabric_make_tpg		= scsiback_make_tpg,
	.fabric_drop_tpg		= scsiback_drop_tpg,
	.fabric_post_link		= scsiback_port_link,
	.fabric_pre_unlink		= scsiback_port_unlink,

	.tfc_wwn_attrs			= scsiback_wwn_attrs,
	.tfc_tpg_base_attrs		= scsiback_tpg_attrs,
	.tfc_tpg_param_attrs		= scsiback_param_attrs,
};

static const struct xenbus_device_id scsiback_ids[] = {
	{ "vscsi" },
	{ "" }
};

static struct xenbus_driver scsiback_driver = {
	.ids			= scsiback_ids,
	.probe			= scsiback_probe,
	.remove			= scsiback_remove,
	.otherend_changed	= scsiback_frontend_changed
};

static int __init scsiback_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
		 VSCSI_VERSION, utsname()->sysname, utsname()->machine);

	ret = xenbus_register_backend(&scsiback_driver);
	if (ret)
		goto out;

	ret = target_register_template(&scsiback_ops);
	if (ret)
		goto out_unregister_xenbus;

	return 0;

out_unregister_xenbus:
	xenbus_unregister_driver(&scsiback_driver);
out:
	pr_err("%s: error %d\n", __func__, ret);
	return ret;
}

static void __exit scsiback_exit(void)
{
	target_unregister_template(&scsiback_ops);
	xenbus_unregister_driver(&scsiback_driver);
}

module_init(scsiback_init);
module_exit(scsiback_exit);

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");