/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi  <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini    <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req      cmd;
		struct virtio_scsi_ctrl_tmf_req tmf;
		struct virtio_scsi_ctrl_an_req  an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy.  When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept.  However, if a target was idle, we can
 * choose a queue arbitrarily.  In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs.  This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * An interesting effect of this policy is that only writes to req_vq need to
 * take the tgt_lock.  Reads can be done outside the lock because:
 *
 * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
 *   In that case, no other CPU is reading req_vq: even if they were in
 *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
 *
 * - reads of req_vq only occur when the target is not idle (reqs != 0).
 *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
 *
 * Similarly, decrements of reqs are never concurrent with writes of req_vq.
 * Thus they can happen outside the tgt_lock, provided of course we make reqs
 * an atomic_t.
 */
struct virtio_scsi_target_state {
	/* This spinlock is never held at the same time as vq_lock. */
	spinlock_t tgt_lock;

	/* Count of outstanding requests. */
	atomic_t reqs;

	/* Currently active virtqueue for requests sent to this target. */
	struct virtio_scsi_vq *req_vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	/* If the affinity hint is set for virtqueues */
	bool affinity_hint_set;

	/* CPU hotplug notifier */
	struct notifier_block nb;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (!resid)
		return;

	if (!scsi_bidi_cmnd(sc)) {
		scsi_set_resid(sc, resid);
		return;
	}

	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}

/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	mempool_free(cmd, virtscsi_cmd_pool);
	sc->scsi_done(sc);

	atomic_dec(&tgt->reqs);
}

static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	/*
	 * Read req_vq before decrementing the reqs field in
	 * virtscsi_complete_cmd.
	 *
	 * With barriers:
	 *
	 * 	CPU #0			virtscsi_queuecommand_multi (CPU #1)
	 * 	------------------------------------------------------------
	 * 	lock vq_lock
	 * 	read req_vq
	 * 	read reqs (reqs = 1)
	 * 	write reqs (reqs = 0)
	 * 				increment reqs (reqs = 1)
	 * 				write req_vq
	 *
	 * Possible reordering without barriers:
	 *
	 * 	CPU #0			virtscsi_queuecommand_multi (CPU #1)
	 * 	------------------------------------------------------------
	 * 	lock vq_lock
	 * 	read reqs (reqs = 1)
	 * 	write reqs (reqs = 0)
	 * 				increment reqs (reqs = 1)
	 * 				write req_vq
	 * 	read (wrong) req_vq
	 *
	 * We do not need a full smp_rmb, because req_vq is required to get
	 * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
	 * in the virtqueue as the user token.
	 */
	smp_read_barrier_depends();

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete_all(cmd->comp);
	else
		mempool_free(cmd, virtscsi_cmd_pool);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		scsi_add_device(shost, 0, target, lun);
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
			       shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n",
			event->reason);
	}
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = event->reason & 255;
	u8 ascq = event->reason >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
			shost->host_no, target, lun);
		return;
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	   "Capacity data has changed".  */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (event->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	schedule_work(&event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 * @gfp		: flags to use for memory allocations
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size, gfp_t gfp)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[4], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &scsi_in(sc)->table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out)
		sgs[out_num++] = out->sgl;

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in)
		sgs[out_num + in_num++] = in->sgl;

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
}

static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size, gfp_t gfp)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
				 struct virtio_scsi_vq *req_vq,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi_cmd *cmd;
	int ret;

	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	ret = SCSI_MLQUEUE_HOST_BUSY;
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
	if (!cmd)
		goto out;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.cmd = (struct virtio_scsi_cmd_req){
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
		.task_attr = VIRTIO_SCSI_S_SIMPLE,
		.prio = 0,
		.crn = 0,
	};

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

	if (virtscsi_kick_cmd(req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
			      GFP_ATOMIC) == 0)
		ret = 0;
	else
		mempool_free(cmd, virtscsi_cmd_pool);

out:
	return ret;
}

static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
					struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	atomic_inc(&tgt->reqs);
	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}

static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
					       struct virtio_scsi_target_state *tgt)
{
	struct virtio_scsi_vq *vq;
	unsigned long flags;
	u32 queue_num;

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	/*
	 * The memory barrier after atomic_inc_return matches
	 * the smp_read_barrier_depends() in virtscsi_req_done.
	 */
	if (atomic_inc_return(&tgt->reqs) > 1)
		vq = ACCESS_ONCE(tgt->req_vq);
	else {
		queue_num = smp_processor_id();
		while (unlikely(queue_num >= vscsi->num_queues))
			queue_num -= vscsi->num_queues;

		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
	}

	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
	return vq;
}

static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
				       struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);

	return virtscsi_queuecommand(vscsi, req_vq, sc);
}

static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
			      GFP_NOIO) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_target_alloc(struct scsi_target *starget)
{
	struct virtio_scsi_target_state *tgt =
				kmalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt)
		return -ENOMEM;

	spin_lock_init(&tgt->tgt_lock);
	atomic_set(&tgt->reqs, 0);
	tgt->req_vq = NULL;

	starget->hostdata = tgt;
	return 0;
}

static void virtscsi_target_destroy(struct scsi_target *starget)
{
	struct virtio_scsi_target_state *tgt = starget->hostdata;
	kfree(tgt);
}

static struct scsi_host_template virtscsi_host_template_single = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.queuecommand = virtscsi_queuecommand_single,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
};

static struct scsi_host_template virtscsi_host_template_multi = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.queuecommand = virtscsi_queuecommand_multi,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
};

#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		vdev->config->get(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	(void)({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		vdev->config->set(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
	})

static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of CPUs is equal to the number
	 * of request queues, we let each queue be private to one CPU by
	 * setting the affinity hint, to eliminate contention.
	 */
	if ((vscsi->num_queues == 1 ||
	     vscsi->num_queues != num_online_cpus()) && affinity) {
		if (vscsi->affinity_hint_set)
			affinity = false;
		else
			return;
	}

	if (affinity) {
		i = 0;
		for_each_online_cpu(cpu) {
			virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
			i++;
		}

		vscsi->affinity_hint_set = true;
	} else {
		for (i = 0; i < vscsi->num_queues; i++)
			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);

		vscsi->affinity_hint_set = false;
	}
}

static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
	get_online_cpus();
	__virtscsi_set_affinity(vscsi, affinity);
	put_online_cpus();
}

static int virtscsi_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__virtscsi_set_affinity(vscsi, true);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_scan(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;

	scsi_scan_host(shost);
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_set_affinity(vscsi, false);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
}

static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
	names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_set_affinity(vscsi, true);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	err = 0;

out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;
	struct scsi_host_template *hostt;

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ?: 1;

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	if (num_queues == 1)
		hostt = &virtscsi_host_template_single;
	else
		hostt = &virtscsi_host_template_multi;

	shost = scsi_host_alloc(hostt,
		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	vscsi->nb.notifier_call = &virtscsi_cpu_callback;
	err = register_hotcpu_notifier(&vscsi->nb);
	if (err) {
		pr_err("registering cpu notifier failed\n");
		goto scsi_add_host_failed;
	}

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;
	/*
	 * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
	 * after VIRTIO_CONFIG_S_DRIVER_OK has been set.
	 */
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);

	unregister_hotcpu_notifier(&vscsi->nb);

	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	return virtscsi_init(vdev, vscsi);
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
	.scan = virtscsi_scan,
#ifdef CONFIG_PM
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	}
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;
	}
	return ret;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");