1 /* 2 * Copyright (c) 2009, Microsoft Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 15 * Place - Suite 330, Boston, MA 02111-1307 USA. 16 * 17 * Authors: 18 * Haiyang Zhang <haiyangz@microsoft.com> 19 * Hank Janssen <hjanssen@microsoft.com> 20 * K. Y. Srinivasan <kys@microsoft.com> 21 */ 22 23 #include <linux/kernel.h> 24 #include <linux/wait.h> 25 #include <linux/sched.h> 26 #include <linux/completion.h> 27 #include <linux/string.h> 28 #include <linux/mm.h> 29 #include <linux/delay.h> 30 #include <linux/init.h> 31 #include <linux/slab.h> 32 #include <linux/module.h> 33 #include <linux/device.h> 34 #include <linux/hyperv.h> 35 #include <linux/blkdev.h> 36 #include <scsi/scsi.h> 37 #include <scsi/scsi_cmnd.h> 38 #include <scsi/scsi_host.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_tcq.h> 41 #include <scsi/scsi_eh.h> 42 #include <scsi/scsi_devinfo.h> 43 #include <scsi/scsi_dbg.h> 44 45 /* 46 * All wire protocol details (storage protocol between the guest and the host) 47 * are consolidated here. 48 * 49 * Begin protocol definitions. 50 */ 51 52 /* 53 * Version history: 54 * V1 Beta: 0.1 55 * V1 RC < 2008/1/31: 1.0 56 * V1 RC > 2008/1/31: 2.0 57 * Win7: 4.2 58 * Win8: 5.1 59 * Win8.1: 6.0 60 * Win10: 6.2 61 */ 62 63 #define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \ 64 (((MINOR_) & 0xff))) 65 66 #define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0) 67 #define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2) 68 #define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1) 69 #define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0) 70 #define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2) 71 72 /* Packet structure describing virtual storage requests. */ 73 enum vstor_packet_operation { 74 VSTOR_OPERATION_COMPLETE_IO = 1, 75 VSTOR_OPERATION_REMOVE_DEVICE = 2, 76 VSTOR_OPERATION_EXECUTE_SRB = 3, 77 VSTOR_OPERATION_RESET_LUN = 4, 78 VSTOR_OPERATION_RESET_ADAPTER = 5, 79 VSTOR_OPERATION_RESET_BUS = 6, 80 VSTOR_OPERATION_BEGIN_INITIALIZATION = 7, 81 VSTOR_OPERATION_END_INITIALIZATION = 8, 82 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9, 83 VSTOR_OPERATION_QUERY_PROPERTIES = 10, 84 VSTOR_OPERATION_ENUMERATE_BUS = 11, 85 VSTOR_OPERATION_FCHBA_DATA = 12, 86 VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13, 87 VSTOR_OPERATION_MAXIMUM = 13 88 }; 89 90 /* 91 * WWN packet for Fibre Channel HBA 92 */ 93 94 struct hv_fc_wwn_packet { 95 bool primary_active; 96 u8 reserved1; 97 u8 reserved2; 98 u8 primary_port_wwn[8]; 99 u8 primary_node_wwn[8]; 100 u8 secondary_port_wwn[8]; 101 u8 secondary_node_wwn[8]; 102 }; 103 104 105 106 /* 107 * SRB Flag Bits 108 */ 109 110 #define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002 111 #define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004 112 #define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008 113 #define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010 114 #define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020 115 #define SRB_FLAGS_DATA_IN 0x00000040 116 #define SRB_FLAGS_DATA_OUT 0x00000080 117 #define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000 118 #define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT) 119 #define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100 120 #define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200 121 #define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400 122 123 /* 124 * This flag indicates the request is part of the workflow for processing a D3. 125 */ 126 #define SRB_FLAGS_D3_PROCESSING 0x00000800 127 #define SRB_FLAGS_IS_ACTIVE 0x00010000 128 #define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000 129 #define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000 130 #define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000 131 #define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000 132 #define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000 133 #define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000 134 #define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000 135 #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 136 #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 137 138 139 /* 140 * Platform neutral description of a scsi request - 141 * this remains the same across the write regardless of 32/64 bit 142 * note: it's patterned off the SCSI_PASS_THROUGH structure 143 */ 144 #define STORVSC_MAX_CMD_LEN 0x10 145 146 #define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14 147 #define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12 148 149 #define STORVSC_SENSE_BUFFER_SIZE 0x14 150 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14 151 152 /* 153 * Sense buffer size changed in win8; have a run-time 154 * variable to track the size we should use. This value will 155 * likely change during protocol negotiation but it is valid 156 * to start by assuming pre-Win8. 157 */ 158 static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; 159 160 /* 161 * The storage protocol version is determined during the 162 * initial exchange with the host. It will indicate which 163 * storage functionality is available in the host. 164 */ 165 static int vmstor_proto_version; 166 167 struct vmscsi_win8_extension { 168 /* 169 * The following were added in Windows 8 170 */ 171 u16 reserve; 172 u8 queue_tag; 173 u8 queue_action; 174 u32 srb_flags; 175 u32 time_out_value; 176 u32 queue_sort_ey; 177 } __packed; 178 179 struct vmscsi_request { 180 u16 length; 181 u8 srb_status; 182 u8 scsi_status; 183 184 u8 port_number; 185 u8 path_id; 186 u8 target_id; 187 u8 lun; 188 189 u8 cdb_length; 190 u8 sense_info_length; 191 u8 data_in; 192 u8 reserved; 193 194 u32 data_transfer_length; 195 196 union { 197 u8 cdb[STORVSC_MAX_CMD_LEN]; 198 u8 sense_data[STORVSC_SENSE_BUFFER_SIZE]; 199 u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING]; 200 }; 201 /* 202 * The following was added in win8. 203 */ 204 struct vmscsi_win8_extension win8_extension; 205 206 } __attribute((packed)); 207 208 209 /* 210 * The size of the vmscsi_request has changed in win8. The 211 * additional size is because of new elements added to the 212 * structure. These elements are valid only when we are talking 213 * to a win8 host. 214 * Track the correction to size we need to apply. This value 215 * will likely change during protocol negotiation but it is 216 * valid to start by assuming pre-Win8. 217 */ 218 static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); 219 220 /* 221 * The list of storage protocols in order of preference. 222 */ 223 struct vmstor_protocol { 224 int protocol_version; 225 int sense_buffer_size; 226 int vmscsi_size_delta; 227 }; 228 229 230 static const struct vmstor_protocol vmstor_protocols[] = { 231 { 232 VMSTOR_PROTO_VERSION_WIN10, 233 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 234 0 235 }, 236 { 237 VMSTOR_PROTO_VERSION_WIN8_1, 238 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 239 0 240 }, 241 { 242 VMSTOR_PROTO_VERSION_WIN8, 243 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 244 0 245 }, 246 { 247 VMSTOR_PROTO_VERSION_WIN7, 248 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, 249 sizeof(struct vmscsi_win8_extension), 250 }, 251 { 252 VMSTOR_PROTO_VERSION_WIN6, 253 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, 254 sizeof(struct vmscsi_win8_extension), 255 } 256 }; 257 258 259 /* 260 * This structure is sent during the intialization phase to get the different 261 * properties of the channel. 262 */ 263 264 #define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1 265 266 struct vmstorage_channel_properties { 267 u32 reserved; 268 u16 max_channel_cnt; 269 u16 reserved1; 270 271 u32 flags; 272 u32 max_transfer_bytes; 273 274 u64 reserved2; 275 } __packed; 276 277 /* This structure is sent during the storage protocol negotiations. */ 278 struct vmstorage_protocol_version { 279 /* Major (MSW) and minor (LSW) version numbers. */ 280 u16 major_minor; 281 282 /* 283 * Revision number is auto-incremented whenever this file is changed 284 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not 285 * definitely indicate incompatibility--but it does indicate mismatched 286 * builds. 287 * This is only used on the windows side. Just set it to 0. 288 */ 289 u16 revision; 290 } __packed; 291 292 /* Channel Property Flags */ 293 #define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1 294 #define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2 295 296 struct vstor_packet { 297 /* Requested operation type */ 298 enum vstor_packet_operation operation; 299 300 /* Flags - see below for values */ 301 u32 flags; 302 303 /* Status of the request returned from the server side. */ 304 u32 status; 305 306 /* Data payload area */ 307 union { 308 /* 309 * Structure used to forward SCSI commands from the 310 * client to the server. 311 */ 312 struct vmscsi_request vm_srb; 313 314 /* Structure used to query channel properties. */ 315 struct vmstorage_channel_properties storage_channel_properties; 316 317 /* Used during version negotiations. */ 318 struct vmstorage_protocol_version version; 319 320 /* Fibre channel address packet */ 321 struct hv_fc_wwn_packet wwn_packet; 322 323 /* Number of sub-channels to create */ 324 u16 sub_channel_count; 325 326 /* This will be the maximum of the union members */ 327 u8 buffer[0x34]; 328 }; 329 } __packed; 330 331 /* 332 * Packet Flags: 333 * 334 * This flag indicates that the server should send back a completion for this 335 * packet. 336 */ 337 338 #define REQUEST_COMPLETION_FLAG 0x1 339 340 /* Matches Windows-end */ 341 enum storvsc_request_type { 342 WRITE_TYPE = 0, 343 READ_TYPE, 344 UNKNOWN_TYPE, 345 }; 346 347 /* 348 * SRB status codes and masks; a subset of the codes used here. 349 */ 350 351 #define SRB_STATUS_AUTOSENSE_VALID 0x80 352 #define SRB_STATUS_INVALID_LUN 0x20 353 #define SRB_STATUS_SUCCESS 0x01 354 #define SRB_STATUS_ABORTED 0x02 355 #define SRB_STATUS_ERROR 0x04 356 357 /* 358 * This is the end of Protocol specific defines. 359 */ 360 361 static int storvsc_ringbuffer_size = (256 * PAGE_SIZE); 362 static u32 max_outstanding_req_per_channel; 363 364 static int storvsc_vcpus_per_sub_channel = 4; 365 366 module_param(storvsc_ringbuffer_size, int, S_IRUGO); 367 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); 368 369 module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO); 370 MODULE_PARM_DESC(vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); 371 /* 372 * Timeout in seconds for all devices managed by this driver. 373 */ 374 static int storvsc_timeout = 180; 375 376 static int msft_blist_flags = BLIST_TRY_VPD_PAGES; 377 378 379 static void storvsc_on_channel_callback(void *context); 380 381 #define STORVSC_MAX_LUNS_PER_TARGET 255 382 #define STORVSC_MAX_TARGETS 2 383 #define STORVSC_MAX_CHANNELS 8 384 385 #define STORVSC_FC_MAX_LUNS_PER_TARGET 255 386 #define STORVSC_FC_MAX_TARGETS 128 387 #define STORVSC_FC_MAX_CHANNELS 8 388 389 #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 390 #define STORVSC_IDE_MAX_TARGETS 1 391 #define STORVSC_IDE_MAX_CHANNELS 1 392 393 struct storvsc_cmd_request { 394 struct scsi_cmnd *cmd; 395 396 unsigned int bounce_sgl_count; 397 struct scatterlist *bounce_sgl; 398 399 struct hv_device *device; 400 401 /* Synchronize the request/response if needed */ 402 struct completion wait_event; 403 404 struct vmbus_channel_packet_multipage_buffer mpb; 405 struct vmbus_packet_mpb_array *payload; 406 u32 payload_sz; 407 408 struct vstor_packet vstor_packet; 409 }; 410 411 412 /* A storvsc device is a device object that contains a vmbus channel */ 413 struct storvsc_device { 414 struct hv_device *device; 415 416 bool destroy; 417 bool drain_notify; 418 bool open_sub_channel; 419 atomic_t num_outstanding_req; 420 struct Scsi_Host *host; 421 422 wait_queue_head_t waiting_to_drain; 423 424 /* 425 * Each unique Port/Path/Target represents 1 channel ie scsi 426 * controller. In reality, the pathid, targetid is always 0 427 * and the port is set by us 428 */ 429 unsigned int port_number; 430 unsigned char path_id; 431 unsigned char target_id; 432 433 /* 434 * Max I/O, the device can support. 435 */ 436 u32 max_transfer_bytes; 437 /* Used for vsc/vsp channel reset process */ 438 struct storvsc_cmd_request init_request; 439 struct storvsc_cmd_request reset_request; 440 }; 441 442 struct hv_host_device { 443 struct hv_device *dev; 444 unsigned int port; 445 unsigned char path; 446 unsigned char target; 447 }; 448 449 struct storvsc_scan_work { 450 struct work_struct work; 451 struct Scsi_Host *host; 452 uint lun; 453 }; 454 455 static void storvsc_device_scan(struct work_struct *work) 456 { 457 struct storvsc_scan_work *wrk; 458 uint lun; 459 struct scsi_device *sdev; 460 461 wrk = container_of(work, struct storvsc_scan_work, work); 462 lun = wrk->lun; 463 464 sdev = scsi_device_lookup(wrk->host, 0, 0, lun); 465 if (!sdev) 466 goto done; 467 scsi_rescan_device(&sdev->sdev_gendev); 468 scsi_device_put(sdev); 469 470 done: 471 kfree(wrk); 472 } 473 474 static void storvsc_host_scan(struct work_struct *work) 475 { 476 struct storvsc_scan_work *wrk; 477 struct Scsi_Host *host; 478 struct scsi_device *sdev; 479 480 wrk = container_of(work, struct storvsc_scan_work, work); 481 host = wrk->host; 482 483 /* 484 * Before scanning the host, first check to see if any of the 485 * currrently known devices have been hot removed. We issue a 486 * "unit ready" command against all currently known devices. 487 * This I/O will result in an error for devices that have been 488 * removed. As part of handling the I/O error, we remove the device. 489 * 490 * When a LUN is added or removed, the host sends us a signal to 491 * scan the host. Thus we are forced to discover the LUNs that 492 * may have been removed this way. 493 */ 494 mutex_lock(&host->scan_mutex); 495 shost_for_each_device(sdev, host) 496 scsi_test_unit_ready(sdev, 1, 1, NULL); 497 mutex_unlock(&host->scan_mutex); 498 /* 499 * Now scan the host to discover LUNs that may have been added. 500 */ 501 scsi_scan_host(host); 502 503 kfree(wrk); 504 } 505 506 static void storvsc_remove_lun(struct work_struct *work) 507 { 508 struct storvsc_scan_work *wrk; 509 struct scsi_device *sdev; 510 511 wrk = container_of(work, struct storvsc_scan_work, work); 512 if (!scsi_host_get(wrk->host)) 513 goto done; 514 515 sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun); 516 517 if (sdev) { 518 scsi_remove_device(sdev); 519 scsi_device_put(sdev); 520 } 521 scsi_host_put(wrk->host); 522 523 done: 524 kfree(wrk); 525 } 526 527 528 /* 529 * We can get incoming messages from the host that are not in response to 530 * messages that we have sent out. An example of this would be messages 531 * received by the guest to notify dynamic addition/removal of LUNs. To 532 * deal with potential race conditions where the driver may be in the 533 * midst of being unloaded when we might receive an unsolicited message 534 * from the host, we have implemented a mechanism to gurantee sequential 535 * consistency: 536 * 537 * 1) Once the device is marked as being destroyed, we will fail all 538 * outgoing messages. 539 * 2) We permit incoming messages when the device is being destroyed, 540 * only to properly account for messages already sent out. 541 */ 542 543 static inline struct storvsc_device *get_out_stor_device( 544 struct hv_device *device) 545 { 546 struct storvsc_device *stor_device; 547 548 stor_device = hv_get_drvdata(device); 549 550 if (stor_device && stor_device->destroy) 551 stor_device = NULL; 552 553 return stor_device; 554 } 555 556 557 static inline void storvsc_wait_to_drain(struct storvsc_device *dev) 558 { 559 dev->drain_notify = true; 560 wait_event(dev->waiting_to_drain, 561 atomic_read(&dev->num_outstanding_req) == 0); 562 dev->drain_notify = false; 563 } 564 565 static inline struct storvsc_device *get_in_stor_device( 566 struct hv_device *device) 567 { 568 struct storvsc_device *stor_device; 569 570 stor_device = hv_get_drvdata(device); 571 572 if (!stor_device) 573 goto get_in_err; 574 575 /* 576 * If the device is being destroyed; allow incoming 577 * traffic only to cleanup outstanding requests. 578 */ 579 580 if (stor_device->destroy && 581 (atomic_read(&stor_device->num_outstanding_req) == 0)) 582 stor_device = NULL; 583 584 get_in_err: 585 return stor_device; 586 587 } 588 589 static void destroy_bounce_buffer(struct scatterlist *sgl, 590 unsigned int sg_count) 591 { 592 int i; 593 struct page *page_buf; 594 595 for (i = 0; i < sg_count; i++) { 596 page_buf = sg_page((&sgl[i])); 597 if (page_buf != NULL) 598 __free_page(page_buf); 599 } 600 601 kfree(sgl); 602 } 603 604 static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count) 605 { 606 int i; 607 608 /* No need to check */ 609 if (sg_count < 2) 610 return -1; 611 612 /* We have at least 2 sg entries */ 613 for (i = 0; i < sg_count; i++) { 614 if (i == 0) { 615 /* make sure 1st one does not have hole */ 616 if (sgl[i].offset + sgl[i].length != PAGE_SIZE) 617 return i; 618 } else if (i == sg_count - 1) { 619 /* make sure last one does not have hole */ 620 if (sgl[i].offset != 0) 621 return i; 622 } else { 623 /* make sure no hole in the middle */ 624 if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0) 625 return i; 626 } 627 } 628 return -1; 629 } 630 631 static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, 632 unsigned int sg_count, 633 unsigned int len, 634 int write) 635 { 636 int i; 637 int num_pages; 638 struct scatterlist *bounce_sgl; 639 struct page *page_buf; 640 unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE); 641 642 num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT; 643 644 bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC); 645 if (!bounce_sgl) 646 return NULL; 647 648 sg_init_table(bounce_sgl, num_pages); 649 for (i = 0; i < num_pages; i++) { 650 page_buf = alloc_page(GFP_ATOMIC); 651 if (!page_buf) 652 goto cleanup; 653 sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0); 654 } 655 656 return bounce_sgl; 657 658 cleanup: 659 destroy_bounce_buffer(bounce_sgl, num_pages); 660 return NULL; 661 } 662 663 /* Assume the original sgl has enough room */ 664 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, 665 struct scatterlist *bounce_sgl, 666 unsigned int orig_sgl_count, 667 unsigned int bounce_sgl_count) 668 { 669 int i; 670 int j = 0; 671 unsigned long src, dest; 672 unsigned int srclen, destlen, copylen; 673 unsigned int total_copied = 0; 674 unsigned long bounce_addr = 0; 675 unsigned long dest_addr = 0; 676 unsigned long flags; 677 struct scatterlist *cur_dest_sgl; 678 struct scatterlist *cur_src_sgl; 679 680 local_irq_save(flags); 681 cur_dest_sgl = orig_sgl; 682 cur_src_sgl = bounce_sgl; 683 for (i = 0; i < orig_sgl_count; i++) { 684 dest_addr = (unsigned long) 685 kmap_atomic(sg_page(cur_dest_sgl)) + 686 cur_dest_sgl->offset; 687 dest = dest_addr; 688 destlen = cur_dest_sgl->length; 689 690 if (bounce_addr == 0) 691 bounce_addr = (unsigned long)kmap_atomic( 692 sg_page(cur_src_sgl)); 693 694 while (destlen) { 695 src = bounce_addr + cur_src_sgl->offset; 696 srclen = cur_src_sgl->length - cur_src_sgl->offset; 697 698 copylen = min(srclen, destlen); 699 memcpy((void *)dest, (void *)src, copylen); 700 701 total_copied += copylen; 702 cur_src_sgl->offset += copylen; 703 destlen -= copylen; 704 dest += copylen; 705 706 if (cur_src_sgl->offset == cur_src_sgl->length) { 707 /* full */ 708 kunmap_atomic((void *)bounce_addr); 709 j++; 710 711 /* 712 * It is possible that the number of elements 713 * in the bounce buffer may not be equal to 714 * the number of elements in the original 715 * scatter list. Handle this correctly. 716 */ 717 718 if (j == bounce_sgl_count) { 719 /* 720 * We are done; cleanup and return. 721 */ 722 kunmap_atomic((void *)(dest_addr - 723 cur_dest_sgl->offset)); 724 local_irq_restore(flags); 725 return total_copied; 726 } 727 728 /* if we need to use another bounce buffer */ 729 if (destlen || i != orig_sgl_count - 1) { 730 cur_src_sgl = sg_next(cur_src_sgl); 731 bounce_addr = (unsigned long) 732 kmap_atomic( 733 sg_page(cur_src_sgl)); 734 } 735 } else if (destlen == 0 && i == orig_sgl_count - 1) { 736 /* unmap the last bounce that is < PAGE_SIZE */ 737 kunmap_atomic((void *)bounce_addr); 738 } 739 } 740 741 kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset)); 742 cur_dest_sgl = sg_next(cur_dest_sgl); 743 } 744 745 local_irq_restore(flags); 746 747 return total_copied; 748 } 749 750 /* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */ 751 static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, 752 struct scatterlist *bounce_sgl, 753 unsigned int orig_sgl_count) 754 { 755 int i; 756 int j = 0; 757 unsigned long src, dest; 758 unsigned int srclen, destlen, copylen; 759 unsigned int total_copied = 0; 760 unsigned long bounce_addr = 0; 761 unsigned long src_addr = 0; 762 unsigned long flags; 763 struct scatterlist *cur_src_sgl; 764 struct scatterlist *cur_dest_sgl; 765 766 local_irq_save(flags); 767 768 cur_src_sgl = orig_sgl; 769 cur_dest_sgl = bounce_sgl; 770 771 for (i = 0; i < orig_sgl_count; i++) { 772 src_addr = (unsigned long) 773 kmap_atomic(sg_page(cur_src_sgl)) + 774 cur_src_sgl->offset; 775 src = src_addr; 776 srclen = cur_src_sgl->length; 777 778 if (bounce_addr == 0) 779 bounce_addr = (unsigned long) 780 kmap_atomic(sg_page(cur_dest_sgl)); 781 782 while (srclen) { 783 /* assume bounce offset always == 0 */ 784 dest = bounce_addr + cur_dest_sgl->length; 785 destlen = PAGE_SIZE - cur_dest_sgl->length; 786 787 copylen = min(srclen, destlen); 788 memcpy((void *)dest, (void *)src, copylen); 789 790 total_copied += copylen; 791 cur_dest_sgl->length += copylen; 792 srclen -= copylen; 793 src += copylen; 794 795 if (cur_dest_sgl->length == PAGE_SIZE) { 796 /* full..move to next entry */ 797 kunmap_atomic((void *)bounce_addr); 798 bounce_addr = 0; 799 j++; 800 } 801 802 /* if we need to use another bounce buffer */ 803 if (srclen && bounce_addr == 0) { 804 cur_dest_sgl = sg_next(cur_dest_sgl); 805 bounce_addr = (unsigned long) 806 kmap_atomic( 807 sg_page(cur_dest_sgl)); 808 } 809 810 } 811 812 kunmap_atomic((void *)(src_addr - cur_src_sgl->offset)); 813 cur_src_sgl = sg_next(cur_src_sgl); 814 } 815 816 if (bounce_addr) 817 kunmap_atomic((void *)bounce_addr); 818 819 local_irq_restore(flags); 820 821 return total_copied; 822 } 823 824 static void handle_sc_creation(struct vmbus_channel *new_sc) 825 { 826 struct hv_device *device = new_sc->primary_channel->device_obj; 827 struct storvsc_device *stor_device; 828 struct vmstorage_channel_properties props; 829 830 stor_device = get_out_stor_device(device); 831 if (!stor_device) 832 return; 833 834 if (stor_device->open_sub_channel == false) 835 return; 836 837 memset(&props, 0, sizeof(struct vmstorage_channel_properties)); 838 839 vmbus_open(new_sc, 840 storvsc_ringbuffer_size, 841 storvsc_ringbuffer_size, 842 (void *)&props, 843 sizeof(struct vmstorage_channel_properties), 844 storvsc_on_channel_callback, new_sc); 845 } 846 847 static void handle_multichannel_storage(struct hv_device *device, int max_chns) 848 { 849 struct storvsc_device *stor_device; 850 int num_cpus = num_online_cpus(); 851 int num_sc; 852 struct storvsc_cmd_request *request; 853 struct vstor_packet *vstor_packet; 854 int ret, t; 855 856 num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns); 857 stor_device = get_out_stor_device(device); 858 if (!stor_device) 859 return; 860 861 request = &stor_device->init_request; 862 vstor_packet = &request->vstor_packet; 863 864 stor_device->open_sub_channel = true; 865 /* 866 * Establish a handler for dealing with subchannels. 867 */ 868 vmbus_set_sc_create_callback(device->channel, handle_sc_creation); 869 870 /* 871 * Check to see if sub-channels have already been created. This 872 * can happen when this driver is re-loaded after unloading. 873 */ 874 875 if (vmbus_are_subchannels_present(device->channel)) 876 return; 877 878 stor_device->open_sub_channel = false; 879 /* 880 * Request the host to create sub-channels. 881 */ 882 memset(request, 0, sizeof(struct storvsc_cmd_request)); 883 init_completion(&request->wait_event); 884 vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS; 885 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 886 vstor_packet->sub_channel_count = num_sc; 887 888 ret = vmbus_sendpacket(device->channel, vstor_packet, 889 (sizeof(struct vstor_packet) - 890 vmscsi_size_delta), 891 (unsigned long)request, 892 VM_PKT_DATA_INBAND, 893 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 894 895 if (ret != 0) 896 return; 897 898 t = wait_for_completion_timeout(&request->wait_event, 10*HZ); 899 if (t == 0) 900 return; 901 902 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || 903 vstor_packet->status != 0) 904 return; 905 906 /* 907 * Now that we created the sub-channels, invoke the check; this 908 * may trigger the callback. 909 */ 910 stor_device->open_sub_channel = true; 911 vmbus_are_subchannels_present(device->channel); 912 } 913 914 static int storvsc_channel_init(struct hv_device *device) 915 { 916 struct storvsc_device *stor_device; 917 struct storvsc_cmd_request *request; 918 struct vstor_packet *vstor_packet; 919 int ret, t, i; 920 int max_chns; 921 bool process_sub_channels = false; 922 923 stor_device = get_out_stor_device(device); 924 if (!stor_device) 925 return -ENODEV; 926 927 request = &stor_device->init_request; 928 vstor_packet = &request->vstor_packet; 929 930 /* 931 * Now, initiate the vsc/vsp initialization protocol on the open 932 * channel 933 */ 934 memset(request, 0, sizeof(struct storvsc_cmd_request)); 935 init_completion(&request->wait_event); 936 vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION; 937 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 938 939 ret = vmbus_sendpacket(device->channel, vstor_packet, 940 (sizeof(struct vstor_packet) - 941 vmscsi_size_delta), 942 (unsigned long)request, 943 VM_PKT_DATA_INBAND, 944 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 945 if (ret != 0) 946 goto cleanup; 947 948 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 949 if (t == 0) { 950 ret = -ETIMEDOUT; 951 goto cleanup; 952 } 953 954 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || 955 vstor_packet->status != 0) { 956 ret = -EINVAL; 957 goto cleanup; 958 } 959 960 961 for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) { 962 /* reuse the packet for version range supported */ 963 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 964 vstor_packet->operation = 965 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION; 966 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 967 968 vstor_packet->version.major_minor = 969 vmstor_protocols[i].protocol_version; 970 971 /* 972 * The revision number is only used in Windows; set it to 0. 973 */ 974 vstor_packet->version.revision = 0; 975 976 ret = vmbus_sendpacket(device->channel, vstor_packet, 977 (sizeof(struct vstor_packet) - 978 vmscsi_size_delta), 979 (unsigned long)request, 980 VM_PKT_DATA_INBAND, 981 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 982 if (ret != 0) 983 goto cleanup; 984 985 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 986 if (t == 0) { 987 ret = -ETIMEDOUT; 988 goto cleanup; 989 } 990 991 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) { 992 ret = -EINVAL; 993 goto cleanup; 994 } 995 996 if (vstor_packet->status == 0) { 997 vmstor_proto_version = 998 vmstor_protocols[i].protocol_version; 999 1000 sense_buffer_size = 1001 vmstor_protocols[i].sense_buffer_size; 1002 1003 vmscsi_size_delta = 1004 vmstor_protocols[i].vmscsi_size_delta; 1005 1006 break; 1007 } 1008 } 1009 1010 if (vstor_packet->status != 0) { 1011 ret = -EINVAL; 1012 goto cleanup; 1013 } 1014 1015 1016 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 1017 vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES; 1018 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 1019 1020 ret = vmbus_sendpacket(device->channel, vstor_packet, 1021 (sizeof(struct vstor_packet) - 1022 vmscsi_size_delta), 1023 (unsigned long)request, 1024 VM_PKT_DATA_INBAND, 1025 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1026 1027 if (ret != 0) 1028 goto cleanup; 1029 1030 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 1031 if (t == 0) { 1032 ret = -ETIMEDOUT; 1033 goto cleanup; 1034 } 1035 1036 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || 1037 vstor_packet->status != 0) { 1038 ret = -EINVAL; 1039 goto cleanup; 1040 } 1041 1042 /* 1043 * Check to see if multi-channel support is there. 1044 * Hosts that implement protocol version of 5.1 and above 1045 * support multi-channel. 1046 */ 1047 max_chns = vstor_packet->storage_channel_properties.max_channel_cnt; 1048 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) { 1049 if (vstor_packet->storage_channel_properties.flags & 1050 STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) 1051 process_sub_channels = true; 1052 } 1053 stor_device->max_transfer_bytes = 1054 vstor_packet->storage_channel_properties.max_transfer_bytes; 1055 1056 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 1057 vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; 1058 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 1059 1060 ret = vmbus_sendpacket(device->channel, vstor_packet, 1061 (sizeof(struct vstor_packet) - 1062 vmscsi_size_delta), 1063 (unsigned long)request, 1064 VM_PKT_DATA_INBAND, 1065 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1066 1067 if (ret != 0) 1068 goto cleanup; 1069 1070 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 1071 if (t == 0) { 1072 ret = -ETIMEDOUT; 1073 goto cleanup; 1074 } 1075 1076 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || 1077 vstor_packet->status != 0) { 1078 ret = -EINVAL; 1079 goto cleanup; 1080 } 1081 1082 if (process_sub_channels) 1083 handle_multichannel_storage(device, max_chns); 1084 1085 1086 cleanup: 1087 return ret; 1088 } 1089 1090 static void storvsc_handle_error(struct vmscsi_request *vm_srb, 1091 struct scsi_cmnd *scmnd, 1092 struct Scsi_Host *host, 1093 u8 asc, u8 ascq) 1094 { 1095 struct storvsc_scan_work *wrk; 1096 void (*process_err_fn)(struct work_struct *work); 1097 bool do_work = false; 1098 1099 switch (vm_srb->srb_status) { 1100 case SRB_STATUS_ERROR: 1101 /* 1102 * If there is an error; offline the device since all 1103 * error recovery strategies would have already been 1104 * deployed on the host side. However, if the command 1105 * were a pass-through command deal with it appropriately. 1106 */ 1107 switch (scmnd->cmnd[0]) { 1108 case ATA_16: 1109 case ATA_12: 1110 set_host_byte(scmnd, DID_PASSTHROUGH); 1111 break; 1112 /* 1113 * On Some Windows hosts TEST_UNIT_READY command can return 1114 * SRB_STATUS_ERROR, let the upper level code deal with it 1115 * based on the sense information. 1116 */ 1117 case TEST_UNIT_READY: 1118 break; 1119 default: 1120 set_host_byte(scmnd, DID_TARGET_FAILURE); 1121 } 1122 break; 1123 case SRB_STATUS_INVALID_LUN: 1124 do_work = true; 1125 process_err_fn = storvsc_remove_lun; 1126 break; 1127 case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID): 1128 if ((asc == 0x2a) && (ascq == 0x9)) { 1129 do_work = true; 1130 process_err_fn = storvsc_device_scan; 1131 /* 1132 * Retry the I/O that trigerred this. 1133 */ 1134 set_host_byte(scmnd, DID_REQUEUE); 1135 } 1136 break; 1137 } 1138 1139 if (!do_work) 1140 return; 1141 1142 /* 1143 * We need to schedule work to process this error; schedule it. 1144 */ 1145 wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); 1146 if (!wrk) { 1147 set_host_byte(scmnd, DID_TARGET_FAILURE); 1148 return; 1149 } 1150 1151 wrk->host = host; 1152 wrk->lun = vm_srb->lun; 1153 INIT_WORK(&wrk->work, process_err_fn); 1154 schedule_work(&wrk->work); 1155 } 1156 1157 1158 static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) 1159 { 1160 struct scsi_cmnd *scmnd = cmd_request->cmd; 1161 struct hv_host_device *host_dev = shost_priv(scmnd->device->host); 1162 struct scsi_sense_hdr sense_hdr; 1163 struct vmscsi_request *vm_srb; 1164 struct Scsi_Host *host; 1165 struct storvsc_device *stor_dev; 1166 struct hv_device *dev = host_dev->dev; 1167 u32 payload_sz = cmd_request->payload_sz; 1168 void *payload = cmd_request->payload; 1169 1170 stor_dev = get_in_stor_device(dev); 1171 host = stor_dev->host; 1172 1173 vm_srb = &cmd_request->vstor_packet.vm_srb; 1174 if (cmd_request->bounce_sgl_count) { 1175 if (vm_srb->data_in == READ_TYPE) 1176 copy_from_bounce_buffer(scsi_sglist(scmnd), 1177 cmd_request->bounce_sgl, 1178 scsi_sg_count(scmnd), 1179 cmd_request->bounce_sgl_count); 1180 destroy_bounce_buffer(cmd_request->bounce_sgl, 1181 cmd_request->bounce_sgl_count); 1182 } 1183 1184 scmnd->result = vm_srb->scsi_status; 1185 1186 if (scmnd->result) { 1187 if (scsi_normalize_sense(scmnd->sense_buffer, 1188 SCSI_SENSE_BUFFERSIZE, &sense_hdr)) 1189 scsi_print_sense_hdr(scmnd->device, "storvsc", 1190 &sense_hdr); 1191 } 1192 1193 if (vm_srb->srb_status != SRB_STATUS_SUCCESS) 1194 storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, 1195 sense_hdr.ascq); 1196 1197 scsi_set_resid(scmnd, 1198 cmd_request->payload->range.len - 1199 vm_srb->data_transfer_length); 1200 1201 scmnd->scsi_done(scmnd); 1202 1203 if (payload_sz > 1204 sizeof(struct vmbus_channel_packet_multipage_buffer)) 1205 kfree(payload); 1206 } 1207 1208 static void storvsc_on_io_completion(struct hv_device *device, 1209 struct vstor_packet *vstor_packet, 1210 struct storvsc_cmd_request *request) 1211 { 1212 struct storvsc_device *stor_device; 1213 struct vstor_packet *stor_pkt; 1214 1215 stor_device = hv_get_drvdata(device); 1216 stor_pkt = &request->vstor_packet; 1217 1218 /* 1219 * The current SCSI handling on the host side does 1220 * not correctly handle: 1221 * INQUIRY command with page code parameter set to 0x80 1222 * MODE_SENSE command with cmd[2] == 0x1c 1223 * 1224 * Setup srb and scsi status so this won't be fatal. 1225 * We do this so we can distinguish truly fatal failues 1226 * (srb status == 0x4) and off-line the device in that case. 1227 */ 1228 1229 if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || 1230 (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { 1231 vstor_packet->vm_srb.scsi_status = 0; 1232 vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; 1233 } 1234 1235 1236 /* Copy over the status...etc */ 1237 stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status; 1238 stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status; 1239 stor_pkt->vm_srb.sense_info_length = 1240 vstor_packet->vm_srb.sense_info_length; 1241 1242 1243 if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) { 1244 /* CHECK_CONDITION */ 1245 if (vstor_packet->vm_srb.srb_status & 1246 SRB_STATUS_AUTOSENSE_VALID) { 1247 /* autosense data available */ 1248 1249 memcpy(request->cmd->sense_buffer, 1250 vstor_packet->vm_srb.sense_data, 1251 vstor_packet->vm_srb.sense_info_length); 1252 1253 } 1254 } 1255 1256 stor_pkt->vm_srb.data_transfer_length = 1257 vstor_packet->vm_srb.data_transfer_length; 1258 1259 storvsc_command_completion(request); 1260 1261 if (atomic_dec_and_test(&stor_device->num_outstanding_req) && 1262 stor_device->drain_notify) 1263 wake_up(&stor_device->waiting_to_drain); 1264 1265 1266 } 1267 1268 static void storvsc_on_receive(struct hv_device *device, 1269 struct vstor_packet *vstor_packet, 1270 struct storvsc_cmd_request *request) 1271 { 1272 struct storvsc_scan_work *work; 1273 struct storvsc_device *stor_device; 1274 1275 switch (vstor_packet->operation) { 1276 case VSTOR_OPERATION_COMPLETE_IO: 1277 storvsc_on_io_completion(device, vstor_packet, request); 1278 break; 1279 1280 case VSTOR_OPERATION_REMOVE_DEVICE: 1281 case VSTOR_OPERATION_ENUMERATE_BUS: 1282 stor_device = get_in_stor_device(device); 1283 work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); 1284 if (!work) 1285 return; 1286 1287 INIT_WORK(&work->work, storvsc_host_scan); 1288 work->host = stor_device->host; 1289 schedule_work(&work->work); 1290 break; 1291 1292 default: 1293 break; 1294 } 1295 } 1296 1297 static void storvsc_on_channel_callback(void *context) 1298 { 1299 struct vmbus_channel *channel = (struct vmbus_channel *)context; 1300 struct hv_device *device; 1301 struct storvsc_device *stor_device; 1302 u32 bytes_recvd; 1303 u64 request_id; 1304 unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)]; 1305 struct storvsc_cmd_request *request; 1306 int ret; 1307 1308 if (channel->primary_channel != NULL) 1309 device = channel->primary_channel->device_obj; 1310 else 1311 device = channel->device_obj; 1312 1313 stor_device = get_in_stor_device(device); 1314 if (!stor_device) 1315 return; 1316 1317 do { 1318 ret = vmbus_recvpacket(channel, packet, 1319 ALIGN((sizeof(struct vstor_packet) - 1320 vmscsi_size_delta), 8), 1321 &bytes_recvd, &request_id); 1322 if (ret == 0 && bytes_recvd > 0) { 1323 1324 request = (struct storvsc_cmd_request *) 1325 (unsigned long)request_id; 1326 1327 if ((request == &stor_device->init_request) || 1328 (request == &stor_device->reset_request)) { 1329 1330 memcpy(&request->vstor_packet, packet, 1331 (sizeof(struct vstor_packet) - 1332 vmscsi_size_delta)); 1333 complete(&request->wait_event); 1334 } else { 1335 storvsc_on_receive(device, 1336 (struct vstor_packet *)packet, 1337 request); 1338 } 1339 } else { 1340 break; 1341 } 1342 } while (1); 1343 1344 return; 1345 } 1346 1347 static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size) 1348 { 1349 struct vmstorage_channel_properties props; 1350 int ret; 1351 1352 memset(&props, 0, sizeof(struct vmstorage_channel_properties)); 1353 1354 ret = vmbus_open(device->channel, 1355 ring_size, 1356 ring_size, 1357 (void *)&props, 1358 sizeof(struct vmstorage_channel_properties), 1359 storvsc_on_channel_callback, device->channel); 1360 1361 if (ret != 0) 1362 return ret; 1363 1364 ret = storvsc_channel_init(device); 1365 1366 return ret; 1367 } 1368 1369 static int storvsc_dev_remove(struct hv_device *device) 1370 { 1371 struct storvsc_device *stor_device; 1372 unsigned long flags; 1373 1374 stor_device = hv_get_drvdata(device); 1375 1376 spin_lock_irqsave(&device->channel->inbound_lock, flags); 1377 stor_device->destroy = true; 1378 spin_unlock_irqrestore(&device->channel->inbound_lock, flags); 1379 1380 /* 1381 * At this point, all outbound traffic should be disable. We 1382 * only allow inbound traffic (responses) to proceed so that 1383 * outstanding requests can be completed. 1384 */ 1385 1386 storvsc_wait_to_drain(stor_device); 1387 1388 /* 1389 * Since we have already drained, we don't need to busy wait 1390 * as was done in final_release_stor_device() 1391 * Note that we cannot set the ext pointer to NULL until 1392 * we have drained - to drain the outgoing packets, we need to 1393 * allow incoming packets. 1394 */ 1395 spin_lock_irqsave(&device->channel->inbound_lock, flags); 1396 hv_set_drvdata(device, NULL); 1397 spin_unlock_irqrestore(&device->channel->inbound_lock, flags); 1398 1399 /* Close the channel */ 1400 vmbus_close(device->channel); 1401 1402 kfree(stor_device); 1403 return 0; 1404 } 1405 1406 static int storvsc_do_io(struct hv_device *device, 1407 struct storvsc_cmd_request *request) 1408 { 1409 struct storvsc_device *stor_device; 1410 struct vstor_packet *vstor_packet; 1411 struct vmbus_channel *outgoing_channel; 1412 int ret = 0; 1413 1414 vstor_packet = &request->vstor_packet; 1415 stor_device = get_out_stor_device(device); 1416 1417 if (!stor_device) 1418 return -ENODEV; 1419 1420 1421 request->device = device; 1422 /* 1423 * Select an an appropriate channel to send the request out. 1424 */ 1425 1426 outgoing_channel = vmbus_get_outgoing_channel(device->channel); 1427 1428 1429 vstor_packet->flags |= REQUEST_COMPLETION_FLAG; 1430 1431 vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) - 1432 vmscsi_size_delta); 1433 1434 1435 vstor_packet->vm_srb.sense_info_length = sense_buffer_size; 1436 1437 1438 vstor_packet->vm_srb.data_transfer_length = 1439 request->payload->range.len; 1440 1441 vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB; 1442 1443 if (request->payload->range.len) { 1444 1445 ret = vmbus_sendpacket_mpb_desc(outgoing_channel, 1446 request->payload, request->payload_sz, 1447 vstor_packet, 1448 (sizeof(struct vstor_packet) - 1449 vmscsi_size_delta), 1450 (unsigned long)request); 1451 } else { 1452 ret = vmbus_sendpacket(outgoing_channel, vstor_packet, 1453 (sizeof(struct vstor_packet) - 1454 vmscsi_size_delta), 1455 (unsigned long)request, 1456 VM_PKT_DATA_INBAND, 1457 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1458 } 1459 1460 if (ret != 0) 1461 return ret; 1462 1463 atomic_inc(&stor_device->num_outstanding_req); 1464 1465 return ret; 1466 } 1467 1468 static int storvsc_device_configure(struct scsi_device *sdevice) 1469 { 1470 1471 blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE); 1472 1473 blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY); 1474 1475 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); 1476 1477 sdevice->no_write_same = 1; 1478 1479 /* 1480 * Add blist flags to permit the reading of the VPD pages even when 1481 * the target may claim SPC-2 compliance. MSFT targets currently 1482 * claim SPC-2 compliance while they implement post SPC-2 features. 1483 * With this patch we can correctly handle WRITE_SAME_16 issues. 1484 */ 1485 sdevice->sdev_bflags |= msft_blist_flags; 1486 1487 /* 1488 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 1489 * if the device is a MSFT virtual device. If the host is 1490 * WIN10 or newer, allow write_same. 1491 */ 1492 if (!strncmp(sdevice->vendor, "Msft", 4)) { 1493 switch (vmstor_proto_version) { 1494 case VMSTOR_PROTO_VERSION_WIN8: 1495 case VMSTOR_PROTO_VERSION_WIN8_1: 1496 sdevice->scsi_level = SCSI_SPC_3; 1497 break; 1498 } 1499 1500 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10) 1501 sdevice->no_write_same = 0; 1502 } 1503 1504 return 0; 1505 } 1506 1507 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, 1508 sector_t capacity, int *info) 1509 { 1510 sector_t nsect = capacity; 1511 sector_t cylinders = nsect; 1512 int heads, sectors_pt; 1513 1514 /* 1515 * We are making up these values; let us keep it simple. 1516 */ 1517 heads = 0xff; 1518 sectors_pt = 0x3f; /* Sectors per track */ 1519 sector_div(cylinders, heads * sectors_pt); 1520 if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect) 1521 cylinders = 0xffff; 1522 1523 info[0] = heads; 1524 info[1] = sectors_pt; 1525 info[2] = (int)cylinders; 1526 1527 return 0; 1528 } 1529 1530 static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) 1531 { 1532 struct hv_host_device *host_dev = shost_priv(scmnd->device->host); 1533 struct hv_device *device = host_dev->dev; 1534 1535 struct storvsc_device *stor_device; 1536 struct storvsc_cmd_request *request; 1537 struct vstor_packet *vstor_packet; 1538 int ret, t; 1539 1540 1541 stor_device = get_out_stor_device(device); 1542 if (!stor_device) 1543 return FAILED; 1544 1545 request = &stor_device->reset_request; 1546 vstor_packet = &request->vstor_packet; 1547 1548 init_completion(&request->wait_event); 1549 1550 vstor_packet->operation = VSTOR_OPERATION_RESET_BUS; 1551 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 1552 vstor_packet->vm_srb.path_id = stor_device->path_id; 1553 1554 ret = vmbus_sendpacket(device->channel, vstor_packet, 1555 (sizeof(struct vstor_packet) - 1556 vmscsi_size_delta), 1557 (unsigned long)&stor_device->reset_request, 1558 VM_PKT_DATA_INBAND, 1559 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1560 if (ret != 0) 1561 return FAILED; 1562 1563 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 1564 if (t == 0) 1565 return TIMEOUT_ERROR; 1566 1567 1568 /* 1569 * At this point, all outstanding requests in the adapter 1570 * should have been flushed out and return to us 1571 * There is a potential race here where the host may be in 1572 * the process of responding when we return from here. 1573 * Just wait for all in-transit packets to be accounted for 1574 * before we return from here. 1575 */ 1576 storvsc_wait_to_drain(stor_device); 1577 1578 return SUCCESS; 1579 } 1580 1581 /* 1582 * The host guarantees to respond to each command, although I/O latencies might 1583 * be unbounded on Azure. Reset the timer unconditionally to give the host a 1584 * chance to perform EH. 1585 */ 1586 static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) 1587 { 1588 return BLK_EH_RESET_TIMER; 1589 } 1590 1591 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) 1592 { 1593 bool allowed = true; 1594 u8 scsi_op = scmnd->cmnd[0]; 1595 1596 switch (scsi_op) { 1597 /* the host does not handle WRITE_SAME, log accident usage */ 1598 case WRITE_SAME: 1599 /* 1600 * smartd sends this command and the host does not handle 1601 * this. So, don't send it. 1602 */ 1603 case SET_WINDOW: 1604 scmnd->result = ILLEGAL_REQUEST << 16; 1605 allowed = false; 1606 break; 1607 default: 1608 break; 1609 } 1610 return allowed; 1611 } 1612 1613 static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) 1614 { 1615 int ret; 1616 struct hv_host_device *host_dev = shost_priv(host); 1617 struct hv_device *dev = host_dev->dev; 1618 struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd); 1619 int i; 1620 struct scatterlist *sgl; 1621 unsigned int sg_count = 0; 1622 struct vmscsi_request *vm_srb; 1623 struct scatterlist *cur_sgl; 1624 struct vmbus_packet_mpb_array *payload; 1625 u32 payload_sz; 1626 u32 length; 1627 1628 if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) { 1629 /* 1630 * On legacy hosts filter unimplemented commands. 1631 * Future hosts are expected to correctly handle 1632 * unsupported commands. Furthermore, it is 1633 * possible that some of the currently 1634 * unsupported commands maybe supported in 1635 * future versions of the host. 1636 */ 1637 if (!storvsc_scsi_cmd_ok(scmnd)) { 1638 scmnd->scsi_done(scmnd); 1639 return 0; 1640 } 1641 } 1642 1643 /* Setup the cmd request */ 1644 cmd_request->cmd = scmnd; 1645 1646 vm_srb = &cmd_request->vstor_packet.vm_srb; 1647 vm_srb->win8_extension.time_out_value = 60; 1648 1649 vm_srb->win8_extension.srb_flags |= 1650 (SRB_FLAGS_QUEUE_ACTION_ENABLE | 1651 SRB_FLAGS_DISABLE_SYNCH_TRANSFER); 1652 1653 /* Build the SRB */ 1654 switch (scmnd->sc_data_direction) { 1655 case DMA_TO_DEVICE: 1656 vm_srb->data_in = WRITE_TYPE; 1657 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT; 1658 break; 1659 case DMA_FROM_DEVICE: 1660 vm_srb->data_in = READ_TYPE; 1661 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN; 1662 break; 1663 case DMA_NONE: 1664 vm_srb->data_in = UNKNOWN_TYPE; 1665 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; 1666 break; 1667 default: 1668 /* 1669 * This is DMA_BIDIRECTIONAL or something else we are never 1670 * supposed to see here. 1671 */ 1672 WARN(1, "Unexpected data direction: %d\n", 1673 scmnd->sc_data_direction); 1674 return -EINVAL; 1675 } 1676 1677 1678 vm_srb->port_number = host_dev->port; 1679 vm_srb->path_id = scmnd->device->channel; 1680 vm_srb->target_id = scmnd->device->id; 1681 vm_srb->lun = scmnd->device->lun; 1682 1683 vm_srb->cdb_length = scmnd->cmd_len; 1684 1685 memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); 1686 1687 sgl = (struct scatterlist *)scsi_sglist(scmnd); 1688 sg_count = scsi_sg_count(scmnd); 1689 1690 length = scsi_bufflen(scmnd); 1691 payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; 1692 payload_sz = sizeof(cmd_request->mpb); 1693 1694 if (sg_count) { 1695 /* check if we need to bounce the sgl */ 1696 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { 1697 cmd_request->bounce_sgl = 1698 create_bounce_buffer(sgl, sg_count, 1699 length, 1700 vm_srb->data_in); 1701 if (!cmd_request->bounce_sgl) 1702 return SCSI_MLQUEUE_HOST_BUSY; 1703 1704 cmd_request->bounce_sgl_count = 1705 ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT; 1706 1707 if (vm_srb->data_in == WRITE_TYPE) 1708 copy_to_bounce_buffer(sgl, 1709 cmd_request->bounce_sgl, sg_count); 1710 1711 sgl = cmd_request->bounce_sgl; 1712 sg_count = cmd_request->bounce_sgl_count; 1713 } 1714 1715 1716 if (sg_count > MAX_PAGE_BUFFER_COUNT) { 1717 1718 payload_sz = (sg_count * sizeof(void *) + 1719 sizeof(struct vmbus_packet_mpb_array)); 1720 payload = kmalloc(payload_sz, GFP_ATOMIC); 1721 if (!payload) { 1722 if (cmd_request->bounce_sgl_count) 1723 destroy_bounce_buffer( 1724 cmd_request->bounce_sgl, 1725 cmd_request->bounce_sgl_count); 1726 1727 return SCSI_MLQUEUE_DEVICE_BUSY; 1728 } 1729 } 1730 1731 payload->range.len = length; 1732 payload->range.offset = sgl[0].offset; 1733 1734 cur_sgl = sgl; 1735 for (i = 0; i < sg_count; i++) { 1736 payload->range.pfn_array[i] = 1737 page_to_pfn(sg_page((cur_sgl))); 1738 cur_sgl = sg_next(cur_sgl); 1739 } 1740 1741 } else if (scsi_sglist(scmnd)) { 1742 payload->range.len = length; 1743 payload->range.offset = 1744 virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1); 1745 payload->range.pfn_array[0] = 1746 virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT; 1747 } 1748 1749 cmd_request->payload = payload; 1750 cmd_request->payload_sz = payload_sz; 1751 1752 /* Invokes the vsc to start an IO */ 1753 ret = storvsc_do_io(dev, cmd_request); 1754 1755 if (ret == -EAGAIN) { 1756 /* no more space */ 1757 1758 if (cmd_request->bounce_sgl_count) 1759 destroy_bounce_buffer(cmd_request->bounce_sgl, 1760 cmd_request->bounce_sgl_count); 1761 1762 return SCSI_MLQUEUE_DEVICE_BUSY; 1763 } 1764 1765 return 0; 1766 } 1767 1768 static struct scsi_host_template scsi_driver = { 1769 .module = THIS_MODULE, 1770 .name = "storvsc_host_t", 1771 .cmd_size = sizeof(struct storvsc_cmd_request), 1772 .bios_param = storvsc_get_chs, 1773 .queuecommand = storvsc_queuecommand, 1774 .eh_host_reset_handler = storvsc_host_reset_handler, 1775 .proc_name = "storvsc_host", 1776 .eh_timed_out = storvsc_eh_timed_out, 1777 .slave_configure = storvsc_device_configure, 1778 .cmd_per_lun = 255, 1779 .this_id = -1, 1780 .use_clustering = ENABLE_CLUSTERING, 1781 /* Make sure we dont get a sg segment crosses a page boundary */ 1782 .dma_boundary = PAGE_SIZE-1, 1783 .no_write_same = 1, 1784 }; 1785 1786 enum { 1787 SCSI_GUID, 1788 IDE_GUID, 1789 SFC_GUID, 1790 }; 1791 1792 static const struct hv_vmbus_device_id id_table[] = { 1793 /* SCSI guid */ 1794 { HV_SCSI_GUID, 1795 .driver_data = SCSI_GUID 1796 }, 1797 /* IDE guid */ 1798 { HV_IDE_GUID, 1799 .driver_data = IDE_GUID 1800 }, 1801 /* Fibre Channel GUID */ 1802 { 1803 HV_SYNTHFC_GUID, 1804 .driver_data = SFC_GUID 1805 }, 1806 { }, 1807 }; 1808 1809 MODULE_DEVICE_TABLE(vmbus, id_table); 1810 1811 static int storvsc_probe(struct hv_device *device, 1812 const struct hv_vmbus_device_id *dev_id) 1813 { 1814 int ret; 1815 int num_cpus = num_online_cpus(); 1816 struct Scsi_Host *host; 1817 struct hv_host_device *host_dev; 1818 bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); 1819 int target = 0; 1820 struct storvsc_device *stor_device; 1821 int max_luns_per_target; 1822 int max_targets; 1823 int max_channels; 1824 int max_sub_channels = 0; 1825 1826 /* 1827 * Based on the windows host we are running on, 1828 * set state to properly communicate with the host. 1829 */ 1830 1831 if (vmbus_proto_version < VERSION_WIN8) { 1832 max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET; 1833 max_targets = STORVSC_IDE_MAX_TARGETS; 1834 max_channels = STORVSC_IDE_MAX_CHANNELS; 1835 } else { 1836 max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET; 1837 max_targets = STORVSC_MAX_TARGETS; 1838 max_channels = STORVSC_MAX_CHANNELS; 1839 /* 1840 * On Windows8 and above, we support sub-channels for storage. 1841 * The number of sub-channels offerred is based on the number of 1842 * VCPUs in the guest. 1843 */ 1844 max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel); 1845 } 1846 1847 scsi_driver.can_queue = (max_outstanding_req_per_channel * 1848 (max_sub_channels + 1)); 1849 1850 host = scsi_host_alloc(&scsi_driver, 1851 sizeof(struct hv_host_device)); 1852 if (!host) 1853 return -ENOMEM; 1854 1855 host_dev = shost_priv(host); 1856 memset(host_dev, 0, sizeof(struct hv_host_device)); 1857 1858 host_dev->port = host->host_no; 1859 host_dev->dev = device; 1860 1861 1862 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL); 1863 if (!stor_device) { 1864 ret = -ENOMEM; 1865 goto err_out0; 1866 } 1867 1868 stor_device->destroy = false; 1869 stor_device->open_sub_channel = false; 1870 init_waitqueue_head(&stor_device->waiting_to_drain); 1871 stor_device->device = device; 1872 stor_device->host = host; 1873 hv_set_drvdata(device, stor_device); 1874 1875 stor_device->port_number = host->host_no; 1876 ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size); 1877 if (ret) 1878 goto err_out1; 1879 1880 host_dev->path = stor_device->path_id; 1881 host_dev->target = stor_device->target_id; 1882 1883 switch (dev_id->driver_data) { 1884 case SFC_GUID: 1885 host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET; 1886 host->max_id = STORVSC_FC_MAX_TARGETS; 1887 host->max_channel = STORVSC_FC_MAX_CHANNELS - 1; 1888 break; 1889 1890 case SCSI_GUID: 1891 host->max_lun = max_luns_per_target; 1892 host->max_id = max_targets; 1893 host->max_channel = max_channels - 1; 1894 break; 1895 1896 default: 1897 host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET; 1898 host->max_id = STORVSC_IDE_MAX_TARGETS; 1899 host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1; 1900 break; 1901 } 1902 /* max cmd length */ 1903 host->max_cmd_len = STORVSC_MAX_CMD_LEN; 1904 1905 /* 1906 * set the table size based on the info we got 1907 * from the host. 1908 */ 1909 host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); 1910 1911 /* Register the HBA and start the scsi bus scan */ 1912 ret = scsi_add_host(host, &device->device); 1913 if (ret != 0) 1914 goto err_out2; 1915 1916 if (!dev_is_ide) { 1917 scsi_scan_host(host); 1918 } else { 1919 target = (device->dev_instance.b[5] << 8 | 1920 device->dev_instance.b[4]); 1921 ret = scsi_add_device(host, 0, target, 0); 1922 if (ret) { 1923 scsi_remove_host(host); 1924 goto err_out2; 1925 } 1926 } 1927 return 0; 1928 1929 err_out2: 1930 /* 1931 * Once we have connected with the host, we would need to 1932 * to invoke storvsc_dev_remove() to rollback this state and 1933 * this call also frees up the stor_device; hence the jump around 1934 * err_out1 label. 1935 */ 1936 storvsc_dev_remove(device); 1937 goto err_out0; 1938 1939 err_out1: 1940 kfree(stor_device); 1941 1942 err_out0: 1943 scsi_host_put(host); 1944 return ret; 1945 } 1946 1947 static int storvsc_remove(struct hv_device *dev) 1948 { 1949 struct storvsc_device *stor_device = hv_get_drvdata(dev); 1950 struct Scsi_Host *host = stor_device->host; 1951 1952 scsi_remove_host(host); 1953 storvsc_dev_remove(dev); 1954 scsi_host_put(host); 1955 1956 return 0; 1957 } 1958 1959 static struct hv_driver storvsc_drv = { 1960 .name = KBUILD_MODNAME, 1961 .id_table = id_table, 1962 .probe = storvsc_probe, 1963 .remove = storvsc_remove, 1964 }; 1965 1966 static int __init storvsc_drv_init(void) 1967 { 1968 1969 /* 1970 * Divide the ring buffer data size (which is 1 page less 1971 * than the ring buffer size since that page is reserved for 1972 * the ring buffer indices) by the max request size (which is 1973 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) 1974 */ 1975 max_outstanding_req_per_channel = 1976 ((storvsc_ringbuffer_size - PAGE_SIZE) / 1977 ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + 1978 sizeof(struct vstor_packet) + sizeof(u64) - 1979 vmscsi_size_delta, 1980 sizeof(u64))); 1981 1982 return vmbus_driver_register(&storvsc_drv); 1983 } 1984 1985 static void __exit storvsc_drv_exit(void) 1986 { 1987 vmbus_driver_unregister(&storvsc_drv); 1988 } 1989 1990 MODULE_LICENSE("GPL"); 1991 MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver"); 1992 module_init(storvsc_drv_init); 1993 module_exit(storvsc_drv_exit); 1994