/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>

/*
 * All wire protocol details (storage protocol between the guest and the host)
 * are consolidated here.
 *
 * Begin protocol definitions.
 */

/*
 * Version history:
 * V1 Beta: 0.1
 * V1 RC < 2008/1/31: 1.0
 * V1 RC > 2008/1/31: 2.0
 * Win7: 4.2
 * Win8: 5.1
 */

#define VMSTOR_WIN7_MAJOR 4
#define VMSTOR_WIN7_MINOR 2

#define VMSTOR_WIN8_MAJOR 5
#define VMSTOR_WIN8_MINOR 1
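/*
 * Note: each major/minor pair above is packed into the single u16 that is
 * sent on the wire during version negotiation (see storvsc_get_version()
 * below); for example, a Win8 host negotiates (5 << 8) | 1 == 0x0501.
 */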
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
	VSTOR_OPERATION_COMPLETE_IO		= 1,
	VSTOR_OPERATION_REMOVE_DEVICE		= 2,
	VSTOR_OPERATION_EXECUTE_SRB		= 3,
	VSTOR_OPERATION_RESET_LUN		= 4,
	VSTOR_OPERATION_RESET_ADAPTER		= 5,
	VSTOR_OPERATION_RESET_BUS		= 6,
	VSTOR_OPERATION_BEGIN_INITIALIZATION	= 7,
	VSTOR_OPERATION_END_INITIALIZATION	= 8,
	VSTOR_OPERATION_QUERY_PROTOCOL_VERSION	= 9,
	VSTOR_OPERATION_QUERY_PROPERTIES	= 10,
	VSTOR_OPERATION_ENUMERATE_BUS		= 11,
	VSTOR_OPERATION_FCHBA_DATA		= 12,
	VSTOR_OPERATION_CREATE_SUB_CHANNELS	= 13,
	VSTOR_OPERATION_MAXIMUM			= 13
};

/*
 * WWN packet for Fibre Channel HBA
 */

struct hv_fc_wwn_packet {
	bool	primary_active;
	u8	reserved1;
	u8	reserved2;
	u8	primary_port_wwn[8];
	u8	primary_node_wwn[8];
	u8	secondary_port_wwn[8];
	u8	secondary_node_wwn[8];
};



/*
 * SRB Flag Bits
 */

#define SRB_FLAGS_QUEUE_ACTION_ENABLE		0x00000002
#define SRB_FLAGS_DISABLE_DISCONNECT		0x00000004
#define SRB_FLAGS_DISABLE_SYNCH_TRANSFER	0x00000008
#define SRB_FLAGS_BYPASS_FROZEN_QUEUE		0x00000010
#define SRB_FLAGS_DISABLE_AUTOSENSE		0x00000020
#define SRB_FLAGS_DATA_IN			0x00000040
#define SRB_FLAGS_DATA_OUT			0x00000080
#define SRB_FLAGS_NO_DATA_TRANSFER		0x00000000
#define SRB_FLAGS_UNSPECIFIED_DIRECTION	(SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT)
#define SRB_FLAGS_NO_QUEUE_FREEZE		0x00000100
#define SRB_FLAGS_ADAPTER_CACHE_ENABLE		0x00000200
#define SRB_FLAGS_FREE_SENSE_BUFFER		0x00000400

/*
 * This flag indicates the request is part of the workflow for processing a D3.
 */
#define SRB_FLAGS_D3_PROCESSING			0x00000800
#define SRB_FLAGS_IS_ACTIVE			0x00010000
#define SRB_FLAGS_ALLOCATED_FROM_ZONE		0x00020000
#define SRB_FLAGS_SGLIST_FROM_POOL		0x00040000
#define SRB_FLAGS_BYPASS_LOCKED_QUEUE		0x00080000
#define SRB_FLAGS_NO_KEEP_AWAKE			0x00100000
#define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE	0x00200000
#define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT	0x00400000
#define SRB_FLAGS_DONT_START_NEXT_PACKET	0x00800000
#define SRB_FLAGS_PORT_DRIVER_RESERVED		0x0F000000
#define SRB_FLAGS_CLASS_DRIVER_RESERVED		0xF0000000


/*
 * Platform neutral description of a scsi request -
 * this remains the same across the wire regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
#define STORVSC_MAX_CMD_LEN			0x10

#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE	0x14
#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE	0x12

#define STORVSC_SENSE_BUFFER_SIZE		0x14
#define STORVSC_MAX_BUF_LEN_WITH_PADDING	0x14

/*
 * Sense buffer size changed in win8; have a run-time
 * variable to track the size we should use.
 */
static int sense_buffer_size;
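/*
 * Note: of the SRB flag bits defined above, this driver itself only sets
 * SRB_FLAGS_QUEUE_ACTION_ENABLE, SRB_FLAGS_DISABLE_SYNCH_TRANSFER and the
 * DATA_IN/DATA_OUT direction bits (see storvsc_queuecommand()); the rest
 * appear to be carried over for parity with the Windows SRB definitions.
 */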
/*
 * The size of the vmscsi_request has changed in win8. The
 * additional size is because of new elements added to the
 * structure. These elements are valid only when we are talking
 * to a win8 host.
 * Track the correction to size we need to apply.
 */

static int vmscsi_size_delta;
static int vmstor_current_major;
static int vmstor_current_minor;

struct vmscsi_win8_extension {
	/*
	 * The following were added in Windows 8
	 */
	u16 reserve;
	u8  queue_tag;
	u8  queue_action;
	u32 srb_flags;
	u32 time_out_value;
	u32 queue_sort_ey;
} __packed;

struct vmscsi_request {
	u16 length;
	u8 srb_status;
	u8 scsi_status;

	u8  port_number;
	u8  path_id;
	u8  target_id;
	u8  lun;

	u8  cdb_length;
	u8  sense_info_length;
	u8  data_in;
	u8  reserved;

	u32 data_transfer_length;

	union {
		u8 cdb[STORVSC_MAX_CMD_LEN];
		u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
		u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
	};
	/*
	 * The following was added in win8.
	 */
	struct vmscsi_win8_extension win8_extension;

} __packed;


/*
 * This structure is sent during the initialization phase to get the different
 * properties of the channel.
 */

#define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL		0x1

struct vmstorage_channel_properties {
	u32 reserved;
	u16 max_channel_cnt;
	u16 reserved1;

	u32 flags;
	u32 max_transfer_bytes;

	u64 reserved2;
} __packed;

/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
	/* Major (MSW) and minor (LSW) version numbers. */
	u16 major_minor;

	/*
	 * Revision number is auto-incremented whenever this file is changed
	 * on the Windows side. A mismatch does not definitely indicate
	 * incompatibility--but it does indicate mismatched builds.
	 * This is only used on the Windows side. Just set it to 0.
	 */
	u16 revision;
} __packed;

/* Channel Property Flags */
#define STORAGE_CHANNEL_REMOVABLE_FLAG		0x1
#define STORAGE_CHANNEL_EMULATED_IDE_FLAG	0x2

struct vstor_packet {
	/* Requested operation type */
	enum vstor_packet_operation operation;

	/* Flags - see below for values */
	u32 flags;

	/* Status of the request returned from the server side. */
	u32 status;

	/* Data payload area */
	union {
		/*
		 * Structure used to forward SCSI commands from the
		 * client to the server.
		 */
		struct vmscsi_request vm_srb;

		/* Structure used to query channel properties. */
		struct vmstorage_channel_properties storage_channel_properties;

		/* Used during version negotiations. */
		struct vmstorage_protocol_version version;

		/* Fibre channel address packet */
		struct hv_fc_wwn_packet wwn_packet;

		/* Number of sub-channels to create */
		u16 sub_channel_count;

		/* This will be the maximum of the union members */
		u8  buffer[0x34];
	};
} __packed;

/*
 * Packet Flags:
 *
 * This flag indicates that the server should send back a completion for this
 * packet.
 */

#define REQUEST_COMPLETION_FLAG	0x1

/* Matches Windows-end */
enum storvsc_request_type {
	WRITE_TYPE = 0,
	READ_TYPE,
	UNKNOWN_TYPE,
};

/*
 * SRB status codes and masks; a subset of the codes used here.
 */

#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_INVALID_LUN		0x20
#define SRB_STATUS_SUCCESS		0x01
#define SRB_STATUS_ABORTED		0x02
#define SRB_STATUS_ERROR		0x04

/*
 * This is the end of Protocol specific defines.
 */
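/*
 * Illustrative sizing, given the layouts above: when talking to a pre-Win8
 * host, storvsc_probe() sets vmscsi_size_delta to
 * sizeof(struct vmscsi_win8_extension), so every send/receive below uses
 * sizeof(struct vstor_packet) - vmscsi_size_delta to trim the Win8-only
 * tail off the on-wire packet; on Win8 and later hosts the delta is 0 and
 * the full structure is exchanged.
 */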
static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

/*
 * Timeout in seconds for all devices managed by this driver.
 */
static int storvsc_timeout = 180;

static int msft_blist_flags = BLIST_TRY_VPD_PAGES;

#define STORVSC_MAX_IO_REQUESTS				200

static void storvsc_on_channel_callback(void *context);

#define STORVSC_MAX_LUNS_PER_TARGET			255
#define STORVSC_MAX_TARGETS				2
#define STORVSC_MAX_CHANNELS				8

#define STORVSC_FC_MAX_LUNS_PER_TARGET			255
#define STORVSC_FC_MAX_TARGETS				128
#define STORVSC_FC_MAX_CHANNELS				8

#define STORVSC_IDE_MAX_LUNS_PER_TARGET			64
#define STORVSC_IDE_MAX_TARGETS				1
#define STORVSC_IDE_MAX_CHANNELS			1

struct storvsc_cmd_request {
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_device *device;

	/* Synchronize the request/response if needed */
	struct completion wait_event;

	struct hv_multipage_buffer data_buffer;
	struct vstor_packet vstor_packet;
};


/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
	struct hv_device *device;

	bool	 destroy;
	bool	 drain_notify;
	bool	 open_sub_channel;
	atomic_t num_outstanding_req;
	struct Scsi_Host *host;

	wait_queue_head_t waiting_to_drain;

	/*
	 * Each unique Port/Path/Target represents 1 channel, i.e., one SCSI
	 * controller. In reality, the path id and target id are always 0
	 * and the port is set by us.
	 */
	unsigned int port_number;
	unsigned char path_id;
	unsigned char target_id;

	/* Used for vsc/vsp channel reset process */
	struct storvsc_cmd_request init_request;
	struct storvsc_cmd_request reset_request;
};

struct hv_host_device {
	struct hv_device *dev;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_scan_work {
	struct work_struct work;
	struct Scsi_Host *host;
	uint lun;
};

static void storvsc_device_scan(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	uint lun;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);
	lun = wrk->lun;

	sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
	if (!sdev)
		goto done;
	scsi_rescan_device(&sdev->sdev_gendev);
	scsi_device_put(sdev);

done:
	kfree(wrk);
}

static void storvsc_bus_scan(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	int id, order_id;

	wrk = container_of(work, struct storvsc_scan_work, work);
	for (id = 0; id < wrk->host->max_id; ++id) {
		if (wrk->host->reverse_ordering)
			order_id = wrk->host->max_id - id - 1;
		else
			order_id = id;

		scsi_scan_target(&wrk->host->shost_gendev, 0,
				 order_id, SCAN_WILD_CARD, 1);
	}
	kfree(wrk);
}
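/*
 * Note: these scan/remove work handlers are scheduled from the channel
 * callback and I/O completion paths (see storvsc_on_receive() and
 * storvsc_handle_error() below), which is why the storvsc_scan_work items
 * are allocated with GFP_ATOMIC and the SCSI topology updates are deferred
 * to process context.
 */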
static void storvsc_remove_lun(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);
	if (!scsi_host_get(wrk->host))
		goto done;

	sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
	scsi_host_put(wrk->host);

done:
	kfree(wrk);
}

/*
 * Major/minor macros.  Minor version is in LSB, meaning that earlier flat
 * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
 */

static inline u16 storvsc_get_version(u8 major, u8 minor)
{
	u16 version;

	version = ((major << 8) | minor);
	return version;
}

/*
 * We can get incoming messages from the host that are not in response to
 * messages that we have sent out. An example of this would be messages
 * received by the guest to notify dynamic addition/removal of LUNs. To
 * deal with potential race conditions where the driver may be in the
 * midst of being unloaded when we might receive an unsolicited message
 * from the host, we have implemented a mechanism to guarantee sequential
 * consistency:
 *
 * 1) Once the device is marked as being destroyed, we will fail all
 *    outgoing messages.
 * 2) We permit incoming messages when the device is being destroyed,
 *    only to properly account for messages already sent out.
 */

static inline struct storvsc_device *get_out_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (stor_device && stor_device->destroy)
		stor_device = NULL;

	return stor_device;
}


static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
	dev->drain_notify = true;
	wait_event(dev->waiting_to_drain,
		   atomic_read(&dev->num_outstanding_req) == 0);
	dev->drain_notify = false;
}

static inline struct storvsc_device *get_in_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (!stor_device)
		goto get_in_err;

	/*
	 * If the device is being destroyed; allow incoming
	 * traffic only to cleanup outstanding requests.
	 */

	if (stor_device->destroy &&
	    (atomic_read(&stor_device->num_outstanding_req) == 0))
		stor_device = NULL;

get_in_err:
	return stor_device;

}

static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page((&sgl[i]));
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}

static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* No need to check a single-entry list */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure 1st one does not have hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure last one does not have hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}
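/*
 * Illustrative example of the check above: a two-entry scatterlist whose
 * first entry is 512 bytes at offset 0 fails the
 * "offset + length == PAGE_SIZE" test for entry 0, so do_bounce_buffer()
 * returns 0 and the I/O is staged through a bounce buffer; entries that
 * exactly tile whole pages make it return -1 and the list is used as-is.
 */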
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len,
						int write)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;
	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);

	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	sg_init_table(bounce_sgl, num_pages);
	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}

/* Disgusting wrapper functions */
static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
{
	void *addr = kmap_atomic(sg_page(sgl + idx));
	return (unsigned long)addr;
}

static inline void sg_kunmap_atomic(unsigned long addr)
{
	kunmap_atomic((void *)addr);
}
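/*
 * Note: the copy loops below run with local interrupts disabled, most
 * likely so that an interrupt handler on the same CPU cannot take
 * additional atomic kmaps while several mappings are held across the
 * copy; that keeps the per-CPU kmap_atomic slots consistent on 32-bit
 * HIGHMEM configurations.
 */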
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count,
					    unsigned int bounce_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr = sg_kmap_atomic(bounce_sgl, j);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				sg_kunmap_atomic(bounce_addr);
				j++;

				/*
				 * It is possible that the number of elements
				 * in the bounce buffer may not be equal to
				 * the number of elements in the original
				 * scatter list. Handle this correctly.
				 */

				if (j == bounce_sgl_count) {
					/*
					 * We are done; cleanup and return.
					 */
					sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
					local_irq_restore(flags);
					return total_copied;
				}

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr = sg_kmap_atomic(bounce_sgl, j);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				sg_kunmap_atomic(bounce_addr);
			}
		}

		sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
	}

	local_irq_restore(flags);

	return total_copied;
}

/*
 * Assume the bounce_sgl has enough room, i.e., it was built by
 * create_bounce_buffer().
 */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr = sg_kmap_atomic(bounce_sgl, j);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				sg_kunmap_atomic(bounce_addr);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr = sg_kmap_atomic(bounce_sgl, j);

			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				sg_kunmap_atomic(bounce_addr);
			}
		}

		sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
	}

	local_irq_restore(flags);

	return total_copied;
}

static void handle_sc_creation(struct vmbus_channel *new_sc)
{
	struct hv_device *device = new_sc->primary_channel->device_obj;
	struct storvsc_device *stor_device;
	struct vmstorage_channel_properties props;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return;

	if (stor_device->open_sub_channel == false)
		return;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));

	vmbus_open(new_sc,
		   storvsc_ringbuffer_size,
		   storvsc_ringbuffer_size,
		   (void *)&props,
		   sizeof(struct vmstorage_channel_properties),
		   storvsc_on_channel_callback, new_sc);
}
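/*
 * Note on multi-channel operation: handle_multichannel_storage() below
 * caps the number of requested subchannels at the number of online CPUs
 * and asks the host to create them via VSTOR_OPERATION_CREATE_SUB_CHANNELS;
 * each new subchannel is then opened in handle_sc_creation() above with
 * the same ring buffer sizing as the primary channel.
 */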
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
	struct storvsc_device *stor_device;
	int num_cpus = num_online_cpus();
	int num_sc;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return;

	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	stor_device->open_sub_channel = true;
	/*
	 * Establish a handler for dealing with subchannels.
	 */
	vmbus_set_sc_create_callback(device->channel, handle_sc_creation);

	/*
	 * Check to see if sub-channels have already been created. This
	 * can happen when this driver is re-loaded after unloading.
	 */

	if (vmbus_are_subchannels_present(device->channel))
		return;

	stor_device->open_sub_channel = false;
	/*
	 * Request the host to create sub-channels.
	 */
	memset(request, 0, sizeof(struct storvsc_cmd_request));
	init_completion(&request->wait_event);
	vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->sub_channel_count = num_sc;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
			       vmscsi_size_delta),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return;

	t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
	if (t == 0)
		return;

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		return;

	/*
	 * Now that we created the sub-channels, invoke the check; this
	 * may trigger the callback.
	 */
	stor_device->open_sub_channel = true;
	vmbus_are_subchannels_present(device->channel);
}

static int storvsc_channel_init(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;
	int max_chns;
	bool process_sub_channels = false;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	/*
	 * Now, initiate the vsc/vsp initialization protocol on the open
	 * channel
	 */
	memset(request, 0, sizeof(struct storvsc_cmd_request));
	init_completion(&request->wait_event);
	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
			       vmscsi_size_delta),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;
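
	/*
	 * The rest of the handshake follows the same pattern as the
	 * BEGIN_INITIALIZATION exchange above: QUERY_PROTOCOL_VERSION,
	 * then QUERY_PROPERTIES, then END_INITIALIZATION, each sent with
	 * REQUEST_COMPLETION_FLAG and given 5 seconds to complete.
	 */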

	/* reuse the packet for version range supported */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	vstor_packet->version.major_minor =
		storvsc_get_version(vmstor_current_major, vmstor_current_minor);

	/*
	 * The revision number is only used in Windows; set it to 0.
	 */
	vstor_packet->version.revision = 0;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
			       vmscsi_size_delta),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;


	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
			       vmscsi_size_delta),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;

	/*
	 * Check to see if multi-channel support is there.
	 * Hosts that implement protocol version of 5.1 and above
	 * support multi-channel.
	 */
	max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
	if ((vmbus_proto_version != VERSION_WIN7) &&
	    (vmbus_proto_version != VERSION_WS2008)) {
		if (vstor_packet->storage_channel_properties.flags &
		    STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
			process_sub_channels = true;
	}

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
			       vmscsi_size_delta),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;

	if (process_sub_channels)
		handle_multichannel_storage(device, max_chns);


cleanup:
	return ret;
}
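/*
 * Note: in storvsc_handle_error() below, the ABORTED + AUTOSENSE case is
 * keyed to additional sense code 0x2a/0x09 ("capacity data has changed");
 * the I/O is requeued and a rescan of the LUN is scheduled so the new
 * capacity is picked up.
 */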
static void storvsc_handle_error(struct vmscsi_request *vm_srb,
				struct scsi_cmnd *scmnd,
				struct Scsi_Host *host,
				u8 asc, u8 ascq)
{
	struct storvsc_scan_work *wrk;
	void (*process_err_fn)(struct work_struct *work);
	bool do_work = false;

	switch (vm_srb->srb_status) {
	case SRB_STATUS_ERROR:
		/*
		 * If there is an error, offline the device since all
		 * error recovery strategies would have already been
		 * deployed on the host side. However, if the command
		 * were a pass-through command, deal with it appropriately.
		 */
		switch (scmnd->cmnd[0]) {
		case ATA_16:
		case ATA_12:
			set_host_byte(scmnd, DID_PASSTHROUGH);
			break;
		/*
		 * On some Windows hosts the TEST_UNIT_READY command can
		 * return SRB_STATUS_ERROR; let the upper level code deal
		 * with it based on the sense information.
		 */
		case TEST_UNIT_READY:
			break;
		default:
			set_host_byte(scmnd, DID_TARGET_FAILURE);
		}
		break;
	case SRB_STATUS_INVALID_LUN:
		do_work = true;
		process_err_fn = storvsc_remove_lun;
		break;
	case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
		if ((asc == 0x2a) && (ascq == 0x9)) {
			do_work = true;
			process_err_fn = storvsc_device_scan;
			/*
			 * Retry the I/O that triggered this.
			 */
			set_host_byte(scmnd, DID_REQUEUE);
		}
		break;
	}

	if (!do_work)
		return;

	/*
	 * Schedule work to process this error.
	 */
	wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
	if (!wrk) {
		set_host_byte(scmnd, DID_TARGET_FAILURE);
		return;
	}

	wrk->host = host;
	wrk->lun = vm_srb->lun;
	INIT_WORK(&wrk->work, process_err_fn);
	schedule_work(&wrk->work);
}


static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	struct Scsi_Host *host;
	struct storvsc_device *stor_dev;
	struct hv_device *dev = host_dev->dev;

	stor_dev = get_in_stor_device(dev);
	host = stor_dev->host;

	vm_srb = &cmd_request->vstor_packet.vm_srb;
	if (cmd_request->bounce_sgl_count) {
		if (vm_srb->data_in == READ_TYPE)
			copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd),
					cmd_request->bounce_sgl_count);
		destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);
	}

	scmnd->result = vm_srb->scsi_status;

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr(scmnd->device, "storvsc",
					     &sense_hdr);
	}

	if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
		storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
				     sense_hdr.ascq);

	scsi_set_resid(scmnd,
		cmd_request->data_buffer.len -
		vm_srb->data_transfer_length);

	scmnd->scsi_done(scmnd);
}
static void storvsc_on_io_completion(struct hv_device *device,
				  struct vstor_packet *vstor_packet,
				  struct storvsc_cmd_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *stor_pkt;

	stor_device = hv_get_drvdata(device);
	stor_pkt = &request->vstor_packet;

	/*
	 * The current SCSI handling on the host side does
	 * not correctly handle:
	 * INQUIRY command with page code parameter set to 0x80
	 * MODE_SENSE command with cmd[2] == 0x1c
	 *
	 * Set up the srb and scsi status so this won't be fatal.
	 * We do this so we can distinguish truly fatal failures
	 * (srb status == 0x4) and off-line the device in that case.
	 */

	if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
	   (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
		vstor_packet->vm_srb.scsi_status = 0;
		vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
	}


	/* Copy over the status...etc */
	stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
	stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
	stor_pkt->vm_srb.sense_info_length =
		vstor_packet->vm_srb.sense_info_length;


	if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
		/* CHECK_CONDITION */
		if (vstor_packet->vm_srb.srb_status &
			SRB_STATUS_AUTOSENSE_VALID) {
			/* autosense data available */

			memcpy(request->cmd->sense_buffer,
			       vstor_packet->vm_srb.sense_data,
			       vstor_packet->vm_srb.sense_info_length);

		}
	}

	stor_pkt->vm_srb.data_transfer_length =
		vstor_packet->vm_srb.data_transfer_length;

	storvsc_command_completion(request);

	if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
	    stor_device->drain_notify)
		wake_up(&stor_device->waiting_to_drain);


}

static void storvsc_on_receive(struct hv_device *device,
			     struct vstor_packet *vstor_packet,
			     struct storvsc_cmd_request *request)
{
	struct storvsc_scan_work *work;
	struct storvsc_device *stor_device;

	switch (vstor_packet->operation) {
	case VSTOR_OPERATION_COMPLETE_IO:
		storvsc_on_io_completion(device, vstor_packet, request);
		break;

	case VSTOR_OPERATION_REMOVE_DEVICE:
	case VSTOR_OPERATION_ENUMERATE_BUS:
		stor_device = get_in_stor_device(device);
		work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
		if (!work)
			return;

		INIT_WORK(&work->work, storvsc_bus_scan);
		work->host = stor_device->host;
		schedule_work(&work->work);
		break;

	default:
		break;
	}
}

static void storvsc_on_channel_callback(void *context)
{
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
	struct storvsc_device *stor_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
	struct storvsc_cmd_request *request;
	int ret;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	stor_device = get_in_stor_device(device);
	if (!stor_device)
		return;

	do {
		ret = vmbus_recvpacket(channel, packet,
				       ALIGN((sizeof(struct vstor_packet) -
					      vmscsi_size_delta), 8),
				       &bytes_recvd, &request_id);
		if (ret == 0 && bytes_recvd > 0) {

			request = (struct storvsc_cmd_request *)
					(unsigned long)request_id;

			if ((request == &stor_device->init_request) ||
			    (request == &stor_device->reset_request)) {

				memcpy(&request->vstor_packet, packet,
				       (sizeof(struct vstor_packet) -
					vmscsi_size_delta));
				complete(&request->wait_event);
			} else {
				storvsc_on_receive(device,
						(struct vstor_packet *)packet,
						request);
			}
		} else {
			break;
		}
	} while (1);
}
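/*
 * Note: the 64-bit request_id that VMBus echoes back in each completion
 * is simply the kernel pointer of the originating storvsc_cmd_request
 * (cast on the send side in storvsc_do_io() and the init/reset paths),
 * which is why storvsc_on_channel_callback() above can recover the
 * request with a cast and compare it against the embedded init and reset
 * requests.
 */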
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
	struct vmstorage_channel_properties props;
	int ret;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));

	ret = vmbus_open(device->channel,
			 ring_size,
			 ring_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
			 storvsc_on_channel_callback, device->channel);

	if (ret != 0)
		return ret;

	ret = storvsc_channel_init(device);

	return ret;
}

static int storvsc_dev_remove(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	unsigned long flags;

	stor_device = hv_get_drvdata(device);

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	stor_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	storvsc_wait_to_drain(stor_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Close the channel */
	vmbus_close(device->channel);

	kfree(stor_device);
	return 0;
}
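/*
 * Note: storvsc_do_io() below calls vmbus_get_outgoing_channel() to pick
 * either the primary channel or one of the subchannels created in
 * handle_multichannel_storage(), spreading I/O submission across channels
 * (and hence CPUs) when the host supports multi-channel.
 */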
static int storvsc_do_io(struct hv_device *device,
			 struct storvsc_cmd_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *vstor_packet;
	struct vmbus_channel *outgoing_channel;
	int ret = 0;

	vstor_packet = &request->vstor_packet;
	stor_device = get_out_stor_device(device);

	if (!stor_device)
		return -ENODEV;


	request->device  = device;
	/*
	 * Select an appropriate channel to send the request out.
	 */

	outgoing_channel = vmbus_get_outgoing_channel(device->channel);


	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
					vmscsi_size_delta);


	vstor_packet->vm_srb.sense_info_length = sense_buffer_size;


	vstor_packet->vm_srb.data_transfer_length =
		request->data_buffer.len;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

	if (request->data_buffer.len) {
		ret = vmbus_sendpacket_multipagebuffer(outgoing_channel,
				&request->data_buffer,
				vstor_packet,
				(sizeof(struct vstor_packet) -
				vmscsi_size_delta),
				(unsigned long)request);
	} else {
		ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
				vmscsi_size_delta),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		return ret;

	atomic_inc(&stor_device->num_outstanding_req);

	return ret;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);

	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));

	sdevice->no_write_same = 1;

	/*
	 * Add blist flags to permit the reading of the VPD pages even when
	 * the target may claim SPC-2 compliance. MSFT targets currently
	 * claim SPC-2 compliance while they implement post SPC-2 features.
	 * With this patch we can correctly handle WRITE_SAME_16 issues.
	 */
	sdevice->sdev_bflags |= msft_blist_flags;

	return 0;
}
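/*
 * Illustrative example for storvsc_get_chs() below: the geometry is
 * synthetic, so for a 1 GiB disk (2097152 512-byte sectors) it reports
 * 255 heads and 63 sectors per track, giving
 * 2097152 / (255 * 63) = 130 cylinders.
 */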
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;      /* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}

static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	struct hv_device *device = host_dev->dev;

	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;


	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return FAILED;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
				vmscsi_size_delta),
			       (unsigned long)&stor_device->reset_request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return FAILED;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0)
		return TIMEOUT_ERROR;


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us.
	 * There is a potential race here where the host may be in
	 * the process of responding when we return from here.
	 * Just wait for all in-transit packets to be accounted for
	 * before we return from here.
	 */
	storvsc_wait_to_drain(stor_device);

	return SUCCESS;
}

/*
 * The host guarantees to respond to each command, although I/O latencies might
 * be unbounded on Azure.  Reset the timer unconditionally to give the host a
 * chance to perform EH.
 */
static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return BLK_EH_RESET_TIMER;
}

static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
	bool allowed = true;
	u8 scsi_op = scmnd->cmnd[0];

	switch (scsi_op) {
	/* the host does not handle WRITE_SAME; log accidental usage */
	case WRITE_SAME:
	/*
	 * smartd sends this command and the host does not handle
	 * this. So, don't send it.
	 */
	case SET_WINDOW:
		scmnd->result = ILLEGAL_REQUEST << 16;
		allowed = false;
		break;
	default:
		break;
	}
	return allowed;
}
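/*
 * storvsc_queuecommand() below is the main submission path: it filters
 * commands the host is known to mishandle (on legacy hosts), builds the
 * vmscsi_request from the scsi_cmnd, translates the scatterlist into the
 * multipage buffer's pfn_array (staging through a bounce buffer when the
 * list has holes), and hands the packet to storvsc_do_io().
 */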
static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev = shost_priv(host);
	struct hv_device *dev = host_dev->dev;
	struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;
	struct vmscsi_request *vm_srb;

	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
		/*
		 * On legacy hosts filter unimplemented commands.
		 * Future hosts are expected to correctly handle
		 * unsupported commands. Furthermore, it is
		 * possible that some of the currently
		 * unsupported commands may be supported in
		 * future versions of the host.
		 */
		if (!storvsc_scsi_cmd_ok(scmnd)) {
			scmnd->scsi_done(scmnd);
			return 0;
		}
	}

	/* Setup the cmd request */
	cmd_request->cmd = scmnd;

	vm_srb = &cmd_request->vstor_packet.vm_srb;
	vm_srb->win8_extension.time_out_value = 60;

	vm_srb->win8_extension.srb_flags |=
		(SRB_FLAGS_QUEUE_ACTION_ENABLE |
		SRB_FLAGS_DISABLE_SYNCH_TRANSFER);

	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
		break;
	default:
		vm_srb->data_in = UNKNOWN_TYPE;
		vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
						     SRB_FLAGS_DATA_OUT);
		break;
	}


	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	cmd_request->data_buffer.len = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd),
						     vm_srb->data_in);
			if (!cmd_request->bounce_sgl)
				return SCSI_MLQUEUE_HOST_BUSY;

			cmd_request->bounce_sgl_count =
				ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			if (vm_srb->data_in == WRITE_TYPE)
				copy_to_bounce_buffer(sgl,
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		cmd_request->data_buffer.offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++)
			cmd_request->data_buffer.pfn_array[i] =
				page_to_pfn(sg_page((&sgl[i])));

	} else if (scsi_sglist(scmnd)) {
		cmd_request->data_buffer.offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		cmd_request->data_buffer.pfn_array[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}

	/* Invokes the vsc to start an IO */
	ret = storvsc_do_io(dev, cmd_request);

	if (ret == -EAGAIN) {
		/* no more space */

		if (cmd_request->bounce_sgl_count)
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);

		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	return 0;
}
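/*
 * Note: a -EAGAIN from storvsc_do_io() in storvsc_queuecommand() above
 * indicates the VMBus ring buffer is full; returning
 * SCSI_MLQUEUE_DEVICE_BUSY lets the SCSI midlayer back off and retry the
 * command rather than failing it.
 */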
static struct scsi_host_template scsi_driver = {
	.module =		THIS_MODULE,
	.name =			"storvsc_host_t",
	.cmd_size =		sizeof(struct storvsc_cmd_request),
	.bios_param =		storvsc_get_chs,
	.queuecommand =		storvsc_queuecommand,
	.eh_host_reset_handler =	storvsc_host_reset_handler,
	.proc_name =		"storvsc_host",
	.eh_timed_out =		storvsc_eh_timed_out,
	.slave_configure =	storvsc_device_configure,
	.cmd_per_lun =		255,
	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id =		-1,
	/* no use setting to 0 since ll_blk_rw reset it to 1 */
	/* currently 32 */
	.sg_tablesize =		MAX_MULTIPAGE_BUFFER_COUNT,
	.use_clustering =	DISABLE_CLUSTERING,
	/* Make sure we don't get a sg segment that crosses a page boundary */
	.dma_boundary =		PAGE_SIZE-1,
	.no_write_same =	1,
};

enum {
	SCSI_GUID,
	IDE_GUID,
	SFC_GUID,
};

static const struct hv_vmbus_device_id id_table[] = {
	/* SCSI guid */
	{ HV_SCSI_GUID,
	  .driver_data = SCSI_GUID
	},
	/* IDE guid */
	{ HV_IDE_GUID,
	  .driver_data = IDE_GUID
	},
	/* Fibre Channel GUID */
	{
	  HV_SYNTHFC_GUID,
	  .driver_data = SFC_GUID
	},
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static int storvsc_probe(struct hv_device *device,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
	int target = 0;
	struct storvsc_device *stor_device;

	/*
	 * Based on the windows host we are running on,
	 * set state to properly communicate with the host.
	 */

	switch (vmbus_proto_version) {
	case VERSION_WS2008:
	case VERSION_WIN7:
		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
		vmstor_current_major = VMSTOR_WIN7_MAJOR;
		vmstor_current_minor = VMSTOR_WIN7_MINOR;
		break;
	default:
		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
		vmscsi_size_delta = 0;
		vmstor_current_major = VMSTOR_WIN8_MAJOR;
		vmstor_current_minor = VMSTOR_WIN8_MINOR;
		break;
	}

	if (dev_id->driver_data == SFC_GUID)
		scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
					 STORVSC_FC_MAX_TARGETS);
	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	host_dev = shost_priv(host);
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;


	stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
	if (!stor_device) {
		ret = -ENOMEM;
		goto err_out0;
	}

	stor_device->destroy = false;
	stor_device->open_sub_channel = false;
	init_waitqueue_head(&stor_device->waiting_to_drain);
	stor_device->device = device;
	stor_device->host = host;
	hv_set_drvdata(device, stor_device);

	stor_device->port_number = host->host_no;
	ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
	if (ret)
		goto err_out1;

	host_dev->path = stor_device->path_id;
	host_dev->target = stor_device->target_id;

	switch (dev_id->driver_data) {
	case SFC_GUID:
		host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
		host->max_id = STORVSC_FC_MAX_TARGETS;
		host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
		break;

	case SCSI_GUID:
		host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
		host->max_id = STORVSC_MAX_TARGETS;
		host->max_channel = STORVSC_MAX_CHANNELS - 1;
		break;

	default:
		host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
		host->max_id = STORVSC_IDE_MAX_TARGETS;
		host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
		break;
	}
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0)
		goto err_out2;
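	/*
	 * Note: for IDE devices the target id below is derived from two
	 * bytes of the VMBus device instance GUID; the assumption is that
	 * the host encodes the IDE device location in those bytes, so the
	 * single disk lands at a stable SCSI target.
	 */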
	if (!dev_is_ide) {
		scsi_scan_host(host);
	} else {
		target = (device->dev_instance.b[5] << 8 |
			 device->dev_instance.b[4]);
		ret = scsi_add_device(host, 0, target, 0);
		if (ret) {
			scsi_remove_host(host);
			goto err_out2;
		}
	}
	return 0;

err_out2:
	/*
	 * Once we have connected with the host, we need to invoke
	 * storvsc_dev_remove() to rollback this state; this call also
	 * frees up the stor_device, hence the jump around the
	 * err_out1 label.
	 */
	storvsc_dev_remove(device);
	goto err_out0;

err_out1:
	kfree(stor_device);

err_out0:
	scsi_host_put(host);
	return ret;
}

static int storvsc_remove(struct hv_device *dev)
{
	struct storvsc_device *stor_device = hv_get_drvdata(dev);
	struct Scsi_Host *host = stor_device->host;

	scsi_remove_host(host);
	storvsc_dev_remove(dev);
	scsi_host_put(host);

	return 0;
}

static struct hv_driver storvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = storvsc_probe,
	.remove = storvsc_remove,
};

static int __init storvsc_drv_init(void)
{
	u32 max_outstanding_req_per_channel;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	max_outstanding_req_per_channel =
		((storvsc_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		sizeof(struct vstor_packet) + sizeof(u64) -
		vmscsi_size_delta,
		sizeof(u64)));

	if (max_outstanding_req_per_channel <
	    STORVSC_MAX_IO_REQUESTS)
		return -EINVAL;

	return vmbus_driver_register(&storvsc_drv);
}

static void __exit storvsc_drv_exit(void)
{
	vmbus_driver_unregister(&storvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);