1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2009, Microsoft Corporation. 4 * 5 * Authors: 6 * Haiyang Zhang <haiyangz@microsoft.com> 7 * Hank Janssen <hjanssen@microsoft.com> 8 * K. Y. Srinivasan <kys@microsoft.com> 9 */ 10 11 #include <linux/kernel.h> 12 #include <linux/wait.h> 13 #include <linux/sched.h> 14 #include <linux/completion.h> 15 #include <linux/string.h> 16 #include <linux/mm.h> 17 #include <linux/delay.h> 18 #include <linux/init.h> 19 #include <linux/slab.h> 20 #include <linux/module.h> 21 #include <linux/device.h> 22 #include <linux/hyperv.h> 23 #include <linux/blkdev.h> 24 #include <scsi/scsi.h> 25 #include <scsi/scsi_cmnd.h> 26 #include <scsi/scsi_host.h> 27 #include <scsi/scsi_device.h> 28 #include <scsi/scsi_tcq.h> 29 #include <scsi/scsi_eh.h> 30 #include <scsi/scsi_devinfo.h> 31 #include <scsi/scsi_dbg.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/scsi_transport.h> 34 35 /* 36 * All wire protocol details (storage protocol between the guest and the host) 37 * are consolidated here. 38 * 39 * Begin protocol definitions. 40 */ 41 42 /* 43 * Version history: 44 * V1 Beta: 0.1 45 * V1 RC < 2008/1/31: 1.0 46 * V1 RC > 2008/1/31: 2.0 47 * Win7: 4.2 48 * Win8: 5.1 49 * Win8.1: 6.0 50 * Win10: 6.2 51 */ 52 53 #define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \ 54 (((MINOR_) & 0xff))) 55 56 #define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0) 57 #define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2) 58 #define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1) 59 #define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0) 60 #define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2) 61 62 /* Packet structure describing virtual storage requests. */ 63 enum vstor_packet_operation { 64 VSTOR_OPERATION_COMPLETE_IO = 1, 65 VSTOR_OPERATION_REMOVE_DEVICE = 2, 66 VSTOR_OPERATION_EXECUTE_SRB = 3, 67 VSTOR_OPERATION_RESET_LUN = 4, 68 VSTOR_OPERATION_RESET_ADAPTER = 5, 69 VSTOR_OPERATION_RESET_BUS = 6, 70 VSTOR_OPERATION_BEGIN_INITIALIZATION = 7, 71 VSTOR_OPERATION_END_INITIALIZATION = 8, 72 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9, 73 VSTOR_OPERATION_QUERY_PROPERTIES = 10, 74 VSTOR_OPERATION_ENUMERATE_BUS = 11, 75 VSTOR_OPERATION_FCHBA_DATA = 12, 76 VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13, 77 VSTOR_OPERATION_MAXIMUM = 13 78 }; 79 80 /* 81 * WWN packet for Fibre Channel HBA 82 */ 83 84 struct hv_fc_wwn_packet { 85 u8 primary_active; 86 u8 reserved1[3]; 87 u8 primary_port_wwn[8]; 88 u8 primary_node_wwn[8]; 89 u8 secondary_port_wwn[8]; 90 u8 secondary_node_wwn[8]; 91 }; 92 93 94 95 /* 96 * SRB Flag Bits 97 */ 98 99 #define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002 100 #define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004 101 #define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008 102 #define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010 103 #define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020 104 #define SRB_FLAGS_DATA_IN 0x00000040 105 #define SRB_FLAGS_DATA_OUT 0x00000080 106 #define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000 107 #define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT) 108 #define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100 109 #define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200 110 #define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400 111 112 /* 113 * This flag indicates the request is part of the workflow for processing a D3. 114 */ 115 #define SRB_FLAGS_D3_PROCESSING 0x00000800 116 #define SRB_FLAGS_IS_ACTIVE 0x00010000 117 #define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000 118 #define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000 119 #define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000 120 #define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000 121 #define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000 122 #define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000 123 #define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000 124 #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 125 #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 126 127 #define SP_UNTAGGED ((unsigned char) ~0) 128 #define SRB_SIMPLE_TAG_REQUEST 0x20 129 130 /* 131 * Platform neutral description of a scsi request - 132 * this remains the same across the write regardless of 32/64 bit 133 * note: it's patterned off the SCSI_PASS_THROUGH structure 134 */ 135 #define STORVSC_MAX_CMD_LEN 0x10 136 137 #define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14 138 #define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12 139 140 #define STORVSC_SENSE_BUFFER_SIZE 0x14 141 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14 142 143 /* 144 * Sense buffer size changed in win8; have a run-time 145 * variable to track the size we should use. This value will 146 * likely change during protocol negotiation but it is valid 147 * to start by assuming pre-Win8. 148 */ 149 static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; 150 151 /* 152 * The storage protocol version is determined during the 153 * initial exchange with the host. It will indicate which 154 * storage functionality is available in the host. 155 */ 156 static int vmstor_proto_version; 157 158 #define STORVSC_LOGGING_NONE 0 159 #define STORVSC_LOGGING_ERROR 1 160 #define STORVSC_LOGGING_WARN 2 161 162 static int logging_level = STORVSC_LOGGING_ERROR; 163 module_param(logging_level, int, S_IRUGO|S_IWUSR); 164 MODULE_PARM_DESC(logging_level, 165 "Logging level, 0 - None, 1 - Error (default), 2 - Warning."); 166 167 static inline bool do_logging(int level) 168 { 169 return logging_level >= level; 170 } 171 172 #define storvsc_log(dev, level, fmt, ...) \ 173 do { \ 174 if (do_logging(level)) \ 175 dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \ 176 } while (0) 177 178 struct vmscsi_win8_extension { 179 /* 180 * The following were added in Windows 8 181 */ 182 u16 reserve; 183 u8 queue_tag; 184 u8 queue_action; 185 u32 srb_flags; 186 u32 time_out_value; 187 u32 queue_sort_ey; 188 } __packed; 189 190 struct vmscsi_request { 191 u16 length; 192 u8 srb_status; 193 u8 scsi_status; 194 195 u8 port_number; 196 u8 path_id; 197 u8 target_id; 198 u8 lun; 199 200 u8 cdb_length; 201 u8 sense_info_length; 202 u8 data_in; 203 u8 reserved; 204 205 u32 data_transfer_length; 206 207 union { 208 u8 cdb[STORVSC_MAX_CMD_LEN]; 209 u8 sense_data[STORVSC_SENSE_BUFFER_SIZE]; 210 u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING]; 211 }; 212 /* 213 * The following was added in win8. 214 */ 215 struct vmscsi_win8_extension win8_extension; 216 217 } __attribute((packed)); 218 219 /* 220 * The list of storage protocols in order of preference. 221 */ 222 struct vmstor_protocol { 223 int protocol_version; 224 int sense_buffer_size; 225 int vmscsi_size_delta; 226 }; 227 228 229 static const struct vmstor_protocol vmstor_protocols[] = { 230 { 231 VMSTOR_PROTO_VERSION_WIN10, 232 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 233 0 234 }, 235 { 236 VMSTOR_PROTO_VERSION_WIN8_1, 237 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 238 0 239 }, 240 { 241 VMSTOR_PROTO_VERSION_WIN8, 242 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 243 0 244 }, 245 { 246 VMSTOR_PROTO_VERSION_WIN7, 247 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, 248 sizeof(struct vmscsi_win8_extension), 249 }, 250 { 251 VMSTOR_PROTO_VERSION_WIN6, 252 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, 253 sizeof(struct vmscsi_win8_extension), 254 } 255 }; 256 257 258 /* 259 * This structure is sent during the initialization phase to get the different 260 * properties of the channel. 261 */ 262 263 #define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1 264 265 struct vmstorage_channel_properties { 266 u32 reserved; 267 u16 max_channel_cnt; 268 u16 reserved1; 269 270 u32 flags; 271 u32 max_transfer_bytes; 272 273 u64 reserved2; 274 } __packed; 275 276 /* This structure is sent during the storage protocol negotiations. */ 277 struct vmstorage_protocol_version { 278 /* Major (MSW) and minor (LSW) version numbers. */ 279 u16 major_minor; 280 281 /* 282 * Revision number is auto-incremented whenever this file is changed 283 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not 284 * definitely indicate incompatibility--but it does indicate mismatched 285 * builds. 286 * This is only used on the windows side. Just set it to 0. 287 */ 288 u16 revision; 289 } __packed; 290 291 /* Channel Property Flags */ 292 #define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1 293 #define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2 294 295 struct vstor_packet { 296 /* Requested operation type */ 297 enum vstor_packet_operation operation; 298 299 /* Flags - see below for values */ 300 u32 flags; 301 302 /* Status of the request returned from the server side. */ 303 u32 status; 304 305 /* Data payload area */ 306 union { 307 /* 308 * Structure used to forward SCSI commands from the 309 * client to the server. 310 */ 311 struct vmscsi_request vm_srb; 312 313 /* Structure used to query channel properties. */ 314 struct vmstorage_channel_properties storage_channel_properties; 315 316 /* Used during version negotiations. */ 317 struct vmstorage_protocol_version version; 318 319 /* Fibre channel address packet */ 320 struct hv_fc_wwn_packet wwn_packet; 321 322 /* Number of sub-channels to create */ 323 u16 sub_channel_count; 324 325 /* This will be the maximum of the union members */ 326 u8 buffer[0x34]; 327 }; 328 } __packed; 329 330 /* 331 * Packet Flags: 332 * 333 * This flag indicates that the server should send back a completion for this 334 * packet. 335 */ 336 337 #define REQUEST_COMPLETION_FLAG 0x1 338 339 /* Matches Windows-end */ 340 enum storvsc_request_type { 341 WRITE_TYPE = 0, 342 READ_TYPE, 343 UNKNOWN_TYPE, 344 }; 345 346 /* 347 * SRB status codes and masks; a subset of the codes used here. 348 */ 349 350 #define SRB_STATUS_AUTOSENSE_VALID 0x80 351 #define SRB_STATUS_QUEUE_FROZEN 0x40 352 #define SRB_STATUS_INVALID_LUN 0x20 353 #define SRB_STATUS_SUCCESS 0x01 354 #define SRB_STATUS_ABORTED 0x02 355 #define SRB_STATUS_ERROR 0x04 356 #define SRB_STATUS_DATA_OVERRUN 0x12 357 358 #define SRB_STATUS(status) \ 359 (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) 360 /* 361 * This is the end of Protocol specific defines. 362 */ 363 364 static int storvsc_ringbuffer_size = (128 * 1024); 365 static u32 max_outstanding_req_per_channel; 366 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth); 367 368 static int storvsc_vcpus_per_sub_channel = 4; 369 static unsigned int storvsc_max_hw_queues; 370 371 module_param(storvsc_ringbuffer_size, int, S_IRUGO); 372 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); 373 374 module_param(storvsc_max_hw_queues, uint, 0644); 375 MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues"); 376 377 module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO); 378 MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); 379 380 static int ring_avail_percent_lowater = 10; 381 module_param(ring_avail_percent_lowater, int, S_IRUGO); 382 MODULE_PARM_DESC(ring_avail_percent_lowater, 383 "Select a channel if available ring size > this in percent"); 384 385 /* 386 * Timeout in seconds for all devices managed by this driver. 387 */ 388 static int storvsc_timeout = 180; 389 390 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 391 static struct scsi_transport_template *fc_transport_template; 392 #endif 393 394 static struct scsi_host_template scsi_driver; 395 static void storvsc_on_channel_callback(void *context); 396 397 #define STORVSC_MAX_LUNS_PER_TARGET 255 398 #define STORVSC_MAX_TARGETS 2 399 #define STORVSC_MAX_CHANNELS 8 400 401 #define STORVSC_FC_MAX_LUNS_PER_TARGET 255 402 #define STORVSC_FC_MAX_TARGETS 128 403 #define STORVSC_FC_MAX_CHANNELS 8 404 405 #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 406 #define STORVSC_IDE_MAX_TARGETS 1 407 #define STORVSC_IDE_MAX_CHANNELS 1 408 409 /* 410 * Upper bound on the size of a storvsc packet. vmscsi_size_delta is not 411 * included in the calculation because it is set after STORVSC_MAX_PKT_SIZE 412 * is used in storvsc_connect_to_vsp 413 */ 414 #define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\ 415 sizeof(struct vstor_packet)) 416 417 struct storvsc_cmd_request { 418 struct scsi_cmnd *cmd; 419 420 struct hv_device *device; 421 422 /* Synchronize the request/response if needed */ 423 struct completion wait_event; 424 425 struct vmbus_channel_packet_multipage_buffer mpb; 426 struct vmbus_packet_mpb_array *payload; 427 u32 payload_sz; 428 429 struct vstor_packet vstor_packet; 430 }; 431 432 433 /* A storvsc device is a device object that contains a vmbus channel */ 434 struct storvsc_device { 435 struct hv_device *device; 436 437 bool destroy; 438 bool drain_notify; 439 atomic_t num_outstanding_req; 440 struct Scsi_Host *host; 441 442 wait_queue_head_t waiting_to_drain; 443 444 /* 445 * Each unique Port/Path/Target represents 1 channel ie scsi 446 * controller. In reality, the pathid, targetid is always 0 447 * and the port is set by us 448 */ 449 unsigned int port_number; 450 unsigned char path_id; 451 unsigned char target_id; 452 453 /* 454 * The size of the vmscsi_request has changed in win8. The 455 * additional size is because of new elements added to the 456 * structure. These elements are valid only when we are talking 457 * to a win8 host. 458 * Track the correction to size we need to apply. This value 459 * will likely change during protocol negotiation but it is 460 * valid to start by assuming pre-Win8. 461 */ 462 int vmscsi_size_delta; 463 464 /* 465 * Max I/O, the device can support. 466 */ 467 u32 max_transfer_bytes; 468 /* 469 * Number of sub-channels we will open. 470 */ 471 u16 num_sc; 472 struct vmbus_channel **stor_chns; 473 /* 474 * Mask of CPUs bound to subchannels. 475 */ 476 struct cpumask alloced_cpus; 477 /* 478 * Serializes modifications of stor_chns[] from storvsc_do_io() 479 * and storvsc_change_target_cpu(). 480 */ 481 spinlock_t lock; 482 /* Used for vsc/vsp channel reset process */ 483 struct storvsc_cmd_request init_request; 484 struct storvsc_cmd_request reset_request; 485 /* 486 * Currently active port and node names for FC devices. 487 */ 488 u64 node_name; 489 u64 port_name; 490 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 491 struct fc_rport *rport; 492 #endif 493 }; 494 495 struct hv_host_device { 496 struct hv_device *dev; 497 unsigned int port; 498 unsigned char path; 499 unsigned char target; 500 struct workqueue_struct *handle_error_wq; 501 struct work_struct host_scan_work; 502 struct Scsi_Host *host; 503 }; 504 505 struct storvsc_scan_work { 506 struct work_struct work; 507 struct Scsi_Host *host; 508 u8 lun; 509 u8 tgt_id; 510 }; 511 512 static void storvsc_device_scan(struct work_struct *work) 513 { 514 struct storvsc_scan_work *wrk; 515 struct scsi_device *sdev; 516 517 wrk = container_of(work, struct storvsc_scan_work, work); 518 519 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); 520 if (!sdev) 521 goto done; 522 scsi_rescan_device(&sdev->sdev_gendev); 523 scsi_device_put(sdev); 524 525 done: 526 kfree(wrk); 527 } 528 529 static void storvsc_host_scan(struct work_struct *work) 530 { 531 struct Scsi_Host *host; 532 struct scsi_device *sdev; 533 struct hv_host_device *host_device = 534 container_of(work, struct hv_host_device, host_scan_work); 535 536 host = host_device->host; 537 /* 538 * Before scanning the host, first check to see if any of the 539 * currrently known devices have been hot removed. We issue a 540 * "unit ready" command against all currently known devices. 541 * This I/O will result in an error for devices that have been 542 * removed. As part of handling the I/O error, we remove the device. 543 * 544 * When a LUN is added or removed, the host sends us a signal to 545 * scan the host. Thus we are forced to discover the LUNs that 546 * may have been removed this way. 547 */ 548 mutex_lock(&host->scan_mutex); 549 shost_for_each_device(sdev, host) 550 scsi_test_unit_ready(sdev, 1, 1, NULL); 551 mutex_unlock(&host->scan_mutex); 552 /* 553 * Now scan the host to discover LUNs that may have been added. 554 */ 555 scsi_scan_host(host); 556 } 557 558 static void storvsc_remove_lun(struct work_struct *work) 559 { 560 struct storvsc_scan_work *wrk; 561 struct scsi_device *sdev; 562 563 wrk = container_of(work, struct storvsc_scan_work, work); 564 if (!scsi_host_get(wrk->host)) 565 goto done; 566 567 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); 568 569 if (sdev) { 570 scsi_remove_device(sdev); 571 scsi_device_put(sdev); 572 } 573 scsi_host_put(wrk->host); 574 575 done: 576 kfree(wrk); 577 } 578 579 580 /* 581 * We can get incoming messages from the host that are not in response to 582 * messages that we have sent out. An example of this would be messages 583 * received by the guest to notify dynamic addition/removal of LUNs. To 584 * deal with potential race conditions where the driver may be in the 585 * midst of being unloaded when we might receive an unsolicited message 586 * from the host, we have implemented a mechanism to gurantee sequential 587 * consistency: 588 * 589 * 1) Once the device is marked as being destroyed, we will fail all 590 * outgoing messages. 591 * 2) We permit incoming messages when the device is being destroyed, 592 * only to properly account for messages already sent out. 593 */ 594 595 static inline struct storvsc_device *get_out_stor_device( 596 struct hv_device *device) 597 { 598 struct storvsc_device *stor_device; 599 600 stor_device = hv_get_drvdata(device); 601 602 if (stor_device && stor_device->destroy) 603 stor_device = NULL; 604 605 return stor_device; 606 } 607 608 609 static inline void storvsc_wait_to_drain(struct storvsc_device *dev) 610 { 611 dev->drain_notify = true; 612 wait_event(dev->waiting_to_drain, 613 atomic_read(&dev->num_outstanding_req) == 0); 614 dev->drain_notify = false; 615 } 616 617 static inline struct storvsc_device *get_in_stor_device( 618 struct hv_device *device) 619 { 620 struct storvsc_device *stor_device; 621 622 stor_device = hv_get_drvdata(device); 623 624 if (!stor_device) 625 goto get_in_err; 626 627 /* 628 * If the device is being destroyed; allow incoming 629 * traffic only to cleanup outstanding requests. 630 */ 631 632 if (stor_device->destroy && 633 (atomic_read(&stor_device->num_outstanding_req) == 0)) 634 stor_device = NULL; 635 636 get_in_err: 637 return stor_device; 638 639 } 640 641 static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old, 642 u32 new) 643 { 644 struct storvsc_device *stor_device; 645 struct vmbus_channel *cur_chn; 646 bool old_is_alloced = false; 647 struct hv_device *device; 648 unsigned long flags; 649 int cpu; 650 651 device = channel->primary_channel ? 652 channel->primary_channel->device_obj 653 : channel->device_obj; 654 stor_device = get_out_stor_device(device); 655 if (!stor_device) 656 return; 657 658 /* See storvsc_do_io() -> get_og_chn(). */ 659 spin_lock_irqsave(&stor_device->lock, flags); 660 661 /* 662 * Determines if the storvsc device has other channels assigned to 663 * the "old" CPU to update the alloced_cpus mask and the stor_chns 664 * array. 665 */ 666 if (device->channel != channel && device->channel->target_cpu == old) { 667 cur_chn = device->channel; 668 old_is_alloced = true; 669 goto old_is_alloced; 670 } 671 list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) { 672 if (cur_chn == channel) 673 continue; 674 if (cur_chn->target_cpu == old) { 675 old_is_alloced = true; 676 goto old_is_alloced; 677 } 678 } 679 680 old_is_alloced: 681 if (old_is_alloced) 682 WRITE_ONCE(stor_device->stor_chns[old], cur_chn); 683 else 684 cpumask_clear_cpu(old, &stor_device->alloced_cpus); 685 686 /* "Flush" the stor_chns array. */ 687 for_each_possible_cpu(cpu) { 688 if (stor_device->stor_chns[cpu] && !cpumask_test_cpu( 689 cpu, &stor_device->alloced_cpus)) 690 WRITE_ONCE(stor_device->stor_chns[cpu], NULL); 691 } 692 693 WRITE_ONCE(stor_device->stor_chns[new], channel); 694 cpumask_set_cpu(new, &stor_device->alloced_cpus); 695 696 spin_unlock_irqrestore(&stor_device->lock, flags); 697 } 698 699 static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr) 700 { 701 struct storvsc_cmd_request *request = 702 (struct storvsc_cmd_request *)(unsigned long)rqst_addr; 703 704 if (rqst_addr == VMBUS_RQST_INIT) 705 return VMBUS_RQST_INIT; 706 if (rqst_addr == VMBUS_RQST_RESET) 707 return VMBUS_RQST_RESET; 708 709 /* 710 * Cannot return an ID of 0, which is reserved for an unsolicited 711 * message from Hyper-V. 712 */ 713 return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1; 714 } 715 716 static void handle_sc_creation(struct vmbus_channel *new_sc) 717 { 718 struct hv_device *device = new_sc->primary_channel->device_obj; 719 struct device *dev = &device->device; 720 struct storvsc_device *stor_device; 721 struct vmstorage_channel_properties props; 722 int ret; 723 724 stor_device = get_out_stor_device(device); 725 if (!stor_device) 726 return; 727 728 memset(&props, 0, sizeof(struct vmstorage_channel_properties)); 729 new_sc->max_pkt_size = STORVSC_MAX_PKT_SIZE; 730 731 new_sc->next_request_id_callback = storvsc_next_request_id; 732 733 ret = vmbus_open(new_sc, 734 storvsc_ringbuffer_size, 735 storvsc_ringbuffer_size, 736 (void *)&props, 737 sizeof(struct vmstorage_channel_properties), 738 storvsc_on_channel_callback, new_sc); 739 740 /* In case vmbus_open() fails, we don't use the sub-channel. */ 741 if (ret != 0) { 742 dev_err(dev, "Failed to open sub-channel: err=%d\n", ret); 743 return; 744 } 745 746 new_sc->change_target_cpu_callback = storvsc_change_target_cpu; 747 748 /* Add the sub-channel to the array of available channels. */ 749 stor_device->stor_chns[new_sc->target_cpu] = new_sc; 750 cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus); 751 } 752 753 static void handle_multichannel_storage(struct hv_device *device, int max_chns) 754 { 755 struct device *dev = &device->device; 756 struct storvsc_device *stor_device; 757 int num_sc; 758 struct storvsc_cmd_request *request; 759 struct vstor_packet *vstor_packet; 760 int ret, t; 761 762 /* 763 * If the number of CPUs is artificially restricted, such as 764 * with maxcpus=1 on the kernel boot line, Hyper-V could offer 765 * sub-channels >= the number of CPUs. These sub-channels 766 * should not be created. The primary channel is already created 767 * and assigned to one CPU, so check against # CPUs - 1. 768 */ 769 num_sc = min((int)(num_online_cpus() - 1), max_chns); 770 if (!num_sc) 771 return; 772 773 stor_device = get_out_stor_device(device); 774 if (!stor_device) 775 return; 776 777 stor_device->num_sc = num_sc; 778 request = &stor_device->init_request; 779 vstor_packet = &request->vstor_packet; 780 781 /* 782 * Establish a handler for dealing with subchannels. 783 */ 784 vmbus_set_sc_create_callback(device->channel, handle_sc_creation); 785 786 /* 787 * Request the host to create sub-channels. 788 */ 789 memset(request, 0, sizeof(struct storvsc_cmd_request)); 790 init_completion(&request->wait_event); 791 vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS; 792 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 793 vstor_packet->sub_channel_count = num_sc; 794 795 ret = vmbus_sendpacket(device->channel, vstor_packet, 796 (sizeof(struct vstor_packet) - 797 stor_device->vmscsi_size_delta), 798 VMBUS_RQST_INIT, 799 VM_PKT_DATA_INBAND, 800 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 801 802 if (ret != 0) { 803 dev_err(dev, "Failed to create sub-channel: err=%d\n", ret); 804 return; 805 } 806 807 t = wait_for_completion_timeout(&request->wait_event, 10*HZ); 808 if (t == 0) { 809 dev_err(dev, "Failed to create sub-channel: timed out\n"); 810 return; 811 } 812 813 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || 814 vstor_packet->status != 0) { 815 dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n", 816 vstor_packet->operation, vstor_packet->status); 817 return; 818 } 819 820 /* 821 * We need to do nothing here, because vmbus_process_offer() 822 * invokes channel->sc_creation_callback, which will open and use 823 * the sub-channel(s). 824 */ 825 } 826 827 static void cache_wwn(struct storvsc_device *stor_device, 828 struct vstor_packet *vstor_packet) 829 { 830 /* 831 * Cache the currently active port and node ww names. 832 */ 833 if (vstor_packet->wwn_packet.primary_active) { 834 stor_device->node_name = 835 wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn); 836 stor_device->port_name = 837 wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn); 838 } else { 839 stor_device->node_name = 840 wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn); 841 stor_device->port_name = 842 wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn); 843 } 844 } 845 846 847 static int storvsc_execute_vstor_op(struct hv_device *device, 848 struct storvsc_cmd_request *request, 849 bool status_check) 850 { 851 struct storvsc_device *stor_device; 852 struct vstor_packet *vstor_packet; 853 int ret, t; 854 855 stor_device = get_out_stor_device(device); 856 if (!stor_device) 857 return -ENODEV; 858 859 vstor_packet = &request->vstor_packet; 860 861 init_completion(&request->wait_event); 862 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 863 864 ret = vmbus_sendpacket(device->channel, vstor_packet, 865 (sizeof(struct vstor_packet) - 866 stor_device->vmscsi_size_delta), 867 VMBUS_RQST_INIT, 868 VM_PKT_DATA_INBAND, 869 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 870 if (ret != 0) 871 return ret; 872 873 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 874 if (t == 0) 875 return -ETIMEDOUT; 876 877 if (!status_check) 878 return ret; 879 880 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || 881 vstor_packet->status != 0) 882 return -EINVAL; 883 884 return ret; 885 } 886 887 static int storvsc_channel_init(struct hv_device *device, bool is_fc) 888 { 889 struct storvsc_device *stor_device; 890 struct storvsc_cmd_request *request; 891 struct vstor_packet *vstor_packet; 892 int ret, i; 893 int max_chns; 894 bool process_sub_channels = false; 895 896 stor_device = get_out_stor_device(device); 897 if (!stor_device) 898 return -ENODEV; 899 900 request = &stor_device->init_request; 901 vstor_packet = &request->vstor_packet; 902 903 /* 904 * Now, initiate the vsc/vsp initialization protocol on the open 905 * channel 906 */ 907 memset(request, 0, sizeof(struct storvsc_cmd_request)); 908 vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION; 909 ret = storvsc_execute_vstor_op(device, request, true); 910 if (ret) 911 return ret; 912 /* 913 * Query host supported protocol version. 914 */ 915 916 for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) { 917 /* reuse the packet for version range supported */ 918 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 919 vstor_packet->operation = 920 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION; 921 922 vstor_packet->version.major_minor = 923 vmstor_protocols[i].protocol_version; 924 925 /* 926 * The revision number is only used in Windows; set it to 0. 927 */ 928 vstor_packet->version.revision = 0; 929 ret = storvsc_execute_vstor_op(device, request, false); 930 if (ret != 0) 931 return ret; 932 933 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) 934 return -EINVAL; 935 936 if (vstor_packet->status == 0) { 937 vmstor_proto_version = 938 vmstor_protocols[i].protocol_version; 939 940 sense_buffer_size = 941 vmstor_protocols[i].sense_buffer_size; 942 943 stor_device->vmscsi_size_delta = 944 vmstor_protocols[i].vmscsi_size_delta; 945 946 break; 947 } 948 } 949 950 if (vstor_packet->status != 0) 951 return -EINVAL; 952 953 954 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 955 vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES; 956 ret = storvsc_execute_vstor_op(device, request, true); 957 if (ret != 0) 958 return ret; 959 960 /* 961 * Check to see if multi-channel support is there. 962 * Hosts that implement protocol version of 5.1 and above 963 * support multi-channel. 964 */ 965 max_chns = vstor_packet->storage_channel_properties.max_channel_cnt; 966 967 /* 968 * Allocate state to manage the sub-channels. 969 * We allocate an array based on the numbers of possible CPUs 970 * (Hyper-V does not support cpu online/offline). 971 * This Array will be sparseley populated with unique 972 * channels - primary + sub-channels. 973 * We will however populate all the slots to evenly distribute 974 * the load. 975 */ 976 stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *), 977 GFP_KERNEL); 978 if (stor_device->stor_chns == NULL) 979 return -ENOMEM; 980 981 device->channel->change_target_cpu_callback = storvsc_change_target_cpu; 982 983 stor_device->stor_chns[device->channel->target_cpu] = device->channel; 984 cpumask_set_cpu(device->channel->target_cpu, 985 &stor_device->alloced_cpus); 986 987 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) { 988 if (vstor_packet->storage_channel_properties.flags & 989 STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) 990 process_sub_channels = true; 991 } 992 stor_device->max_transfer_bytes = 993 vstor_packet->storage_channel_properties.max_transfer_bytes; 994 995 if (!is_fc) 996 goto done; 997 998 /* 999 * For FC devices retrieve FC HBA data. 1000 */ 1001 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 1002 vstor_packet->operation = VSTOR_OPERATION_FCHBA_DATA; 1003 ret = storvsc_execute_vstor_op(device, request, true); 1004 if (ret != 0) 1005 return ret; 1006 1007 /* 1008 * Cache the currently active port and node ww names. 1009 */ 1010 cache_wwn(stor_device, vstor_packet); 1011 1012 done: 1013 1014 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 1015 vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; 1016 ret = storvsc_execute_vstor_op(device, request, true); 1017 if (ret != 0) 1018 return ret; 1019 1020 if (process_sub_channels) 1021 handle_multichannel_storage(device, max_chns); 1022 1023 return ret; 1024 } 1025 1026 static void storvsc_handle_error(struct vmscsi_request *vm_srb, 1027 struct scsi_cmnd *scmnd, 1028 struct Scsi_Host *host, 1029 u8 asc, u8 ascq) 1030 { 1031 struct storvsc_scan_work *wrk; 1032 void (*process_err_fn)(struct work_struct *work); 1033 struct hv_host_device *host_dev = shost_priv(host); 1034 1035 /* 1036 * In some situations, Hyper-V sets multiple bits in the 1037 * srb_status, such as ABORTED and ERROR. So process them 1038 * individually, with the most specific bits first. 1039 */ 1040 1041 if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) { 1042 set_host_byte(scmnd, DID_NO_CONNECT); 1043 process_err_fn = storvsc_remove_lun; 1044 goto do_work; 1045 } 1046 1047 if (vm_srb->srb_status & SRB_STATUS_ABORTED) { 1048 if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID && 1049 /* Capacity data has changed */ 1050 (asc == 0x2a) && (ascq == 0x9)) { 1051 process_err_fn = storvsc_device_scan; 1052 /* 1053 * Retry the I/O that triggered this. 1054 */ 1055 set_host_byte(scmnd, DID_REQUEUE); 1056 goto do_work; 1057 } 1058 } 1059 1060 if (vm_srb->srb_status & SRB_STATUS_ERROR) { 1061 /* 1062 * Let upper layer deal with error when 1063 * sense message is present. 1064 */ 1065 if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) 1066 return; 1067 1068 /* 1069 * If there is an error; offline the device since all 1070 * error recovery strategies would have already been 1071 * deployed on the host side. However, if the command 1072 * were a pass-through command deal with it appropriately. 1073 */ 1074 switch (scmnd->cmnd[0]) { 1075 case ATA_16: 1076 case ATA_12: 1077 set_host_byte(scmnd, DID_PASSTHROUGH); 1078 break; 1079 /* 1080 * On some Hyper-V hosts TEST_UNIT_READY command can 1081 * return SRB_STATUS_ERROR. Let the upper level code 1082 * deal with it based on the sense information. 1083 */ 1084 case TEST_UNIT_READY: 1085 break; 1086 default: 1087 set_host_byte(scmnd, DID_ERROR); 1088 } 1089 } 1090 return; 1091 1092 do_work: 1093 /* 1094 * We need to schedule work to process this error; schedule it. 1095 */ 1096 wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); 1097 if (!wrk) { 1098 set_host_byte(scmnd, DID_TARGET_FAILURE); 1099 return; 1100 } 1101 1102 wrk->host = host; 1103 wrk->lun = vm_srb->lun; 1104 wrk->tgt_id = vm_srb->target_id; 1105 INIT_WORK(&wrk->work, process_err_fn); 1106 queue_work(host_dev->handle_error_wq, &wrk->work); 1107 } 1108 1109 1110 static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, 1111 struct storvsc_device *stor_dev) 1112 { 1113 struct scsi_cmnd *scmnd = cmd_request->cmd; 1114 struct scsi_sense_hdr sense_hdr; 1115 struct vmscsi_request *vm_srb; 1116 u32 data_transfer_length; 1117 struct Scsi_Host *host; 1118 u32 payload_sz = cmd_request->payload_sz; 1119 void *payload = cmd_request->payload; 1120 bool sense_ok; 1121 1122 host = stor_dev->host; 1123 1124 vm_srb = &cmd_request->vstor_packet.vm_srb; 1125 data_transfer_length = vm_srb->data_transfer_length; 1126 1127 scmnd->result = vm_srb->scsi_status; 1128 1129 if (scmnd->result) { 1130 sense_ok = scsi_normalize_sense(scmnd->sense_buffer, 1131 SCSI_SENSE_BUFFERSIZE, &sense_hdr); 1132 1133 if (sense_ok && do_logging(STORVSC_LOGGING_WARN)) 1134 scsi_print_sense_hdr(scmnd->device, "storvsc", 1135 &sense_hdr); 1136 } 1137 1138 if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { 1139 storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, 1140 sense_hdr.ascq); 1141 /* 1142 * The Windows driver set data_transfer_length on 1143 * SRB_STATUS_DATA_OVERRUN. On other errors, this value 1144 * is untouched. In these cases we set it to 0. 1145 */ 1146 if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN) 1147 data_transfer_length = 0; 1148 } 1149 1150 /* Validate data_transfer_length (from Hyper-V) */ 1151 if (data_transfer_length > cmd_request->payload->range.len) 1152 data_transfer_length = cmd_request->payload->range.len; 1153 1154 scsi_set_resid(scmnd, 1155 cmd_request->payload->range.len - data_transfer_length); 1156 1157 scsi_done(scmnd); 1158 1159 if (payload_sz > 1160 sizeof(struct vmbus_channel_packet_multipage_buffer)) 1161 kfree(payload); 1162 } 1163 1164 static void storvsc_on_io_completion(struct storvsc_device *stor_device, 1165 struct vstor_packet *vstor_packet, 1166 struct storvsc_cmd_request *request) 1167 { 1168 struct vstor_packet *stor_pkt; 1169 struct hv_device *device = stor_device->device; 1170 1171 stor_pkt = &request->vstor_packet; 1172 1173 /* 1174 * The current SCSI handling on the host side does 1175 * not correctly handle: 1176 * INQUIRY command with page code parameter set to 0x80 1177 * MODE_SENSE command with cmd[2] == 0x1c 1178 * 1179 * Setup srb and scsi status so this won't be fatal. 1180 * We do this so we can distinguish truly fatal failues 1181 * (srb status == 0x4) and off-line the device in that case. 1182 */ 1183 1184 if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || 1185 (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { 1186 vstor_packet->vm_srb.scsi_status = 0; 1187 vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; 1188 } 1189 1190 /* Copy over the status...etc */ 1191 stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status; 1192 stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status; 1193 1194 /* 1195 * Copy over the sense_info_length, but limit to the known max 1196 * size if Hyper-V returns a bad value. 1197 */ 1198 stor_pkt->vm_srb.sense_info_length = min_t(u8, sense_buffer_size, 1199 vstor_packet->vm_srb.sense_info_length); 1200 1201 if (vstor_packet->vm_srb.scsi_status != 0 || 1202 vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) { 1203 1204 /* 1205 * Log TEST_UNIT_READY errors only as warnings. Hyper-V can 1206 * return errors when detecting devices using TEST_UNIT_READY, 1207 * and logging these as errors produces unhelpful noise. 1208 */ 1209 int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ? 1210 STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR; 1211 1212 storvsc_log(device, loglevel, 1213 "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", 1214 scsi_cmd_to_rq(request->cmd)->tag, 1215 stor_pkt->vm_srb.cdb[0], 1216 vstor_packet->vm_srb.scsi_status, 1217 vstor_packet->vm_srb.srb_status, 1218 vstor_packet->status); 1219 } 1220 1221 if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION && 1222 (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID)) 1223 memcpy(request->cmd->sense_buffer, 1224 vstor_packet->vm_srb.sense_data, 1225 stor_pkt->vm_srb.sense_info_length); 1226 1227 stor_pkt->vm_srb.data_transfer_length = 1228 vstor_packet->vm_srb.data_transfer_length; 1229 1230 storvsc_command_completion(request, stor_device); 1231 1232 if (atomic_dec_and_test(&stor_device->num_outstanding_req) && 1233 stor_device->drain_notify) 1234 wake_up(&stor_device->waiting_to_drain); 1235 } 1236 1237 static void storvsc_on_receive(struct storvsc_device *stor_device, 1238 struct vstor_packet *vstor_packet, 1239 struct storvsc_cmd_request *request) 1240 { 1241 struct hv_host_device *host_dev; 1242 switch (vstor_packet->operation) { 1243 case VSTOR_OPERATION_COMPLETE_IO: 1244 storvsc_on_io_completion(stor_device, vstor_packet, request); 1245 break; 1246 1247 case VSTOR_OPERATION_REMOVE_DEVICE: 1248 case VSTOR_OPERATION_ENUMERATE_BUS: 1249 host_dev = shost_priv(stor_device->host); 1250 queue_work( 1251 host_dev->handle_error_wq, &host_dev->host_scan_work); 1252 break; 1253 1254 case VSTOR_OPERATION_FCHBA_DATA: 1255 cache_wwn(stor_device, vstor_packet); 1256 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 1257 fc_host_node_name(stor_device->host) = stor_device->node_name; 1258 fc_host_port_name(stor_device->host) = stor_device->port_name; 1259 #endif 1260 break; 1261 default: 1262 break; 1263 } 1264 } 1265 1266 static void storvsc_on_channel_callback(void *context) 1267 { 1268 struct vmbus_channel *channel = (struct vmbus_channel *)context; 1269 const struct vmpacket_descriptor *desc; 1270 struct hv_device *device; 1271 struct storvsc_device *stor_device; 1272 struct Scsi_Host *shost; 1273 1274 if (channel->primary_channel != NULL) 1275 device = channel->primary_channel->device_obj; 1276 else 1277 device = channel->device_obj; 1278 1279 stor_device = get_in_stor_device(device); 1280 if (!stor_device) 1281 return; 1282 1283 shost = stor_device->host; 1284 1285 foreach_vmbus_pkt(desc, channel) { 1286 struct vstor_packet *packet = hv_pkt_data(desc); 1287 struct storvsc_cmd_request *request = NULL; 1288 u32 pktlen = hv_pkt_datalen(desc); 1289 u64 rqst_id = desc->trans_id; 1290 u32 minlen = rqst_id ? sizeof(struct vstor_packet) - 1291 stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation); 1292 1293 if (pktlen < minlen) { 1294 dev_err(&device->device, 1295 "Invalid pkt: id=%llu, len=%u, minlen=%u\n", 1296 rqst_id, pktlen, minlen); 1297 continue; 1298 } 1299 1300 if (rqst_id == VMBUS_RQST_INIT) { 1301 request = &stor_device->init_request; 1302 } else if (rqst_id == VMBUS_RQST_RESET) { 1303 request = &stor_device->reset_request; 1304 } else { 1305 /* Hyper-V can send an unsolicited message with ID of 0 */ 1306 if (rqst_id == 0) { 1307 /* 1308 * storvsc_on_receive() looks at the vstor_packet in the message 1309 * from the ring buffer. 1310 * 1311 * - If the operation in the vstor_packet is COMPLETE_IO, then 1312 * we call storvsc_on_io_completion(), and dereference the 1313 * guest memory address. Make sure we don't call 1314 * storvsc_on_io_completion() with a guest memory address 1315 * that is zero if Hyper-V were to construct and send such 1316 * a bogus packet. 1317 * 1318 * - If the operation in the vstor_packet is FCHBA_DATA, then 1319 * we call cache_wwn(), and access the data payload area of 1320 * the packet (wwn_packet); however, there is no guarantee 1321 * that the packet is big enough to contain such area. 1322 * Future-proof the code by rejecting such a bogus packet. 1323 */ 1324 if (packet->operation == VSTOR_OPERATION_COMPLETE_IO || 1325 packet->operation == VSTOR_OPERATION_FCHBA_DATA) { 1326 dev_err(&device->device, "Invalid packet with ID of 0\n"); 1327 continue; 1328 } 1329 } else { 1330 struct scsi_cmnd *scmnd; 1331 1332 /* Transaction 'rqst_id' corresponds to tag 'rqst_id - 1' */ 1333 scmnd = scsi_host_find_tag(shost, rqst_id - 1); 1334 if (scmnd == NULL) { 1335 dev_err(&device->device, "Incorrect transaction ID\n"); 1336 continue; 1337 } 1338 request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd); 1339 } 1340 1341 storvsc_on_receive(stor_device, packet, request); 1342 continue; 1343 } 1344 1345 memcpy(&request->vstor_packet, packet, 1346 (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta)); 1347 complete(&request->wait_event); 1348 } 1349 } 1350 1351 static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size, 1352 bool is_fc) 1353 { 1354 struct vmstorage_channel_properties props; 1355 int ret; 1356 1357 memset(&props, 0, sizeof(struct vmstorage_channel_properties)); 1358 1359 device->channel->max_pkt_size = STORVSC_MAX_PKT_SIZE; 1360 device->channel->next_request_id_callback = storvsc_next_request_id; 1361 1362 ret = vmbus_open(device->channel, 1363 ring_size, 1364 ring_size, 1365 (void *)&props, 1366 sizeof(struct vmstorage_channel_properties), 1367 storvsc_on_channel_callback, device->channel); 1368 1369 if (ret != 0) 1370 return ret; 1371 1372 ret = storvsc_channel_init(device, is_fc); 1373 1374 return ret; 1375 } 1376 1377 static int storvsc_dev_remove(struct hv_device *device) 1378 { 1379 struct storvsc_device *stor_device; 1380 1381 stor_device = hv_get_drvdata(device); 1382 1383 stor_device->destroy = true; 1384 1385 /* Make sure flag is set before waiting */ 1386 wmb(); 1387 1388 /* 1389 * At this point, all outbound traffic should be disable. We 1390 * only allow inbound traffic (responses) to proceed so that 1391 * outstanding requests can be completed. 1392 */ 1393 1394 storvsc_wait_to_drain(stor_device); 1395 1396 /* 1397 * Since we have already drained, we don't need to busy wait 1398 * as was done in final_release_stor_device() 1399 * Note that we cannot set the ext pointer to NULL until 1400 * we have drained - to drain the outgoing packets, we need to 1401 * allow incoming packets. 1402 */ 1403 hv_set_drvdata(device, NULL); 1404 1405 /* Close the channel */ 1406 vmbus_close(device->channel); 1407 1408 kfree(stor_device->stor_chns); 1409 kfree(stor_device); 1410 return 0; 1411 } 1412 1413 static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device, 1414 u16 q_num) 1415 { 1416 u16 slot = 0; 1417 u16 hash_qnum; 1418 const struct cpumask *node_mask; 1419 int num_channels, tgt_cpu; 1420 1421 if (stor_device->num_sc == 0) { 1422 stor_device->stor_chns[q_num] = stor_device->device->channel; 1423 return stor_device->device->channel; 1424 } 1425 1426 /* 1427 * Our channel array is sparsley populated and we 1428 * initiated I/O on a processor/hw-q that does not 1429 * currently have a designated channel. Fix this. 1430 * The strategy is simple: 1431 * I. Ensure NUMA locality 1432 * II. Distribute evenly (best effort) 1433 */ 1434 1435 node_mask = cpumask_of_node(cpu_to_node(q_num)); 1436 1437 num_channels = 0; 1438 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { 1439 if (cpumask_test_cpu(tgt_cpu, node_mask)) 1440 num_channels++; 1441 } 1442 if (num_channels == 0) { 1443 stor_device->stor_chns[q_num] = stor_device->device->channel; 1444 return stor_device->device->channel; 1445 } 1446 1447 hash_qnum = q_num; 1448 while (hash_qnum >= num_channels) 1449 hash_qnum -= num_channels; 1450 1451 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { 1452 if (!cpumask_test_cpu(tgt_cpu, node_mask)) 1453 continue; 1454 if (slot == hash_qnum) 1455 break; 1456 slot++; 1457 } 1458 1459 stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu]; 1460 1461 return stor_device->stor_chns[q_num]; 1462 } 1463 1464 1465 static int storvsc_do_io(struct hv_device *device, 1466 struct storvsc_cmd_request *request, u16 q_num) 1467 { 1468 struct storvsc_device *stor_device; 1469 struct vstor_packet *vstor_packet; 1470 struct vmbus_channel *outgoing_channel, *channel; 1471 unsigned long flags; 1472 int ret = 0; 1473 const struct cpumask *node_mask; 1474 int tgt_cpu; 1475 1476 vstor_packet = &request->vstor_packet; 1477 stor_device = get_out_stor_device(device); 1478 1479 if (!stor_device) 1480 return -ENODEV; 1481 1482 1483 request->device = device; 1484 /* 1485 * Select an appropriate channel to send the request out. 1486 */ 1487 /* See storvsc_change_target_cpu(). */ 1488 outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]); 1489 if (outgoing_channel != NULL) { 1490 if (outgoing_channel->target_cpu == q_num) { 1491 /* 1492 * Ideally, we want to pick a different channel if 1493 * available on the same NUMA node. 1494 */ 1495 node_mask = cpumask_of_node(cpu_to_node(q_num)); 1496 for_each_cpu_wrap(tgt_cpu, 1497 &stor_device->alloced_cpus, q_num + 1) { 1498 if (!cpumask_test_cpu(tgt_cpu, node_mask)) 1499 continue; 1500 if (tgt_cpu == q_num) 1501 continue; 1502 channel = READ_ONCE( 1503 stor_device->stor_chns[tgt_cpu]); 1504 if (channel == NULL) 1505 continue; 1506 if (hv_get_avail_to_write_percent( 1507 &channel->outbound) 1508 > ring_avail_percent_lowater) { 1509 outgoing_channel = channel; 1510 goto found_channel; 1511 } 1512 } 1513 1514 /* 1515 * All the other channels on the same NUMA node are 1516 * busy. Try to use the channel on the current CPU 1517 */ 1518 if (hv_get_avail_to_write_percent( 1519 &outgoing_channel->outbound) 1520 > ring_avail_percent_lowater) 1521 goto found_channel; 1522 1523 /* 1524 * If we reach here, all the channels on the current 1525 * NUMA node are busy. Try to find a channel in 1526 * other NUMA nodes 1527 */ 1528 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { 1529 if (cpumask_test_cpu(tgt_cpu, node_mask)) 1530 continue; 1531 channel = READ_ONCE( 1532 stor_device->stor_chns[tgt_cpu]); 1533 if (channel == NULL) 1534 continue; 1535 if (hv_get_avail_to_write_percent( 1536 &channel->outbound) 1537 > ring_avail_percent_lowater) { 1538 outgoing_channel = channel; 1539 goto found_channel; 1540 } 1541 } 1542 } 1543 } else { 1544 spin_lock_irqsave(&stor_device->lock, flags); 1545 outgoing_channel = stor_device->stor_chns[q_num]; 1546 if (outgoing_channel != NULL) { 1547 spin_unlock_irqrestore(&stor_device->lock, flags); 1548 goto found_channel; 1549 } 1550 outgoing_channel = get_og_chn(stor_device, q_num); 1551 spin_unlock_irqrestore(&stor_device->lock, flags); 1552 } 1553 1554 found_channel: 1555 vstor_packet->flags |= REQUEST_COMPLETION_FLAG; 1556 1557 vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) - 1558 stor_device->vmscsi_size_delta); 1559 1560 1561 vstor_packet->vm_srb.sense_info_length = sense_buffer_size; 1562 1563 1564 vstor_packet->vm_srb.data_transfer_length = 1565 request->payload->range.len; 1566 1567 vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB; 1568 1569 if (request->payload->range.len) { 1570 1571 ret = vmbus_sendpacket_mpb_desc(outgoing_channel, 1572 request->payload, request->payload_sz, 1573 vstor_packet, 1574 (sizeof(struct vstor_packet) - 1575 stor_device->vmscsi_size_delta), 1576 (unsigned long)request); 1577 } else { 1578 ret = vmbus_sendpacket(outgoing_channel, vstor_packet, 1579 (sizeof(struct vstor_packet) - 1580 stor_device->vmscsi_size_delta), 1581 (unsigned long)request, 1582 VM_PKT_DATA_INBAND, 1583 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1584 } 1585 1586 if (ret != 0) 1587 return ret; 1588 1589 atomic_inc(&stor_device->num_outstanding_req); 1590 1591 return ret; 1592 } 1593 1594 static int storvsc_device_alloc(struct scsi_device *sdevice) 1595 { 1596 /* 1597 * Set blist flag to permit the reading of the VPD pages even when 1598 * the target may claim SPC-2 compliance. MSFT targets currently 1599 * claim SPC-2 compliance while they implement post SPC-2 features. 1600 * With this flag we can correctly handle WRITE_SAME_16 issues. 1601 * 1602 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but 1603 * still supports REPORT LUN. 1604 */ 1605 sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES; 1606 1607 return 0; 1608 } 1609 1610 static int storvsc_device_configure(struct scsi_device *sdevice) 1611 { 1612 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); 1613 1614 sdevice->no_write_same = 1; 1615 1616 /* 1617 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 1618 * if the device is a MSFT virtual device. If the host is 1619 * WIN10 or newer, allow write_same. 1620 */ 1621 if (!strncmp(sdevice->vendor, "Msft", 4)) { 1622 switch (vmstor_proto_version) { 1623 case VMSTOR_PROTO_VERSION_WIN8: 1624 case VMSTOR_PROTO_VERSION_WIN8_1: 1625 sdevice->scsi_level = SCSI_SPC_3; 1626 break; 1627 } 1628 1629 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10) 1630 sdevice->no_write_same = 0; 1631 } 1632 1633 return 0; 1634 } 1635 1636 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, 1637 sector_t capacity, int *info) 1638 { 1639 sector_t nsect = capacity; 1640 sector_t cylinders = nsect; 1641 int heads, sectors_pt; 1642 1643 /* 1644 * We are making up these values; let us keep it simple. 1645 */ 1646 heads = 0xff; 1647 sectors_pt = 0x3f; /* Sectors per track */ 1648 sector_div(cylinders, heads * sectors_pt); 1649 if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect) 1650 cylinders = 0xffff; 1651 1652 info[0] = heads; 1653 info[1] = sectors_pt; 1654 info[2] = (int)cylinders; 1655 1656 return 0; 1657 } 1658 1659 static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) 1660 { 1661 struct hv_host_device *host_dev = shost_priv(scmnd->device->host); 1662 struct hv_device *device = host_dev->dev; 1663 1664 struct storvsc_device *stor_device; 1665 struct storvsc_cmd_request *request; 1666 struct vstor_packet *vstor_packet; 1667 int ret, t; 1668 1669 stor_device = get_out_stor_device(device); 1670 if (!stor_device) 1671 return FAILED; 1672 1673 request = &stor_device->reset_request; 1674 vstor_packet = &request->vstor_packet; 1675 memset(vstor_packet, 0, sizeof(struct vstor_packet)); 1676 1677 init_completion(&request->wait_event); 1678 1679 vstor_packet->operation = VSTOR_OPERATION_RESET_BUS; 1680 vstor_packet->flags = REQUEST_COMPLETION_FLAG; 1681 vstor_packet->vm_srb.path_id = stor_device->path_id; 1682 1683 ret = vmbus_sendpacket(device->channel, vstor_packet, 1684 (sizeof(struct vstor_packet) - 1685 stor_device->vmscsi_size_delta), 1686 VMBUS_RQST_RESET, 1687 VM_PKT_DATA_INBAND, 1688 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1689 if (ret != 0) 1690 return FAILED; 1691 1692 t = wait_for_completion_timeout(&request->wait_event, 5*HZ); 1693 if (t == 0) 1694 return TIMEOUT_ERROR; 1695 1696 1697 /* 1698 * At this point, all outstanding requests in the adapter 1699 * should have been flushed out and return to us 1700 * There is a potential race here where the host may be in 1701 * the process of responding when we return from here. 1702 * Just wait for all in-transit packets to be accounted for 1703 * before we return from here. 1704 */ 1705 storvsc_wait_to_drain(stor_device); 1706 1707 return SUCCESS; 1708 } 1709 1710 /* 1711 * The host guarantees to respond to each command, although I/O latencies might 1712 * be unbounded on Azure. Reset the timer unconditionally to give the host a 1713 * chance to perform EH. 1714 */ 1715 static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) 1716 { 1717 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 1718 if (scmnd->device->host->transportt == fc_transport_template) 1719 return fc_eh_timed_out(scmnd); 1720 #endif 1721 return BLK_EH_RESET_TIMER; 1722 } 1723 1724 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) 1725 { 1726 bool allowed = true; 1727 u8 scsi_op = scmnd->cmnd[0]; 1728 1729 switch (scsi_op) { 1730 /* the host does not handle WRITE_SAME, log accident usage */ 1731 case WRITE_SAME: 1732 /* 1733 * smartd sends this command and the host does not handle 1734 * this. So, don't send it. 1735 */ 1736 case SET_WINDOW: 1737 set_host_byte(scmnd, DID_ERROR); 1738 allowed = false; 1739 break; 1740 default: 1741 break; 1742 } 1743 return allowed; 1744 } 1745 1746 static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) 1747 { 1748 int ret; 1749 struct hv_host_device *host_dev = shost_priv(host); 1750 struct hv_device *dev = host_dev->dev; 1751 struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd); 1752 int i; 1753 struct scatterlist *sgl; 1754 unsigned int sg_count; 1755 struct vmscsi_request *vm_srb; 1756 struct vmbus_packet_mpb_array *payload; 1757 u32 payload_sz; 1758 u32 length; 1759 1760 if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) { 1761 /* 1762 * On legacy hosts filter unimplemented commands. 1763 * Future hosts are expected to correctly handle 1764 * unsupported commands. Furthermore, it is 1765 * possible that some of the currently 1766 * unsupported commands maybe supported in 1767 * future versions of the host. 1768 */ 1769 if (!storvsc_scsi_cmd_ok(scmnd)) { 1770 scsi_done(scmnd); 1771 return 0; 1772 } 1773 } 1774 1775 /* Setup the cmd request */ 1776 cmd_request->cmd = scmnd; 1777 1778 memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet)); 1779 vm_srb = &cmd_request->vstor_packet.vm_srb; 1780 vm_srb->win8_extension.time_out_value = 60; 1781 1782 vm_srb->win8_extension.srb_flags |= 1783 SRB_FLAGS_DISABLE_SYNCH_TRANSFER; 1784 1785 if (scmnd->device->tagged_supported) { 1786 vm_srb->win8_extension.srb_flags |= 1787 (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE); 1788 vm_srb->win8_extension.queue_tag = SP_UNTAGGED; 1789 vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST; 1790 } 1791 1792 /* Build the SRB */ 1793 switch (scmnd->sc_data_direction) { 1794 case DMA_TO_DEVICE: 1795 vm_srb->data_in = WRITE_TYPE; 1796 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT; 1797 break; 1798 case DMA_FROM_DEVICE: 1799 vm_srb->data_in = READ_TYPE; 1800 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN; 1801 break; 1802 case DMA_NONE: 1803 vm_srb->data_in = UNKNOWN_TYPE; 1804 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; 1805 break; 1806 default: 1807 /* 1808 * This is DMA_BIDIRECTIONAL or something else we are never 1809 * supposed to see here. 1810 */ 1811 WARN(1, "Unexpected data direction: %d\n", 1812 scmnd->sc_data_direction); 1813 return -EINVAL; 1814 } 1815 1816 1817 vm_srb->port_number = host_dev->port; 1818 vm_srb->path_id = scmnd->device->channel; 1819 vm_srb->target_id = scmnd->device->id; 1820 vm_srb->lun = scmnd->device->lun; 1821 1822 vm_srb->cdb_length = scmnd->cmd_len; 1823 1824 memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); 1825 1826 sgl = (struct scatterlist *)scsi_sglist(scmnd); 1827 sg_count = scsi_sg_count(scmnd); 1828 1829 length = scsi_bufflen(scmnd); 1830 payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; 1831 payload_sz = sizeof(cmd_request->mpb); 1832 1833 if (sg_count) { 1834 unsigned int hvpgoff, hvpfns_to_add; 1835 unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset); 1836 unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length); 1837 u64 hvpfn; 1838 1839 if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { 1840 1841 payload_sz = (hvpg_count * sizeof(u64) + 1842 sizeof(struct vmbus_packet_mpb_array)); 1843 payload = kzalloc(payload_sz, GFP_ATOMIC); 1844 if (!payload) 1845 return SCSI_MLQUEUE_DEVICE_BUSY; 1846 } 1847 1848 payload->range.len = length; 1849 payload->range.offset = offset_in_hvpg; 1850 1851 1852 for (i = 0; sgl != NULL; sgl = sg_next(sgl)) { 1853 /* 1854 * Init values for the current sgl entry. hvpgoff 1855 * and hvpfns_to_add are in units of Hyper-V size 1856 * pages. Handling the PAGE_SIZE != HV_HYP_PAGE_SIZE 1857 * case also handles values of sgl->offset that are 1858 * larger than PAGE_SIZE. Such offsets are handled 1859 * even on other than the first sgl entry, provided 1860 * they are a multiple of PAGE_SIZE. 1861 */ 1862 hvpgoff = HVPFN_DOWN(sgl->offset); 1863 hvpfn = page_to_hvpfn(sg_page(sgl)) + hvpgoff; 1864 hvpfns_to_add = HVPFN_UP(sgl->offset + sgl->length) - 1865 hvpgoff; 1866 1867 /* 1868 * Fill the next portion of the PFN array with 1869 * sequential Hyper-V PFNs for the continguous physical 1870 * memory described by the sgl entry. The end of the 1871 * last sgl should be reached at the same time that 1872 * the PFN array is filled. 1873 */ 1874 while (hvpfns_to_add--) 1875 payload->range.pfn_array[i++] = hvpfn++; 1876 } 1877 } 1878 1879 cmd_request->payload = payload; 1880 cmd_request->payload_sz = payload_sz; 1881 1882 /* Invokes the vsc to start an IO */ 1883 ret = storvsc_do_io(dev, cmd_request, get_cpu()); 1884 put_cpu(); 1885 1886 if (ret == -EAGAIN) { 1887 if (payload_sz > sizeof(cmd_request->mpb)) 1888 kfree(payload); 1889 /* no more space */ 1890 return SCSI_MLQUEUE_DEVICE_BUSY; 1891 } 1892 1893 return 0; 1894 } 1895 1896 static struct scsi_host_template scsi_driver = { 1897 .module = THIS_MODULE, 1898 .name = "storvsc_host_t", 1899 .cmd_size = sizeof(struct storvsc_cmd_request), 1900 .bios_param = storvsc_get_chs, 1901 .queuecommand = storvsc_queuecommand, 1902 .eh_host_reset_handler = storvsc_host_reset_handler, 1903 .proc_name = "storvsc_host", 1904 .eh_timed_out = storvsc_eh_timed_out, 1905 .slave_alloc = storvsc_device_alloc, 1906 .slave_configure = storvsc_device_configure, 1907 .cmd_per_lun = 2048, 1908 .this_id = -1, 1909 /* Ensure there are no gaps in presented sgls */ 1910 .virt_boundary_mask = PAGE_SIZE-1, 1911 .no_write_same = 1, 1912 .track_queue_depth = 1, 1913 .change_queue_depth = storvsc_change_queue_depth, 1914 }; 1915 1916 enum { 1917 SCSI_GUID, 1918 IDE_GUID, 1919 SFC_GUID, 1920 }; 1921 1922 static const struct hv_vmbus_device_id id_table[] = { 1923 /* SCSI guid */ 1924 { HV_SCSI_GUID, 1925 .driver_data = SCSI_GUID 1926 }, 1927 /* IDE guid */ 1928 { HV_IDE_GUID, 1929 .driver_data = IDE_GUID 1930 }, 1931 /* Fibre Channel GUID */ 1932 { 1933 HV_SYNTHFC_GUID, 1934 .driver_data = SFC_GUID 1935 }, 1936 { }, 1937 }; 1938 1939 MODULE_DEVICE_TABLE(vmbus, id_table); 1940 1941 static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID }; 1942 1943 static bool hv_dev_is_fc(struct hv_device *hv_dev) 1944 { 1945 return guid_equal(&fc_guid.guid, &hv_dev->dev_type); 1946 } 1947 1948 static int storvsc_probe(struct hv_device *device, 1949 const struct hv_vmbus_device_id *dev_id) 1950 { 1951 int ret; 1952 int num_cpus = num_online_cpus(); 1953 int num_present_cpus = num_present_cpus(); 1954 struct Scsi_Host *host; 1955 struct hv_host_device *host_dev; 1956 bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); 1957 bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false); 1958 int target = 0; 1959 struct storvsc_device *stor_device; 1960 int max_luns_per_target; 1961 int max_targets; 1962 int max_channels; 1963 int max_sub_channels = 0; 1964 1965 /* 1966 * Based on the windows host we are running on, 1967 * set state to properly communicate with the host. 1968 */ 1969 1970 if (vmbus_proto_version < VERSION_WIN8) { 1971 max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET; 1972 max_targets = STORVSC_IDE_MAX_TARGETS; 1973 max_channels = STORVSC_IDE_MAX_CHANNELS; 1974 } else { 1975 max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET; 1976 max_targets = STORVSC_MAX_TARGETS; 1977 max_channels = STORVSC_MAX_CHANNELS; 1978 /* 1979 * On Windows8 and above, we support sub-channels for storage 1980 * on SCSI and FC controllers. 1981 * The number of sub-channels offerred is based on the number of 1982 * VCPUs in the guest. 1983 */ 1984 if (!dev_is_ide) 1985 max_sub_channels = 1986 (num_cpus - 1) / storvsc_vcpus_per_sub_channel; 1987 } 1988 1989 scsi_driver.can_queue = max_outstanding_req_per_channel * 1990 (max_sub_channels + 1) * 1991 (100 - ring_avail_percent_lowater) / 100; 1992 1993 host = scsi_host_alloc(&scsi_driver, 1994 sizeof(struct hv_host_device)); 1995 if (!host) 1996 return -ENOMEM; 1997 1998 host_dev = shost_priv(host); 1999 memset(host_dev, 0, sizeof(struct hv_host_device)); 2000 2001 host_dev->port = host->host_no; 2002 host_dev->dev = device; 2003 host_dev->host = host; 2004 2005 2006 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL); 2007 if (!stor_device) { 2008 ret = -ENOMEM; 2009 goto err_out0; 2010 } 2011 2012 stor_device->destroy = false; 2013 init_waitqueue_head(&stor_device->waiting_to_drain); 2014 stor_device->device = device; 2015 stor_device->host = host; 2016 stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); 2017 spin_lock_init(&stor_device->lock); 2018 hv_set_drvdata(device, stor_device); 2019 2020 stor_device->port_number = host->host_no; 2021 ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc); 2022 if (ret) 2023 goto err_out1; 2024 2025 host_dev->path = stor_device->path_id; 2026 host_dev->target = stor_device->target_id; 2027 2028 switch (dev_id->driver_data) { 2029 case SFC_GUID: 2030 host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET; 2031 host->max_id = STORVSC_FC_MAX_TARGETS; 2032 host->max_channel = STORVSC_FC_MAX_CHANNELS - 1; 2033 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 2034 host->transportt = fc_transport_template; 2035 #endif 2036 break; 2037 2038 case SCSI_GUID: 2039 host->max_lun = max_luns_per_target; 2040 host->max_id = max_targets; 2041 host->max_channel = max_channels - 1; 2042 break; 2043 2044 default: 2045 host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET; 2046 host->max_id = STORVSC_IDE_MAX_TARGETS; 2047 host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1; 2048 break; 2049 } 2050 /* max cmd length */ 2051 host->max_cmd_len = STORVSC_MAX_CMD_LEN; 2052 2053 /* 2054 * set the table size based on the info we got 2055 * from the host. 2056 */ 2057 host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); 2058 /* 2059 * For non-IDE disks, the host supports multiple channels. 2060 * Set the number of HW queues we are supporting. 2061 */ 2062 if (!dev_is_ide) { 2063 if (storvsc_max_hw_queues > num_present_cpus) { 2064 storvsc_max_hw_queues = 0; 2065 storvsc_log(device, STORVSC_LOGGING_WARN, 2066 "Resetting invalid storvsc_max_hw_queues value to default.\n"); 2067 } 2068 if (storvsc_max_hw_queues) 2069 host->nr_hw_queues = storvsc_max_hw_queues; 2070 else 2071 host->nr_hw_queues = num_present_cpus; 2072 } 2073 2074 /* 2075 * Set the error handler work queue. 2076 */ 2077 host_dev->handle_error_wq = 2078 alloc_ordered_workqueue("storvsc_error_wq_%d", 2079 WQ_MEM_RECLAIM, 2080 host->host_no); 2081 if (!host_dev->handle_error_wq) { 2082 ret = -ENOMEM; 2083 goto err_out2; 2084 } 2085 INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan); 2086 /* Register the HBA and start the scsi bus scan */ 2087 ret = scsi_add_host(host, &device->device); 2088 if (ret != 0) 2089 goto err_out3; 2090 2091 if (!dev_is_ide) { 2092 scsi_scan_host(host); 2093 } else { 2094 target = (device->dev_instance.b[5] << 8 | 2095 device->dev_instance.b[4]); 2096 ret = scsi_add_device(host, 0, target, 0); 2097 if (ret) 2098 goto err_out4; 2099 } 2100 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 2101 if (host->transportt == fc_transport_template) { 2102 struct fc_rport_identifiers ids = { 2103 .roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR, 2104 }; 2105 2106 fc_host_node_name(host) = stor_device->node_name; 2107 fc_host_port_name(host) = stor_device->port_name; 2108 stor_device->rport = fc_remote_port_add(host, 0, &ids); 2109 if (!stor_device->rport) { 2110 ret = -ENOMEM; 2111 goto err_out4; 2112 } 2113 } 2114 #endif 2115 return 0; 2116 2117 err_out4: 2118 scsi_remove_host(host); 2119 2120 err_out3: 2121 destroy_workqueue(host_dev->handle_error_wq); 2122 2123 err_out2: 2124 /* 2125 * Once we have connected with the host, we would need to 2126 * to invoke storvsc_dev_remove() to rollback this state and 2127 * this call also frees up the stor_device; hence the jump around 2128 * err_out1 label. 2129 */ 2130 storvsc_dev_remove(device); 2131 goto err_out0; 2132 2133 err_out1: 2134 kfree(stor_device->stor_chns); 2135 kfree(stor_device); 2136 2137 err_out0: 2138 scsi_host_put(host); 2139 return ret; 2140 } 2141 2142 /* Change a scsi target's queue depth */ 2143 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth) 2144 { 2145 if (queue_depth > scsi_driver.can_queue) 2146 queue_depth = scsi_driver.can_queue; 2147 2148 return scsi_change_queue_depth(sdev, queue_depth); 2149 } 2150 2151 static int storvsc_remove(struct hv_device *dev) 2152 { 2153 struct storvsc_device *stor_device = hv_get_drvdata(dev); 2154 struct Scsi_Host *host = stor_device->host; 2155 struct hv_host_device *host_dev = shost_priv(host); 2156 2157 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 2158 if (host->transportt == fc_transport_template) { 2159 fc_remote_port_delete(stor_device->rport); 2160 fc_remove_host(host); 2161 } 2162 #endif 2163 destroy_workqueue(host_dev->handle_error_wq); 2164 scsi_remove_host(host); 2165 storvsc_dev_remove(dev); 2166 scsi_host_put(host); 2167 2168 return 0; 2169 } 2170 2171 static int storvsc_suspend(struct hv_device *hv_dev) 2172 { 2173 struct storvsc_device *stor_device = hv_get_drvdata(hv_dev); 2174 struct Scsi_Host *host = stor_device->host; 2175 struct hv_host_device *host_dev = shost_priv(host); 2176 2177 storvsc_wait_to_drain(stor_device); 2178 2179 drain_workqueue(host_dev->handle_error_wq); 2180 2181 vmbus_close(hv_dev->channel); 2182 2183 kfree(stor_device->stor_chns); 2184 stor_device->stor_chns = NULL; 2185 2186 cpumask_clear(&stor_device->alloced_cpus); 2187 2188 return 0; 2189 } 2190 2191 static int storvsc_resume(struct hv_device *hv_dev) 2192 { 2193 int ret; 2194 2195 ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size, 2196 hv_dev_is_fc(hv_dev)); 2197 return ret; 2198 } 2199 2200 static struct hv_driver storvsc_drv = { 2201 .name = KBUILD_MODNAME, 2202 .id_table = id_table, 2203 .probe = storvsc_probe, 2204 .remove = storvsc_remove, 2205 .suspend = storvsc_suspend, 2206 .resume = storvsc_resume, 2207 .driver = { 2208 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 2209 }, 2210 }; 2211 2212 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 2213 static struct fc_function_template fc_transport_functions = { 2214 .show_host_node_name = 1, 2215 .show_host_port_name = 1, 2216 }; 2217 #endif 2218 2219 static int __init storvsc_drv_init(void) 2220 { 2221 int ret; 2222 2223 /* 2224 * Divide the ring buffer data size (which is 1 page less 2225 * than the ring buffer size since that page is reserved for 2226 * the ring buffer indices) by the max request size (which is 2227 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) 2228 * 2229 * The computation underestimates max_outstanding_req_per_channel 2230 * for Win7 and older hosts because it does not take into account 2231 * the vmscsi_size_delta correction to the max request size. 2232 */ 2233 max_outstanding_req_per_channel = 2234 ((storvsc_ringbuffer_size - PAGE_SIZE) / 2235 ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + 2236 sizeof(struct vstor_packet) + sizeof(u64), 2237 sizeof(u64))); 2238 2239 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 2240 fc_transport_template = fc_attach_transport(&fc_transport_functions); 2241 if (!fc_transport_template) 2242 return -ENODEV; 2243 #endif 2244 2245 ret = vmbus_driver_register(&storvsc_drv); 2246 2247 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 2248 if (ret) 2249 fc_release_transport(fc_transport_template); 2250 #endif 2251 2252 return ret; 2253 } 2254 2255 static void __exit storvsc_drv_exit(void) 2256 { 2257 vmbus_driver_unregister(&storvsc_drv); 2258 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 2259 fc_release_transport(fc_transport_template); 2260 #endif 2261 } 2262 2263 MODULE_LICENSE("GPL"); 2264 MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver"); 2265 module_init(storvsc_drv_init); 2266 module_exit(storvsc_drv_exit); 2267