#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct request_queue;
struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;
struct blk_queue_tags;


/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *       and that there is no limit to the size of the table to which
 *       we scatter/gather data.  The value we set here is the maximum
 *       single element sglist.  To use chained sglists, the adapter
 *       has to set a value beyond ALL (and correctly use the chain
 *       handling API).
 * Anything else:  Indicates the maximum number of chains that can be
 *       used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL	SCSI_MAX_SG_SEGMENTS

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1

enum {
	SCSI_QDEPTH_DEFAULT,	/* default requested change, e.g. from sysfs */
	SCSI_QDEPTH_QFULL,	/* scsi-ml requested due to queue full */
	SCSI_QDEPTH_RAMP_UP,	/* scsi-ml requested due to threshold event */
};

struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * Used to initialize old-style drivers.  For new-style drivers
	 * just perform all work in your module initialization function.
	 *
	 * Status: OBSOLETE
	 */
	int (* detect)(struct scsi_host_template *);

	/*
	 * Used as unload callback for hosts with old-style drivers.
	 *
	 * Status: OBSOLETE
	 */
	int (* release)(struct Scsi_Host *);

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When unknown ioctl is passed return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the HBA has accepted the
	 * command.  The done() function must be called on the command
	 * when the driver has finished with it.  (you may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand).
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
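	/*
	 * Example (illustrative sketch, not part of this interface): a
	 * hypothetical "foo" driver's queuecommand honouring the contract
	 * above.  foo_hba, foo_can_accept() and foo_issue() are invented
	 * names; only the return values and the use of cmd->scsi_done()
	 * follow the documented behaviour.
	 *
	 *	static int foo_queuecommand(struct Scsi_Host *shost,
	 *				    struct scsi_cmnd *cmd)
	 *	{
	 *		struct foo_hba *hba = shost_priv(shost);
	 *
	 *		if (!foo_can_accept(hba))
	 *			return SCSI_MLQUEUE_HOST_BUSY;	(reject: do not touch cmd)
	 *
	 *		if (foo_issue(hba, cmd) < 0) {
	 *			cmd->result = DID_ERROR << 16;
	 *			cmd->scsi_done(cmd);	(completed before returning...)
	 *		}
	 *		return 0;			(...so still return 0)
	 *	}
	 */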

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine.  When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_target_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_adjust_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, eg.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around, clean
	 *     up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);
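	/*
	 * Example (illustrative sketch): a hypothetical slave_configure that
	 * only sets the queue depth, per the note above.  The "foo" name and
	 * the depth of 32 are invented; scsi_adjust_queue_depth() itself is
	 * declared in <scsi/scsi_device.h> and is assumed here to take the
	 * device, a tag-type hint and the new depth, as described in its own
	 * comments.
	 *
	 *	static int foo_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		if (sdev->tagged_supported)
	 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 32);
	 *		else
	 *			scsi_adjust_queue_depth(sdev, 0, 1);
	 *		return 0;	(non-0 would mark the device offline)
	 *	}
	 */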

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int, int);

	/*
	 * Fill in this function to allow the changing of tag types
	 * (this also allows the enabling/disabling of tag command
	 * queueing).  An error should only be returned if something
	 * went wrong in the driver while trying to set the tag type.
	 * If the driver doesn't support the requested tag type, then
	 * it should set the closest type it does support without
	 * returning an error.  Returns the actual tag type set.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_type)(struct scsi_device *, int);

	/*
	 * This function determines the BIOS parameters for a given
	 * harddisk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);
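	/*
	 * Example (illustrative sketch): a hypothetical bios_param returning
	 * the commonly used made-up 64-head/32-sector geometry.  The
	 * parameters are the device, the block device, the capacity in
	 * 512-byte sectors and an int[3] to fill with heads, sectors and
	 * cylinders; the foo_ name and the chosen geometry are invented.
	 *
	 *	static int foo_bios_param(struct scsi_device *sdev,
	 *				  struct block_device *bdev,
	 *				  sector_t capacity, int geom[])
	 *	{
	 *		geom[0] = 64;					(heads)
	 *		geom[1] = 32;					(sectors)
	 *		geom[2] = (unsigned long)capacity / (64 * 32);	(cylinders)
	 *		return 0;
	 *	}
	 */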

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel ie. userspace and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling:
	 * EH_HANDLED:		I fixed the error, please complete the command
	 * EH_RESET_TIMER:	I need more time, reset the timer and
	 *			begin counting again
	 * EH_NOT_HANDLED:	Begin normal error recovery
	 *
	 * Status: OPTIONAL
	 */
	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

	/* This is an optional routine that allows transport to initiate
	 * LLD adapter or firmware reset using sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */

	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET	1
#define SCSI_FIRMWARE_RESET	2


	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * show_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations beside segment count.
	 */
	unsigned int max_sectors;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True if this host adapter can make good use of clustering.
	 * I originally thought that if the tablesize was large that it
	 * was a waste of CPU cycles to prepare a cluster list, but
	 * it works out that the Buslogic is faster if you use a smaller
	 * number of segments (i.e. use clustering).  I guess it is
	 * inefficient.
	 */
	unsigned use_clustering:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/*
	 * True if we are using ordered write support.
	 */
	unsigned ordered_tag:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/*
	 * True if asynchronous aborts are not supported
	 */
	unsigned no_async_abort:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head legacy_hosts;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 * Vendor Type and ID formatting requirements specified in
	 * scsi_netlink.h
	 */
	u64 vendor_id;

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;
	struct scsi_host_cmd_pool *cmd_pool;

	/* temporary flag to disable blk-mq I/O path */
	bool disable_blk_mq;
};
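/*
 * Example (illustrative sketch): a minimal template for a hypothetical
 * "foo" driver.  The values and the foo_* callback names are invented;
 * only the field names come from the structure above.
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_host_reset_handler	= foo_host_reset,
 *		.slave_configure	= foo_slave_configure,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_per_lun		= 2,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 */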

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		scsi_cmd_get_serial(shost, cmd);			\
		rc = func_name##_lck (cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
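/*
 * Example (illustrative sketch): how a legacy, lock-taking driver would
 * use DEF_SCSI_QCMD.  The driver provides a "_lck" variant taking the
 * command and the done callback; the macro then emits the host-lock
 * wrapper with the modern two-argument prototype.  The "foo" names are
 * invented.
 *
 *	static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		... issue cmd while shost->host_lock is held ...
 *		return 0;
 *	}
 *
 *	static DEF_SCSI_QCMD(foo_queuecommand)
 *
 * foo_queuecommand is then what goes into the template's .queuecommand
 * slot.
 */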

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct scsi_host_cmd_pool *cmd_pool;
	spinlock_t		free_list_lock;
	struct list_head	free_list; /* backup store of cmd structs */
	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct mutex		scan_mutex;	/* serialize scanning activity */

	struct list_head	eh_cmd_q;
	struct task_struct	*ehandler;	/* Error recovery thread. */
	struct completion	*eh_action;	/* Wait for specific actions on the
						   host. */
	wait_queue_head_t	host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/*
	 * Area to keep a shared tag map (if needed, will be
	 * NULL if not).
	 */
	union {
		struct blk_queue_tag	*bqt;
		struct blk_mq_tag_set	tag_set;
	};

	atomic_t host_busy;		/* commands actually active on low-level */
	atomic_t host_blocked;

	unsigned int host_failed;	/* commands that failed.
					   protected by host_lock */
	unsigned int host_eh_scheduled;	/* EH scheduled without command */

	unsigned int host_no;	/* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;


	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned long dma_boundary;
	/*
	 * Used to assign serial numbers to the cmds.
	 * Protected by the host lock.
	 */
	unsigned long cmd_serial_number;

	unsigned active_mode:2;
	unsigned unchecked_isa_dma:1;
	unsigned use_clustering:1;
	unsigned use_blk_tcq:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/*
	 * Ordered write support
	 */
	unsigned ordered_tag:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	unsigned use_blk_mq:1;
	unsigned use_cmd_list:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[20];
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/*
	 * q used for scsi_tgt msgs, async events or any other requests that
	 * need to be processed in userspace
	 */
	struct request_queue *uspace_req_q;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev, shost_dev;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head sht_legacy_list;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]	/* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

#define		class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}
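/*
 * Example (illustrative sketch): the hostdata[] area declared at the end of
 * struct Scsi_Host is sized by the second argument of scsi_host_alloc(),
 * and shost_priv() returns a pointer to it.  The struct foo_hba type and
 * foo_template are invented for the sketch.
 *
 *	struct foo_hba {
 *		void __iomem *regs;
 *		spinlock_t lock;
 *	};
 *
 *	struct Scsi_Host *shost;
 *	struct foo_hba *hba;
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	hba = shost_priv(shost);	(points at shost->hostdata)
 */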

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern bool scsi_use_blk_mq;

static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
{
	return shost->use_blk_mq;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}
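/*
 * Example (illustrative sketch): the usual allocate / add / scan / remove
 * sequence built from the calls above, as it might appear in the probe and
 * remove paths of a hypothetical "foo" driver.  Error handling and the
 * foo_* names are invented.
 *
 *	probe:
 *		shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *		... program the hardware, request the IRQ ...
 *		error = scsi_add_host(shost, &pdev->dev);
 *		if (error) {
 *			scsi_host_put(shost);
 *			return error;
 *		}
 *		scsi_scan_host(shost);
 *
 *	remove:
 *		scsi_remove_host(shost);
 *		... quiesce the hardware, free the IRQ ...
 *		scsi_host_put(shost);
 */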

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING ||
	       shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
						void (*) (struct request_queue *));
/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
	static unsigned char cap[] = { 0,
				       SHOST_DIF_TYPE1_PROTECTION,
				       SHOST_DIF_TYPE2_PROTECTION,
				       SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,
	SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}
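/*
 * Example (illustrative sketch): an HBA that can do DIF Type 1 on the wire
 * and DIX with either guard tag format would advertise it like this,
 * typically from its probe routine before scsi_add_host().  The mask shown
 * is just one plausible combination.
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);
 */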

/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */