/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;


#define SG_ALL	SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
		     void __user *arg);

#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When unknown ioctl is passed return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the driver has accepted the
	 * command.  It must also push it to the HBA if the scsi_cmnd
	 * flag SCMD_LAST is set, or if the driver does not implement
	 * commit_rqs.  The done() function must be called on the command
	 * when the driver has finished with it.  (You may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand.)
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);

	/*
	 * The commit_rqs function is used to trigger a hardware
	 * doorbell after some requests have been queued with
	 * queuecommand, when an error is encountered before sending
	 * the request with SCMD_LAST set.
	 *
	 * STATUS: OPTIONAL
	 */
	void (*commit_rqs)(struct Scsi_Host *, u16);

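	/*
	 * Illustrative sketch only, not part of this interface: a minimal
	 * queuecommand for a hypothetical "foo" driver following the
	 * accept/reject rules above.  struct foo_hba, foo_hw_queue_full()
	 * and foo_post_to_hw() are invented names; shost_priv() is defined
	 * later in this header, and the HBA's completion interrupt handler
	 * would invoke the done callback once the hardware finishes.
	 *
	 *	static int foo_queuecommand(struct Scsi_Host *shost,
	 *				    struct scsi_cmnd *cmd)
	 *	{
	 *		struct foo_hba *hba = shost_priv(shost);
	 *
	 *		if (foo_hw_queue_full(hba))
	 *			return SCSI_MLQUEUE_HOST_BUSY;	// cmd untouched, retried later
	 *
	 *		foo_post_to_hw(hba, cmd);	// post to HBA, ring doorbell
	 *		return 0;			// accepted
	 *	}
	 */
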
	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine.  When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_target_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);

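	/*
	 * Illustrative sketch only: a device reset handler for a hypothetical
	 * "foo" driver.  It runs from the EH thread as described above, so it
	 * may sleep; foo_send_lun_reset() is an invented helper that would
	 * issue a LOGICAL UNIT RESET task management function to the HBA.
	 *
	 *	static int foo_eh_device_reset_handler(struct scsi_cmnd *cmd)
	 *	{
	 *		struct scsi_device *sdev = cmd->device;
	 *		struct foo_hba *hba = shost_priv(sdev->host);
	 *
	 *		if (foo_send_lun_reset(hba, sdev->id, sdev->lun))
	 *			return FAILED;
	 *		return SUCCESS;
	 *	}
	 */
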
	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_change_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, eg.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around, clean
	 *     up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

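	/*
	 * Illustrative sketch only: per-LUN setup and teardown for a
	 * hypothetical "foo" driver, assuming <linux/slab.h> for
	 * kzalloc()/kfree() plus an invented struct foo_lun and
	 * FOO_QUEUE_DEPTH.  Memory hung off sdev->hostdata in slave_alloc()
	 * is released again in slave_destroy().
	 *
	 *	static int foo_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		struct foo_lun *lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	 *
	 *		if (!lun)
	 *			return -ENOMEM;
	 *		sdev->hostdata = lun;
	 *		return 0;
	 *	}
	 *
	 *	static int foo_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		scsi_change_queue_depth(sdev, FOO_QUEUE_DEPTH);
	 *		return 0;
	 *	}
	 *
	 *	static void foo_slave_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */
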
	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int);

	/*
	 * This function lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	int (* map_queues)(struct Scsi_Host *shost);

	/*
	 * This function determines the BIOS parameters for a given
	 * hard disk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other information to
	 * the world outside the kernel, i.e. userspace, and it also provides
	 * an interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires.  The return value tells the
	 * timer routine how to finish the io timeout handling.
	 *
	 * Status: OPTIONAL
	 */
	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

	/*
	 * This is an optional routine that allows the transport to initiate
	 * LLD adapter or firmware reset using sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */
	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET	1
#define SCSI_FIRMWARE_RESET	2

	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * show_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a single hw queue in HBA will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations besides segment count.
	 */
	unsigned int max_sectors;

	/*
	 * Maximum size in bytes of a single segment.
	 */
	unsigned int max_segment_size;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	unsigned long virt_boundary_mask;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

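	/*
	 * Illustrative sketch only: a common bios_param convention for a
	 * hypothetical "foo" driver - report a made-up 255-head, 63-sector
	 * geometry and derive the cylinder count from the capacity
	 * (sector_div() divides its first argument in place).
	 *
	 *	static int foo_bios_param(struct scsi_device *sdev,
	 *				  struct block_device *bdev,
	 *				  sector_t capacity, int geom[])
	 *	{
	 *		geom[0] = 255;			// heads
	 *		geom[1] = 63;			// sectors per track
	 *		sector_div(capacity, 255 * 63);
	 *		geom[2] = capacity;		// cylinders
	 *		return 0;
	 *	}
	 */
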
	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/* If use block layer to manage tags, this is tag allocation policy */
	int tag_alloc_policy;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the low-level driver supports blk-mq only */
	unsigned force_blk_mq:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * Pointer to the SCSI device attribute groups for this host,
	 * NULL terminated.
	 */
	const struct attribute_group **sdev_groups;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 *   Vendor Type and ID formatting requirements specified in
	 *   scsi_netlink.h
	 */
	u64 vendor_id;

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;
	struct scsi_host_cmd_pool *cmd_pool;

	/* Delay for runtime autosuspend */
	int rpm_autosuspend_delay;
};

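/*
 * Illustrative sketch only: how a low-level driver for a hypothetical "foo"
 * HBA might fill in the template.  The handler functions and numeric limits
 * are invented; fields left unset keep the behaviour described in the
 * comments above.
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_device_reset_handler = foo_eh_device_reset_handler,
 *		.slave_alloc		= foo_slave_alloc,
 *		.slave_configure	= foo_slave_configure,
 *		.slave_destroy		= foo_slave_destroy,
 *		.bios_param		= foo_bios_param,
 *		.this_id		= -1,
 *		.can_queue		= 64,
 *		.sg_tablesize		= SG_ALL,
 *		.cmd_per_lun		= 8,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_size		= sizeof(struct foo_cmd),
 *	};
 */
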
/*
 * Temporary #define for host lock push down.  Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck (cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}

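/*
 * Illustrative sketch only: how a legacy, host-lock-based driver would use
 * DEF_SCSI_QCMD.  The driver supplies a "bar_queuecommand_lck" helper (the
 * name and body are invented), the macro generates bar_queuecommand() which
 * takes shost->host_lock around it, and the template then points
 * .queuecommand at bar_queuecommand.
 *
 *	static int bar_queuecommand_lck(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		// runs with the host lock held and interrupts disabled
 *		bar_post_to_hw(cmd, done);
 *		return 0;
 *	}
 *
 *	static DEF_SCSI_QCMD(bar_queuecommand)
 */
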
/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held.  NEVER
	 * access this list directly from a driver.
	 */
	struct list_head __devices;
	struct list_head __targets;

	struct list_head starved_list;

	spinlock_t default_lock;
	spinlock_t *host_lock;

	struct mutex scan_mutex;	/* serialize scanning activity */

	struct list_head eh_cmd_q;
	struct task_struct *ehandler;	/* Error recovery thread. */
	struct completion *eh_action;	/* Wait for specific actions on the
					   host. */
	wait_queue_head_t host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/* Area to keep a shared tag map */
	struct blk_mq_tag_set tag_set;

	atomic_t host_blocked;

	unsigned int host_failed;	/* commands that failed.
					   protected by host_lock */
	unsigned int host_eh_scheduled;	/* EH scheduled without command */

	unsigned int host_no;		/* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;

	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length CDBs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned active_mode:2;
	unsigned unchecked_isa_dma:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;)
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	unsigned use_cmd_list:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[20];
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int irq;

	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device shost_gendev, shost_dev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]	/* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

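/*
 * Illustrative sketch only: per-host driver state is carved out of the
 * hostdata[] area above by passing its size to scsi_host_alloc() (declared
 * below) and is then reached through shost_priv().  struct foo_hba and its
 * contents are invented.
 *
 *	struct foo_hba {
 *		void __iomem *regs;
 *		spinlock_t hw_lock;
 *	};
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	hba = shost_priv(shost);	// points at shost->hostdata[]
 */
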
#define class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING ||
	       shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
	static unsigned char cap[] = { 0,
				       SHOST_DIF_TYPE1_PROTECTION,
				       SHOST_DIF_TYPE2_PROTECTION,
				       SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,
	SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}

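/*
 * Illustrative sketch only: typical host registration and removal for a
 * hypothetical PCI "foo" driver, including how a DIF/DIX-capable HBA might
 * advertise its protection capabilities and guard type with the helpers
 * above.  Hardware bring-up and error handling are abbreviated, and all
 * foo_* names are invented.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct Scsi_Host *shost;
 *		int err;
 *
 *		shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *		if (!shost)
 *			return -ENOMEM;
 *
 *		shost->max_id = 16;
 *		shost->max_lun = 256;
 *		shost->max_cmd_len = 16;
 *
 *		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *					  SHOST_DIX_TYPE1_PROTECTION);
 *		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 *
 *		err = scsi_add_host(shost, &pdev->dev);
 *		if (err) {
 *			scsi_host_put(shost);
 *			return err;
 *		}
 *		pci_set_drvdata(pdev, shost);
 *		scsi_scan_host(shost);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		struct Scsi_Host *shost = pci_get_drvdata(pdev);
 *
 *		scsi_remove_host(shost);
 *		scsi_host_put(shost);
 *	}
 */
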
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */