xref: /openbmc/linux/include/scsi/scsi_host.h (revision 46eeaa11bdd1bc9e077bdf741d32ca7235d263c6)
1  /* SPDX-License-Identifier: GPL-2.0 */
2  #ifndef _SCSI_SCSI_HOST_H
3  #define _SCSI_SCSI_HOST_H
4  
5  #include <linux/device.h>
6  #include <linux/list.h>
7  #include <linux/types.h>
8  #include <linux/workqueue.h>
9  #include <linux/mutex.h>
10  #include <linux/seq_file.h>
11  #include <linux/blk-mq.h>
12  #include <scsi/scsi.h>
13  
14  struct block_device;
15  struct completion;
16  struct module;
17  struct scsi_cmnd;
18  struct scsi_device;
19  struct scsi_target;
20  struct Scsi_Host;
21  struct scsi_transport_template;
22  
23  
24  #define SG_ALL	SG_CHUNK_SIZE
25  
26  #define MODE_UNKNOWN 0x00
27  #define MODE_INITIATOR 0x01
28  #define MODE_TARGET 0x02
29  
30  /**
31   * enum scsi_timeout_action - How to handle a command that timed out.
32   * @SCSI_EH_DONE: The command has already been completed.
33   * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion.
34   * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command.
35   */
36  enum scsi_timeout_action {
37  	SCSI_EH_DONE,
38  	SCSI_EH_RESET_TIMER,
39  	SCSI_EH_NOT_HANDLED,
40  };
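
/*
 * Illustrative sketch (not part of this header): a hypothetical LLD "foo"
 * might implement eh_timed_out() along these lines, where foo_cmd_in_flight()
 * is an assumed driver helper.  If the command is still making progress the
 * timer is restarted, otherwise normal error handling takes over.
 *
 *	static enum scsi_timeout_action foo_eh_timed_out(struct scsi_cmnd *scmd)
 *	{
 *		if (foo_cmd_in_flight(scmd))
 *			return SCSI_EH_RESET_TIMER;
 *		return SCSI_EH_NOT_HANDLED;
 *	}
 */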
41  
42  struct scsi_host_template {
43  	/*
44  	 * Put fields referenced in IO submission path together in
45  	 * same cacheline
46  	 */
47  
48  	/*
49  	 * Additional per-command data allocated for the driver.
50  	 */
51  	unsigned int cmd_size;
52  
53  	/*
54  	 * The queuecommand function is used to queue up a scsi
55  	 * command block to the LLDD.  When the driver has finished
56  	 * processing the command, the done callback is invoked.
57  	 *
58  	 * If queuecommand returns 0, then the driver has accepted the
59  	 * command.  It must also push it to the HBA if the scsi_cmnd
60  	 * flag SCMD_LAST is set, or if the driver does not implement
61  	 * commit_rqs.  The done() function must be called on the command
62  	 * when the driver has finished with it. (you may call done on the
63  	 * command before queuecommand returns, but in this case you
64  	 * *must* return 0 from queuecommand).
65  	 *
66  	 * Queuecommand may also reject the command, in which case it may
67  	 * not touch the command and must not call done() for it.
68  	 *
69  	 * There are two possible rejection returns:
70  	 *
71  	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
72  	 *   allow commands to other devices serviced by this host.
73  	 *
74  	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
75  	 *   host temporarily.
76  	 *
77  	 * For compatibility, any other non-zero return is treated the
78  	 * same as SCSI_MLQUEUE_HOST_BUSY.
79  	 *
80  	 * NOTE: "temporarily" means either until the next command for
81  	 * this device/host completes, or a period of time determined by
82  	 * I/O pressure in the system if there are no other outstanding
83  	 * commands.
84  	 *
85  	 * STATUS: REQUIRED
86  	 */
87  	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
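
	/*
	 * Illustrative sketch (not part of this header): a minimal
	 * queuecommand() for a hypothetical driver "foo".  foo_hw_ready()
	 * and foo_hw_submit() are assumed driver helpers; on completion the
	 * driver's interrupt path would call scsi_done(cmd).
	 *
	 *	static int foo_queuecommand(struct Scsi_Host *shost,
	 *				    struct scsi_cmnd *cmd)
	 *	{
	 *		struct foo_hba *hba = shost_priv(shost);
	 *
	 *		if (!foo_hw_ready(hba))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		foo_hw_submit(hba, cmd);
	 *		return 0;
	 *	}
	 */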
88  
89  	/*
90  	 * The commit_rqs function is used to trigger a hardware
91  	 * doorbell after some requests have been queued with
92  	 * queuecommand, when an error is encountered before sending
93  	 * the request with SCMD_LAST set.
94  	 *
95  	 * STATUS: OPTIONAL
96  	 */
97  	void (*commit_rqs)(struct Scsi_Host *, u16);
98  
99  	struct module *module;
100  	const char *name;
101  
102  	/*
103  	 * The info function will return whatever useful information the
104  	 * developer sees fit.  If not provided, then the name field will
105  	 * be used instead.
106  	 *
107  	 * Status: OPTIONAL
108  	 */
109  	const char *(*info)(struct Scsi_Host *);
110  
111  	/*
112  	 * Ioctl interface
113  	 *
114  	 * Status: OPTIONAL
115  	 */
116  	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
117  		     void __user *arg);
118  
119  
120  #ifdef CONFIG_COMPAT
121  	/*
122  	 * Compat handler. Handles the 32-bit ABI.
123  	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
124  	 *
125  	 * Status: OPTIONAL
126  	 */
127  	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
128  			    void __user *arg);
129  #endif
130  
131  	int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
132  	int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
133  
134  	/*
135  	 * This is an error handling strategy routine.  You don't need to
136  	 * define one of these if you don't want to - there is a default
137  	 * routine that is present that should work in most cases.  For those
138  	 * driver authors that have the inclination and ability to write their
139  	 * own strategy routine, this is where it is specified.  Note - the
140  	 * strategy routine is *ALWAYS* run in the context of the kernel eh
141  	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
142  	 * handler when you execute this, and you are also guaranteed to
143  	 * *NOT* have any other commands being queued while you are in the
144  	 * strategy routine. When you return from this function, operations
145  	 * return to normal.
146  	 *
147  	 * See scsi_error.c scsi_unjam_host for additional comments about
148  	 * what this function should and should not be attempting to do.
149  	 *
150  	 * Status: REQUIRED	(at least one of them)
151  	 */
152  	int (* eh_abort_handler)(struct scsi_cmnd *);
153  	int (* eh_device_reset_handler)(struct scsi_cmnd *);
154  	int (* eh_target_reset_handler)(struct scsi_cmnd *);
155  	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
156  	int (* eh_host_reset_handler)(struct scsi_cmnd *);
157  
158  	/*
159  	 * Before the mid layer attempts to scan for a new device where none
160  	 * currently exists, it will call this entry in your driver.  Should
161  	 * your driver need to allocate any structs or perform any other init
162  	 * items in order to send commands to a currently unused target/lun
163  	 * combo, then this is where you can perform those allocations.  This
164  	 * is specifically so that drivers won't have to perform any kind of
165  	 * "is this a new device" checks in their queuecommand routine,
166  	 * thereby making the hot path a bit quicker.
167  	 *
168  	 * Return values: 0 on success, non-0 on failure
169  	 *
170  	 * Deallocation:  If we didn't find any devices at this ID, you will
171  	 * get an immediate call to slave_destroy().  If we find something
172  	 * here then you will get a call to slave_configure(), then the
173  	 * device will be used for however long it is kept around, then when
174  	 * the device is removed from the system (or possibly at reboot
175  	 * time), you will then get a call to slave_destroy().  This is
176  	 * assuming you implement slave_configure and slave_destroy.
177  	 * However, if you allocate memory and hang it off the device struct,
178  	 * then you must implement the slave_destroy() routine at a minimum
179  	 * in order to avoid leaking memory
180  	 * each time a device is torn down.
181  	 *
182  	 * Status: OPTIONAL
183  	 */
184  	int (* slave_alloc)(struct scsi_device *);
185  
186  	/*
187  	 * Once the device has responded to an INQUIRY and we know the
188  	 * device is online, we call into the low level driver with the
189  	 * struct scsi_device *.  If the low level device driver implements
190  	 * this function, it *must* perform the task of setting the queue
191  	 * depth on the device.  All other tasks are optional and depend
192  	 * on what the driver supports and various implementation details.
193  	 *
194  	 * Things currently recommended to be handled at this time include:
195  	 *
196  	 * 1.  Setting the device queue depth.  Proper setting of this is
197  	 *     described in the comments for scsi_change_queue_depth.
198  	 * 2.  Determining if the device supports the various synchronous
199  	 *     negotiation protocols.  The device struct will already have
200  	 *     responded to INQUIRY and the results of the standard items
201  	 *     will have been shoved into the various device flag bits, e.g.
202  	 *     device->sdtr will be true if the device supports SDTR messages.
203  	 * 3.  Allocating command structs that the device will need.
204  	 * 4.  Setting the default timeout on this device (if needed).
205  	 * 5.  Anything else the low level driver might want to do on a device
206  	 *     specific setup basis...
207  	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
208  	 *     as offline on error so that no access will occur.  If you return
209  	 *     non-0, your slave_destroy routine will never get called for this
210  	 *     device, so don't leave any loose memory hanging around, clean
211  	 *     up after yourself before returning non-0
212  	 *
213  	 * Status: OPTIONAL
214  	 */
215  	int (* slave_configure)(struct scsi_device *);
216  
217  	/*
218  	 * Immediately prior to deallocating the device and after all activity
219  	 * has ceased, the mid layer calls this routine so that the low level
220  	 * driver may completely detach itself from the scsi device and vice
221  	 * versa.  The low level driver is responsible for freeing any memory
222  	 * it allocated in the slave_alloc or slave_configure calls.
223  	 *
224  	 * Status: OPTIONAL
225  	 */
226  	void (* slave_destroy)(struct scsi_device *);
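
	/*
	 * Illustrative sketch (not part of this header): typical pairing of
	 * slave_alloc()/slave_configure()/slave_destroy() for a hypothetical
	 * driver "foo".  struct foo_lun is an assumed per-LUN structure and
	 * the allocation assumes <linux/slab.h>.
	 *
	 *	static int foo_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		struct foo_lun *lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	 *
	 *		if (!lp)
	 *			return -ENOMEM;
	 *		sdev->hostdata = lp;
	 *		return 0;
	 *	}
	 *
	 *	static int foo_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		scsi_change_queue_depth(sdev, 32);
	 *		return 0;
	 *	}
	 *
	 *	static void foo_slave_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */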
227  
228  	/*
229  	 * Before the mid layer attempts to scan for a new device attached
230  	 * to a target where no target currently exists, it will call this
231  	 * entry in your driver.  Should your driver need to allocate any
232  	 * structs or perform any other init items in order to send commands
233  	 * to a currently unused target, then this is where you can perform
234  	 * those allocations.
235  	 *
236  	 * Return values: 0 on success, non-0 on failure
237  	 *
238  	 * Status: OPTIONAL
239  	 */
240  	int (* target_alloc)(struct scsi_target *);
241  
242  	/*
243  	 * Immediately prior to deallocating the target structure, and
244  	 * after all activity to attached scsi devices has ceased, the
245  	 * midlayer calls this routine so that the driver may deallocate
246  	 * and terminate any references to the target.
247  	 *
248  	 * Status: OPTIONAL
249  	 */
250  	void (* target_destroy)(struct scsi_target *);
251  
252  	/*
253  	 * If a host has the ability to discover targets on its own instead
254  	 * of scanning the entire bus, it can fill in this function and
255  	 * call scsi_scan_host().  This function will be called periodically
256  	 * until it returns 1 with the scsi_host and the elapsed time of
257  	 * the scan in jiffies.
258  	 *
259  	 * Status: OPTIONAL
260  	 */
261  	int (* scan_finished)(struct Scsi_Host *, unsigned long);
262  
263  	/*
264  	 * If the host wants to be called before the scan starts, but
265  	 * after the midlayer has set up ready for the scan, it can fill
266  	 * in this function.
267  	 *
268  	 * Status: OPTIONAL
269  	 */
270  	void (* scan_start)(struct Scsi_Host *);
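
	/*
	 * Illustrative sketch (not part of this header): how a hypothetical
	 * driver "foo" might pair scan_start() with scan_finished() above.
	 * The foo_fw_*() calls are assumed firmware-discovery helpers; the
	 * scan is declared finished when discovery completes or after ten
	 * seconds.
	 *
	 *	static void foo_scan_start(struct Scsi_Host *shost)
	 *	{
	 *		foo_fw_start_discovery(shost_priv(shost));
	 *	}
	 *
	 *	static int foo_scan_finished(struct Scsi_Host *shost,
	 *				     unsigned long time)
	 *	{
	 *		return foo_fw_discovery_done(shost_priv(shost)) ||
	 *		       time >= 10 * HZ;
	 *	}
	 */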
271  
272  	/*
273  	 * Fill in this function to allow the queue depth of this host
274  	 * to be changeable (on a per device basis).  Returns either
275  	 * the current queue depth setting (may be different from what
276  	 * was passed in) or an error.  An error should only be
277  	 * returned if the requested depth is legal but the driver was
278  	 * unable to set it.  If the requested depth is illegal, the
279  	 * driver should set and return the closest legal queue depth.
280  	 *
281  	 * Status: OPTIONAL
282  	 */
283  	int (* change_queue_depth)(struct scsi_device *, int);
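
	/*
	 * Illustrative sketch (not part of this header): clamping to a
	 * hypothetical hardware limit FOO_MAX_QD before handing the request
	 * to the midlayer helper scsi_change_queue_depth().
	 *
	 *	static int foo_change_queue_depth(struct scsi_device *sdev,
	 *					  int depth)
	 *	{
	 *		if (depth > FOO_MAX_QD)
	 *			depth = FOO_MAX_QD;
	 *		return scsi_change_queue_depth(sdev, depth);
	 *	}
	 */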
284  
285  	/*
286  	 * This function lets the driver expose the queue mapping
287  	 * to the block layer.
288  	 *
289  	 * Status: OPTIONAL
290  	 */
291  	void (* map_queues)(struct Scsi_Host *shost);
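
	/*
	 * Illustrative sketch (not part of this header): a driver with no
	 * special affinity requirements could fall back to the block
	 * layer's default CPU-to-queue mapping for its default map.
	 *
	 *	static void foo_map_queues(struct Scsi_Host *shost)
	 *	{
	 *		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
	 *	}
	 */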
292  
293  	/*
294  	 * SCSI interface of blk_poll - poll for IO completions.
295  	 * Only applicable if SCSI LLD exposes multiple h/w queues.
296  	 *
297  	 * Return value: Number of completed entries found.
298  	 *
299  	 * Status: OPTIONAL
300  	 */
301  	int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);
302  
303  	/*
304  	 * Check if scatterlists need to be padded for DMA draining.
305  	 *
306  	 * Status: OPTIONAL
307  	 */
308  	bool (* dma_need_drain)(struct request *rq);
309  
310  	/*
311  	 * This function determines the BIOS parameters for a given
312  	 * hard disk.  These tend to be numbers that are made up by
313  	 * the host adapter.  Parameters:
314  	 * size, device, list (heads, sectors, cylinders)
315  	 *
316  	 * Status: OPTIONAL
317  	 */
318  	int (* bios_param)(struct scsi_device *, struct block_device *,
319  			sector_t, int []);
320  
321  	/*
322  	 * This function is called when one or more partitions on the
323  	 * device reach beyond the end of the device.
324  	 *
325  	 * Status: OPTIONAL
326  	 */
327  	void (*unlock_native_capacity)(struct scsi_device *);
328  
329  	/*
330  	 * Can be used to export driver statistics and other information to
331  	 * the world outside the kernel, i.e. userspace, and it also provides
332  	 * an interface to feed the driver with information.
333  	 *
334  	 * Status: OBSOLETE
335  	 */
336  	int (*show_info)(struct seq_file *, struct Scsi_Host *);
337  	int (*write_info)(struct Scsi_Host *, char *, int);
338  
339  	/*
340  	 * This is an optional routine that allows the transport to become
341  	 * involved when a scsi io timer fires. The return value tells the
342  	 * timer routine how to finish the io timeout handling.
343  	 *
344  	 * Status: OPTIONAL
345  	 */
346  	enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *);
347  	/*
348  	 * Optional routine that allows the transport to decide if a cmd
349  	 * is retryable. Return true if the transport is in a state the
350  	 * cmd should be retried on.
351  	 */
352  	bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);
353  
354  	/* This is an optional routine that allows the transport to initiate
355  	 * an LLD adapter or firmware reset using a sysfs attribute.
356  	 *
357  	 * Return values: 0 on success, negative value on failure.
358  	 *
359  	 * Status: OPTIONAL
360  	 */
361  
362  	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
363  #define SCSI_ADAPTER_RESET	1
364  #define SCSI_FIRMWARE_RESET	2
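
	/*
	 * Illustrative sketch (not part of this header): dispatching on
	 * reset_type in a hypothetical host_reset() implementation.
	 * foo_reset_adapter() and foo_reset_firmware() are assumed helpers.
	 *
	 *	static int foo_host_reset(struct Scsi_Host *shost, int reset_type)
	 *	{
	 *		struct foo_hba *hba = shost_priv(shost);
	 *
	 *		if (reset_type == SCSI_ADAPTER_RESET)
	 *			return foo_reset_adapter(hba);
	 *		if (reset_type == SCSI_FIRMWARE_RESET)
	 *			return foo_reset_firmware(hba);
	 *		return -EOPNOTSUPP;
	 *	}
	 */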
365  
366  
367  	/*
368  	 * Name of proc directory
369  	 */
370  	const char *proc_name;
371  
372  	/*
373  	 * This determines if we will use a non-interrupt-driven
374  	 * or an interrupt-driven scheme.  It is set to the maximum number
375  	 * of simultaneous commands a single hw queue in the HBA will accept.
376  	 */
377  	int can_queue;
378  
379  	/*
380  	 * In many instances, especially where disconnect / reconnect are
381  	 * supported, our host also has an ID on the SCSI bus.  If this is
382  	 * the case, then it must be reserved.  Please set this_id to -1 if
383  	 * your setup is in single initiator mode, and the host lacks an
384  	 * ID.
385  	 */
386  	int this_id;
387  
388  	/*
389  	 * This determines the degree to which the host adapter is capable
390  	 * of scatter-gather.
391  	 */
392  	unsigned short sg_tablesize;
393  	unsigned short sg_prot_tablesize;
394  
395  	/*
396  	 * Set this if the host adapter has limitations besides the segment count.
397  	 */
398  	unsigned int max_sectors;
399  
400  	/*
401  	 * Maximum size in bytes of a single segment.
402  	 */
403  	unsigned int max_segment_size;
404  
405  	/*
406  	 * DMA scatter gather segment boundary limit. A segment crossing this
407  	 * boundary will be split in two.
408  	 */
409  	unsigned long dma_boundary;
410  
411  	unsigned long virt_boundary_mask;
412  
413  	/*
414  	 * This specifies "machine infinity" for host templates which don't
415  	 * limit the transfer size.  Note this limit represents an absolute
416  	 * maximum, and may be over the transfer limits allowed for
417  	 * individual devices (e.g. 256 for SCSI-1).
418  	 */
419  #define SCSI_DEFAULT_MAX_SECTORS	1024
420  
421  	/*
422  	 * True if this host adapter can make good use of linked commands.
423  	 * This will allow more than one command to be queued to a given
424  	 * unit on a given host.  Set this to the maximum number of command
425  	 * blocks to be provided for each device.  Set this to 1 for one
426  	 * command block per lun, 2 for two, etc.  Do not set this to 0.
427  	 * You should make sure that the host adapter will do the right thing
428  	 * before you try setting this above 1.
429  	 */
430  	short cmd_per_lun;
431  
432  	/* If the block layer manages tags, this is the tag allocation policy */
433  	int tag_alloc_policy;
434  
435  	/*
436  	 * Track QUEUE_FULL events and reduce queue depth on demand.
437  	 */
438  	unsigned track_queue_depth:1;
439  
440  	/*
441  	 * This specifies the mode that a LLD supports.
442  	 */
443  	unsigned supported_mode:2;
444  
445  	/*
446  	 * True for emulated SCSI host adapters (e.g. ATAPI).
447  	 */
448  	unsigned emulated:1;
449  
450  	/*
451  	 * True if the low-level driver performs its own reset-settle delays.
452  	 */
453  	unsigned skip_settle_delay:1;
454  
455  	/* True if the controller does not support WRITE SAME */
456  	unsigned no_write_same:1;
457  
458  	/* True if the host uses host-wide tagspace */
459  	unsigned host_tagset:1;
460  
461  	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
462  	unsigned queuecommand_may_block:1;
463  
464  	/*
465  	 * Countdown for host blocking with no commands outstanding.
466  	 */
467  	unsigned int max_host_blocked;
468  
469  	/*
470  	 * Default value for the blocking.  If the queue is empty,
471  	 * host_blocked counts down in the request path until it reaches
472  	 * zero, at which point host operations are restarted.
473  	 *
474  	 * FIXME: This should probably be a value in the template
475  	 */
476  #define SCSI_DEFAULT_HOST_BLOCKED	7
477  
478  	/*
479  	 * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
480  	 */
481  	const struct attribute_group **shost_groups;
482  
483  	/*
484  	 * Pointer to the SCSI device attribute groups for this host,
485  	 * NULL terminated.
486  	 */
487  	const struct attribute_group **sdev_groups;
488  
489  	/*
490  	 * Vendor Identifier associated with the host
491  	 *
492  	 * Note: When specifying vendor_id, be sure to read the
493  	 *   Vendor Type and ID formatting requirements specified in
494  	 *   scsi_netlink.h
495  	 */
496  	u64 vendor_id;
497  
498  	/* Delay for runtime autosuspend */
499  	int rpm_autosuspend_delay;
500  };
501  
502  /*
503   * Temporary #define for host lock push down. Can be removed when all
504   * drivers have been updated to take advantage of unlocked
505   * queuecommand.
506   *
507   */
508  #define DEF_SCSI_QCMD(func_name) \
509  	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
510  	{								\
511  		unsigned long irq_flags;				\
512  		int rc;							\
513  		spin_lock_irqsave(shost->host_lock, irq_flags);		\
514  		rc = func_name##_lck(cmd);				\
515  		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
516  		return rc;						\
517  	}
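
/*
 * Illustrative sketch (not part of this header): a legacy driver wraps its
 * locked handler with DEF_SCSI_QCMD and points .queuecommand at the
 * generated function.  foo_issue_locked() is an assumed helper that runs
 * with shost->host_lock held and interrupts disabled.
 *
 *	static int foo_queuecommand_lck(struct scsi_cmnd *cmd)
 *	{
 *		return foo_issue_locked(cmd);
 *	}
 *
 *	static DEF_SCSI_QCMD(foo_queuecommand)
 *
 * The host template then sets .queuecommand = foo_queuecommand.
 */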
518  
519  
520  /*
521   * shost state: If you alter this, you also need to alter scsi_sysfs.c
522   * (for the ascii descriptions) and the state model enforcer:
523   * scsi_host_set_state()
524   */
525  enum scsi_host_state {
526  	SHOST_CREATED = 1,
527  	SHOST_RUNNING,
528  	SHOST_CANCEL,
529  	SHOST_DEL,
530  	SHOST_RECOVERY,
531  	SHOST_CANCEL_RECOVERY,
532  	SHOST_DEL_RECOVERY,
533  };
534  
535  struct Scsi_Host {
536  	/*
537  	 * __devices is protected by the host_lock, but you should
538  	 * usually use scsi_device_lookup / shost_for_each_device
539  	 * to access it and don't care about locking yourself.
540  	 * In the rare case of being in irq context you can use
541  	 * their __ prefixed variants with the lock held. NEVER
542  	 * access this list directly from a driver.
543  	 */
544  	struct list_head	__devices;
545  	struct list_head	__targets;
546  
547  	struct list_head	starved_list;
548  
549  	spinlock_t		default_lock;
550  	spinlock_t		*host_lock;
551  
552  	struct mutex		scan_mutex;/* serialize scanning activity */
553  
554  	struct list_head	eh_abort_list;
555  	struct list_head	eh_cmd_q;
556  	struct task_struct    * ehandler;  /* Error recovery thread. */
557  	struct completion     * eh_action; /* Wait for specific actions on the
558  					      host. */
559  	wait_queue_head_t       host_wait;
560  	const struct scsi_host_template *hostt;
561  	struct scsi_transport_template *transportt;
562  
563  	struct kref		tagset_refcnt;
564  	struct completion	tagset_freed;
565  	/* Area to keep a shared tag map */
566  	struct blk_mq_tag_set	tag_set;
567  
568  	atomic_t host_blocked;
569  
570  	unsigned int host_failed;	   /* commands that failed.
571  					      protected by host_lock */
572  	unsigned int host_eh_scheduled;    /* EH scheduled without command */
573  
574  	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
575  
576  	/* next two fields are used to bound the time spent in error handling */
577  	int eh_deadline;
578  	unsigned long last_reset;
579  
580  
581  	/*
582  	 * These three parameters can be used to allow for wide scsi,
583  	 * and for host adapters that support multiple busses.
584  	 * The last two should be set to 1 more than the actual max id
585  	 * or lun (e.g. 8 for SCSI parallel systems).
586  	 */
587  	unsigned int max_channel;
588  	unsigned int max_id;
589  	u64 max_lun;
590  
591  	/*
592  	 * This is a unique identifier that must be assigned so that we
593  	 * have some way of identifying each detected host adapter properly
594  	 * and uniquely.  For hosts that do not support more than one card
595  	 * in the system at one time, this does not need to be set.  It is
596  	 * initialized to 0 in scsi_register.
597  	 */
598  	unsigned int unique_id;
599  
600  	/*
601  	 * The maximum length of SCSI commands that this host can accept.
602  	 * Probably 12 for most host adapters, but could be 16 for others,
603  	 * or 260 if the driver supports variable length CDBs.
604  	 * For drivers that don't set this field, a value of 12 is
605  	 * assumed.
606  	 */
607  	unsigned short max_cmd_len;
608  
609  	int this_id;
610  	int can_queue;
611  	short cmd_per_lun;
612  	short unsigned int sg_tablesize;
613  	short unsigned int sg_prot_tablesize;
614  	unsigned int max_sectors;
615  	unsigned int opt_sectors;
616  	unsigned int max_segment_size;
617  	unsigned long dma_boundary;
618  	unsigned long virt_boundary_mask;
619  	/*
620  	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
621  	 *
622  	 * Note: it is assumed that each hardware queue has a queue depth of
623  	 * can_queue. In other words, the total queue depth per host
624  	 * is nr_hw_queues * can_queue. However, when host_tagset is set,
625  	 * the total queue depth is can_queue.
626  	 */
627  	unsigned nr_hw_queues;
628  	unsigned nr_maps;
629  	unsigned active_mode:2;
630  
631  	/*
632  	 * Host has requested that no further requests come through for the
633  	 * time being.
634  	 */
635  	unsigned host_self_blocked:1;
636  
637  	/*
638  	 * Host uses correct SCSI ordering not PC ordering. The bit is
639  	 * set for the minority of drivers whose authors actually read
640  	 * the spec ;).
641  	 */
642  	unsigned reverse_ordering:1;
643  
644  	/* Task mgmt function in progress */
645  	unsigned tmf_in_progress:1;
646  
647  	/* Asynchronous scan in progress */
648  	unsigned async_scan:1;
649  
650  	/* Don't resume host in EH */
651  	unsigned eh_noresume:1;
652  
653  	/* The controller does not support WRITE SAME */
654  	unsigned no_write_same:1;
655  
656  	/* True if the host uses host-wide tagspace */
657  	unsigned host_tagset:1;
658  
659  	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
660  	unsigned queuecommand_may_block:1;
661  
662  	/* Host responded with short (<36 bytes) INQUIRY result */
663  	unsigned short_inquiry:1;
664  
665  	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
666  	unsigned no_scsi2_lun_in_cdb:1;
667  
668  	/*
669  	 * Optional work queue to be utilized by the transport
670  	 */
671  	char work_q_name[20];
672  	struct workqueue_struct *work_q;
673  
674  	/*
675  	 * Task management function work queue
676  	 */
677  	struct workqueue_struct *tmf_work_q;
678  
679  	/*
680  	 * Value host_blocked counts down from
681  	 */
682  	unsigned int max_host_blocked;
683  
684  	/* Protection Information */
685  	unsigned int prot_capabilities;
686  	unsigned char prot_guard_type;
687  
688  	/* legacy crap */
689  	unsigned long base;
690  	unsigned long io_port;
691  	unsigned char n_io_port;
692  	unsigned char dma_channel;
693  	unsigned int  irq;
694  
695  
696  	enum scsi_host_state shost_state;
697  
698  	/* ldm bits */
699  	struct device		shost_gendev, shost_dev;
700  
701  	/*
702  	 * Points to the transport data (if any) which is allocated
703  	 * separately
704  	 */
705  	void *shost_data;
706  
707  	/*
708  	 * Points to the physical bus device we'd use to do DMA
709  	 * Needed just in case we have virtual hosts.
710  	 */
711  	struct device *dma_dev;
712  
713  	/*
714  	 * We should ensure that this is aligned, both for better performance
715  	 * and also because some compilers (m68k) don't automatically force
716  	 * alignment to a long boundary.
717  	 */
718  	unsigned long hostdata[]  /* Used for storage of host specific stuff */
719  		__attribute__ ((aligned (sizeof(unsigned long))));
720  };
721  
722  #define		class_to_shost(d)	\
723  	container_of(d, struct Scsi_Host, shost_dev)
724  
725  #define shost_printk(prefix, shost, fmt, a...)	\
726  	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
727  
728  static inline void *shost_priv(struct Scsi_Host *shost)
729  {
730  	return (void *)shost->hostdata;
731  }
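
/*
 * Illustrative sketch (not part of this header): per-HBA private data is
 * sized at scsi_host_alloc() time and lives in shost->hostdata[].
 * struct foo_hba and foo_template are assumptions.
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	hba = shost_priv(shost);
 */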
732  
733  int scsi_is_host_device(const struct device *);
734  
735  static inline struct Scsi_Host *dev_to_shost(struct device *dev)
736  {
737  	while (!scsi_is_host_device(dev)) {
738  		if (!dev->parent)
739  			return NULL;
740  		dev = dev->parent;
741  	}
742  	return container_of(dev, struct Scsi_Host, shost_gendev);
743  }
744  
745  static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
746  {
747  	return shost->shost_state == SHOST_RECOVERY ||
748  		shost->shost_state == SHOST_CANCEL_RECOVERY ||
749  		shost->shost_state == SHOST_DEL_RECOVERY ||
750  		shost->tmf_in_progress;
751  }
752  
753  extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
754  extern void scsi_flush_work(struct Scsi_Host *);
755  
756  extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int);
757  extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
758  					       struct device *,
759  					       struct device *);
760  #if defined(CONFIG_SCSI_PROC_FS)
761  struct proc_dir_entry *
762  scsi_template_proc_dir(const struct scsi_host_template *sht);
763  #else
764  #define scsi_template_proc_dir(sht) NULL
765  #endif
766  extern void scsi_scan_host(struct Scsi_Host *);
767  extern int scsi_resume_device(struct scsi_device *sdev);
768  extern int scsi_rescan_device(struct scsi_device *sdev);
769  extern void scsi_remove_host(struct Scsi_Host *);
770  extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
771  extern int scsi_host_busy(struct Scsi_Host *shost);
772  extern void scsi_host_put(struct Scsi_Host *t);
773  extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
774  extern const char *scsi_host_state_name(enum scsi_host_state);
775  extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
776  					    enum scsi_host_status status);
777  
778  static inline int __must_check scsi_add_host(struct Scsi_Host *host,
779  					     struct device *dev)
780  {
781  	return scsi_add_host_with_dma(host, dev, dev);
782  }
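
/*
 * Illustrative sketch (not part of this header): the usual host lifecycle
 * in a hypothetical PCI driver; pdev and the err_put_host label are
 * assumptions.
 *
 * On probe:
 *	error = scsi_add_host(shost, &pdev->dev);
 *	if (error)
 *		goto err_put_host;
 *	scsi_scan_host(shost);
 *
 * On remove:
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);
 */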
783  
784  static inline struct device *scsi_get_device(struct Scsi_Host *shost)
785  {
786          return shost->shost_gendev.parent;
787  }
788  
789  /**
790   * scsi_host_scan_allowed - Is scanning of this host allowed
791   * @shost:	Pointer to Scsi_Host.
792   **/
793  static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
794  {
795  	return shost->shost_state == SHOST_RUNNING ||
796  	       shost->shost_state == SHOST_RECOVERY;
797  }
798  
799  extern void scsi_unblock_requests(struct Scsi_Host *);
800  extern void scsi_block_requests(struct Scsi_Host *);
801  extern int scsi_host_block(struct Scsi_Host *shost);
802  extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
803  
804  void scsi_host_busy_iter(struct Scsi_Host *,
805  			 bool (*fn)(struct scsi_cmnd *, void *), void *priv);
806  
807  struct class_container;
808  
809  /*
810   * DIF defines the exchange of protection information between
811   * initiator and SBC block device.
812   *
813   * DIX defines the exchange of protection information between OS and
814   * initiator.
815   */
816  enum scsi_host_prot_capabilities {
817  	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
818  	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
819  	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
820  
821  	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
822  	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
823  	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
824  	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
825  };
826  
827  /*
828   * SCSI hosts which support the Data Integrity Extensions must
829   * indicate their capabilities by setting the prot_capabilities using
830   * this call.
831   */
832  static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
833  {
834  	shost->prot_capabilities = mask;
835  }
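
/*
 * Illustrative sketch (not part of this header): a DIF/DIX-capable driver
 * would typically advertise its capabilities before scsi_add_host(); the
 * exact mask depends on what the hardware actually supports.
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIF_TYPE3_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 */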
836  
837  static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
838  {
839  	return shost->prot_capabilities;
840  }
841  
842  static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
843  {
844  	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
845  }
846  
847  static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
848  {
849  	static unsigned char cap[] = { 0,
850  				       SHOST_DIF_TYPE1_PROTECTION,
851  				       SHOST_DIF_TYPE2_PROTECTION,
852  				       SHOST_DIF_TYPE3_PROTECTION };
853  
854  	if (target_type >= ARRAY_SIZE(cap))
855  		return 0;
856  
857  	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
858  }
859  
860  static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
861  {
862  #if defined(CONFIG_BLK_DEV_INTEGRITY)
863  	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
864  				       SHOST_DIX_TYPE1_PROTECTION,
865  				       SHOST_DIX_TYPE2_PROTECTION,
866  				       SHOST_DIX_TYPE3_PROTECTION };
867  
868  	if (target_type >= ARRAY_SIZE(cap))
869  		return 0;
870  
871  	return shost->prot_capabilities & cap[target_type];
872  #endif
873  	return 0;
874  }
875  
876  /*
877   * All DIX-capable initiators must support the T10-mandated CRC
878   * checksum.  Controllers can optionally implement the IP checksum
879   * scheme which has much lower impact on system performance.  Note
880   * that the main rationale for the checksum is to match integrity
881   * metadata with data.  Detecting bit errors are a job for ECC memory
882   * and buses.
883   */
884  
885  enum scsi_host_guard_type {
886  	SHOST_DIX_GUARD_CRC = 1 << 0,
887  	SHOST_DIX_GUARD_IP  = 1 << 1,
888  };
889  
890  static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
891  {
892  	shost->prot_guard_type = type;
893  }
894  
895  static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
896  {
897  	return shost->prot_guard_type;
898  }
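
/*
 * Illustrative sketch (not part of this header): selecting the guard tag
 * formats to go with the protection mask set above; CRC is mandatory for
 * DIX, the IP checksum is a cheaper option where the HBA supports it.
 *
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);
 */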
899  
900  extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
901  
902  #endif /* _SCSI_SCSI_HOST_H */
903