xref: /openbmc/linux/include/scsi/scsi_host.h (revision 36bccb11)
1 #ifndef _SCSI_SCSI_HOST_H
2 #define _SCSI_SCSI_HOST_H
3 
4 #include <linux/device.h>
5 #include <linux/list.h>
6 #include <linux/types.h>
7 #include <linux/workqueue.h>
8 #include <linux/mutex.h>
9 #include <linux/seq_file.h>
10 #include <scsi/scsi.h>
11 
12 struct request_queue;
13 struct block_device;
14 struct completion;
15 struct module;
16 struct scsi_cmnd;
17 struct scsi_device;
18 struct scsi_host_cmd_pool;
19 struct scsi_target;
20 struct Scsi_Host;
22 struct scsi_transport_template;
23 struct blk_queue_tags;
24 
25 
26 /*
27  * The various choices mean:
28  * NONE: Self evident.	Host adapter is not capable of scatter-gather.
29  * ALL:	 Means that the host adapter module can do scatter-gather,
30  *	 and that there is no limit to the size of the table to which
31  *	 we scatter/gather data.  The value we set here is the maximum
32  *	 single element sglist.  To use chained sglists, the adapter
33  *	 has to set a value beyond ALL (and correctly use the chain
34  *	 handling API).
35  * Anything else:  Indicates the maximum number of chains that can be
36  *	 used in one scatter-gather request.
37  */
38 #define SG_NONE 0
39 #define SG_ALL	SCSI_MAX_SG_SEGMENTS
40 
41 #define MODE_UNKNOWN 0x00
42 #define MODE_INITIATOR 0x01
43 #define MODE_TARGET 0x02
44 
45 #define DISABLE_CLUSTERING 0
46 #define ENABLE_CLUSTERING 1
47 
48 enum {
49 	SCSI_QDEPTH_DEFAULT,	/* default requested change, e.g. from sysfs */
50 	SCSI_QDEPTH_QFULL,	/* scsi-ml requested due to queue full */
51 	SCSI_QDEPTH_RAMP_UP,	/* scsi-ml requested due to threshold event */
52 };
53 
54 struct scsi_host_template {
55 	struct module *module;
56 	const char *name;
57 
58 	/*
59 	 * Used to initialize old-style drivers.  For new-style drivers
60 	 * just perform all work in your module initialization function.
61 	 *
62 	 * Status:  OBSOLETE
63 	 */
64 	int (* detect)(struct scsi_host_template *);
65 
66 	/*
67 	 * Used as unload callback for hosts with old-style drivers.
68 	 *
69 	 * Status: OBSOLETE
70 	 */
71 	int (* release)(struct Scsi_Host *);
72 
73 	/*
74 	 * The info function will return whatever useful information the
75 	 * developer sees fit.  If not provided, then the name field will
76 	 * be used instead.
77 	 *
78 	 * Status: OPTIONAL
79 	 */
80 	const char *(* info)(struct Scsi_Host *);
81 
82 	/*
83 	 * Ioctl interface
84 	 *
85 	 * Status: OPTIONAL
86 	 */
87 	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
88 
89 
90 #ifdef CONFIG_COMPAT
91 	/*
92 	 * Compat handler. Handle 32bit ABI.
93 	 * When unknown ioctl is passed return -ENOIOCTLCMD.
94 	 *
95 	 * Status: OPTIONAL
96 	 */
97 	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
98 #endif
99 
100 	/*
101 	 * The queuecommand function is used to queue up a scsi
102 	 * command block to the LLDD.  When the driver has finished
103 	 * processing the command, the done callback is invoked.
104 	 *
105 	 * If queuecommand returns 0, then the HBA has accepted the
106 	 * command.  The done() function must be called on the command
107 	 * when the driver has finished with it. (you may call done on the
108 	 * command before queuecommand returns, but in this case you
109 	 * *must* return 0 from queuecommand).
110 	 *
111 	 * Queuecommand may also reject the command, in which case it may
112 	 * not touch the command and must not call done() for it.
113 	 *
114 	 * There are two possible rejection returns:
115 	 *
116 	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
117 	 *   allow commands to other devices serviced by this host.
118 	 *
119 	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
120 	 *   host temporarily.
121 	 *
122 	 * For compatibility, any other non-zero return is treated the
123 	 * same as SCSI_MLQUEUE_HOST_BUSY.
124 	 *
125 	 * NOTE: "temporarily" means either until the next command for
126 	 * this device/host completes, or a period of time determined by
127 	 * I/O pressure in the system if there are no other outstanding
128 	 * commands.
129 	 *
130 	 * STATUS: REQUIRED
131 	 */
132 	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
133 
134 	/*
135 	 * The transfer functions are used to queue a scsi command to
136 	 * the LLD. When the driver is finished processing the command
137 	 * the done callback is invoked.
138 	 *
139 	 * This is called to inform the LLD to transfer
140 	 * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) specifies the
141 	 * number of scatterlist entries in the command and
142 	 * scsi_sglist(cmd) returns the scatterlist.
143 	 *
144 	 * return values: see queuecommand
145 	 *
146 	 * If the LLD accepts the cmd, it should set the result to an
147 	 * appropriate value when completed before calling the done function.
148 	 *
149 	 * STATUS: REQUIRED FOR TARGET DRIVERS
150 	 */
151 	/* TODO: rename */
152 	int (* transfer_response)(struct scsi_cmnd *,
153 				  void (*done)(struct scsi_cmnd *));
154 
155 	/*
156 	 * This is an error handling strategy routine.  You don't need to
157 	 * define one of these if you don't want to - there is a default
158 	 * routine that is present that should work in most cases.  For those
159 	 * driver authors that have the inclination and ability to write their
160 	 * own strategy routine, this is where it is specified.  Note - the
161 	 * strategy routine is *ALWAYS* run in the context of the kernel eh
162 	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
163 	 * handler when you execute this, and you are also guaranteed to
164 	 * *NOT* have any other commands being queued while you are in the
165 	 * strategy routine. When you return from this function, operations
166 	 * return to normal.
167 	 *
168 	 * See scsi_error.c scsi_unjam_host for additional comments about
169 	 * what this function should and should not be attempting to do.
170 	 *
171 	 * Status: REQUIRED	(at least one of them)
172 	 */
173 	int (* eh_abort_handler)(struct scsi_cmnd *);
174 	int (* eh_device_reset_handler)(struct scsi_cmnd *);
175 	int (* eh_target_reset_handler)(struct scsi_cmnd *);
176 	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
177 	int (* eh_host_reset_handler)(struct scsi_cmnd *);
178 
179 	/*
180 	 * Before the mid layer attempts to scan for a new device where none
181 	 * currently exists, it will call this entry in your driver.  Should
182 	 * your driver need to allocate any structs or perform any other init
183 	 * items in order to send commands to a currently unused target/lun
184 	 * combo, then this is where you can perform those allocations.  This
185 	 * is specifically so that drivers won't have to perform any kind of
186 	 * "is this a new device" checks in their queuecommand routine,
187 	 * thereby making the hot path a bit quicker.
188 	 *
189 	 * Return values: 0 on success, non-0 on failure
190 	 *
191 	 * Deallocation:  If we didn't find any devices at this ID, you will
192 	 * get an immediate call to slave_destroy().  If we find something
193 	 * here then you will get a call to slave_configure(), then the
194 	 * device will be used for however long it is kept around, then when
195 	 * the device is removed from the system (or possibly at reboot
196 	 * time), you will then get a call to slave_destroy().  This is
197 	 * assuming you implement slave_configure and slave_destroy.
198 	 * However, if you allocate memory and hang it off the device struct,
199 	 * then you must implement the slave_destroy() routine at a minimum
200 	 * in order to avoid leaking memory
201 	 * each time a device is torn down.
202 	 *
203 	 * Status: OPTIONAL
204 	 */
205 	int (* slave_alloc)(struct scsi_device *);
206 
207 	/*
208 	 * Once the device has responded to an INQUIRY and we know the
209 	 * device is online, we call into the low level driver with the
210 	 * struct scsi_device *.  If the low level device driver implements
211 	 * this function, it *must* perform the task of setting the queue
212 	 * depth on the device.  All other tasks are optional and depend
213 	 * on what the driver supports and various implementation details.
214 	 *
215 	 * Things currently recommended to be handled at this time include:
216 	 *
217 	 * 1.  Setting the device queue depth.  Proper setting of this is
218 	 *     described in the comments for scsi_adjust_queue_depth.
219 	 * 2.  Determining if the device supports the various synchronous
220 	 *     negotiation protocols.  The device struct will already have
221 	 *     responded to INQUIRY and the results of the standard items
222 	 *     will have been shoved into the various device flag bits, e.g.
223 	 *     device->sdtr will be true if the device supports SDTR messages.
224 	 * 3.  Allocating command structs that the device will need.
225 	 * 4.  Setting the default timeout on this device (if needed).
226 	 * 5.  Anything else the low level driver might want to do on a device
227 	 *     specific setup basis...
228 	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
229 	 *     as offline on error so that no access will occur.  If you return
230 	 *     non-0, your slave_destroy routine will never get called for this
231 	 *     device, so don't leave any loose memory hanging around; clean
232 	 *     up after yourself before returning non-0.
233 	 *
234 	 * Status: OPTIONAL
235 	 */
236 	int (* slave_configure)(struct scsi_device *);
237 
238 	/*
239 	 * Immediately prior to deallocating the device and after all activity
240 	 * has ceased the mid layer calls this point so that the low level
241 	 * driver may completely detach itself from the scsi device and vice
242 	 * versa.  The low level driver is responsible for freeing any memory
243 	 * it allocated in the slave_alloc or slave_configure calls.
244 	 *
245 	 * Status: OPTIONAL
246 	 */
247 	void (* slave_destroy)(struct scsi_device *);
248 
249 	/*
250 	 * Before the mid layer attempts to scan for a new device attached
251 	 * to a target where no target currently exists, it will call this
252 	 * entry in your driver.  Should your driver need to allocate any
253 	 * structs or perform any other init items in order to send commands
254 	 * to a currently unused target, then this is where you can perform
255 	 * those allocations.
256 	 *
257 	 * Return values: 0 on success, non-0 on failure
258 	 *
259 	 * Status: OPTIONAL
260 	 */
261 	int (* target_alloc)(struct scsi_target *);
262 
263 	/*
264 	 * Immediately prior to deallocating the target structure, and
265 	 * after all activity to attached scsi devices has ceased, the
266 	 * midlayer calls this point so that the driver may deallocate
267 	 * and terminate any references to the target.
268 	 *
269 	 * Status: OPTIONAL
270 	 */
271 	void (* target_destroy)(struct scsi_target *);
272 
273 	/*
274 	 * If a host has the ability to discover targets on its own instead
275 	 * of scanning the entire bus, it can fill in this function and
276 	 * call scsi_scan_host().  This function will then be called
277 	 * periodically, with the scsi_host and the elapsed time of the scan
278 	 * in jiffies, until it returns 1 to indicate the scan has finished.
279 	 *
280 	 * Status: OPTIONAL
281 	 */
282 	int (* scan_finished)(struct Scsi_Host *, unsigned long);
283 
284 	/*
285 	 * If the host wants to be called before the scan starts, but
286 	 * after the midlayer has been set up and is ready for the scan, it
287 	 * in this function.
288 	 *
289 	 * Status: OPTIONAL
290 	 */
291 	void (* scan_start)(struct Scsi_Host *);
292 
293 	/*
294 	 * Fill in this function to allow the queue depth of this host
295 	 * to be changeable (on a per device basis).  Returns either
296 	 * the current queue depth setting (may be different from what
297 	 * was passed in) or an error.  An error should only be
298 	 * returned if the requested depth is legal but the driver was
299 	 * unable to set it.  If the requested depth is illegal, the
300 	 * driver should set and return the closest legal queue depth.
301 	 *
302 	 * Status: OPTIONAL
303 	 */
304 	int (* change_queue_depth)(struct scsi_device *, int, int);
305 
306 	/*
307 	 * Fill in this function to allow the changing of tag types
308 	 * (this also allows the enabling/disabling of tag command
309 	 * queueing).  An error should only be returned if something
310 	 * went wrong in the driver while trying to set the tag type.
311 	 * If the driver doesn't support the requested tag type, then
312 	 * it should set the closest type it does support without
313 	 * returning an error.  Returns the actual tag type set.
314 	 *
315 	 * Status: OPTIONAL
316 	 */
317 	int (* change_queue_type)(struct scsi_device *, int);
318 
319 	/*
320 	 * This function determines the BIOS parameters for a given
321 	 * hard disk.  These tend to be numbers that are made up by
322 	 * the host adapter.  Parameters:
323 	 * size, device, list (heads, sectors, cylinders)
324 	 *
325 	 * Status: OPTIONAL
326 	 */
327 	int (* bios_param)(struct scsi_device *, struct block_device *,
328 			sector_t, int []);
329 
330 	/*
331 	 * This function is called when one or more partitions on the
332 	 * device reach beyond the end of the device.
333 	 *
334 	 * Status: OPTIONAL
335 	 */
336 	void (*unlock_native_capacity)(struct scsi_device *);
337 
338 	/*
339 	 * Can be used to export driver statistics and other information to
340 	 * the world outside the kernel, i.e. userspace, and also provides an
341 	 * interface to feed information back to the driver.
342 	 *
343 	 * Status: OBSOLETE
344 	 */
345 	int (*show_info)(struct seq_file *, struct Scsi_Host *);
346 	int (*write_info)(struct Scsi_Host *, char *, int);
347 
348 	/*
349 	 * This is an optional routine that allows the transport to become
350 	 * involved when a scsi io timer fires. The return value tells the
351 	 * timer routine how to finish the io timeout handling:
352 	 * EH_HANDLED:		I fixed the error, please complete the command
353 	 * EH_RESET_TIMER:	I need more time, reset the timer and
354 	 *			begin counting again
355 	 * EH_NOT_HANDLED:	Begin normal error recovery
356 	 *
357 	 * Status: OPTIONAL
358 	 */
359 	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
360 
361 	/* This is an optional routine that allows the transport to initiate
362 	 * an LLD adapter or firmware reset using a sysfs attribute.
363 	 *
364 	 * Return values: 0 on success, -ve value on failure.
365 	 *
366 	 * Status: OPTIONAL
367 	 */
368 
369 	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
370 #define SCSI_ADAPTER_RESET	1
371 #define SCSI_FIRMWARE_RESET	2
372 
373 
374 	/*
375 	 * Name of proc directory
376 	 */
377 	const char *proc_name;
378 
379 	/*
380 	 * Used to store the procfs directory if a driver implements the
381 	 * show_info method.
382 	 */
383 	struct proc_dir_entry *proc_dir;
384 
385 	/*
386 	 * This determines if we will use a non-interrupt driven
387 	 * or an interrupt driven scheme.  It is set to the maximum number
388 	 * of simultaneous commands a given host adapter will accept.
389 	 */
390 	int can_queue;
391 
392 	/*
393 	 * In many instances, especially where disconnect / reconnect are
394 	 * supported, our host also has an ID on the SCSI bus.  If this is
395 	 * the case, then it must be reserved.  Please set this_id to -1 if
396 	 * your setup is in single initiator mode, and the host lacks an
397 	 * ID.
398 	 */
399 	int this_id;
400 
401 	/*
402 	 * This determines the degree to which the host adapter is capable
403 	 * of scatter-gather.
404 	 */
405 	unsigned short sg_tablesize;
406 	unsigned short sg_prot_tablesize;
407 
408 	/*
409 	 * Set this if the host adapter has limitations besides the segment count.
410 	 */
411 	unsigned short max_sectors;
412 
413 	/*
414 	 * DMA scatter gather segment boundary limit. A segment crossing this
415 	 * boundary will be split in two.
416 	 */
417 	unsigned long dma_boundary;
418 
419 	/*
420 	 * This specifies "machine infinity" for host templates which don't
421 	 * limit the transfer size.  Note this limit represents an absolute
422 	 * maximum, and may be over the transfer limits allowed for
423 	 * individual devices (e.g. 256 for SCSI-1).
424 	 */
425 #define SCSI_DEFAULT_MAX_SECTORS	1024
426 
427 	/*
428 	 * True if this host adapter can make good use of linked commands.
429 	 * This will allow more than one command to be queued to a given
430 	 * unit on a given host.  Set this to the maximum number of command
431 	 * blocks to be provided for each device.  Set this to 1 for one
432 	 * command block per lun, 2 for two, etc.  Do not set this to 0.
433 	 * You should make sure that the host adapter will do the right thing
434 	 * before you try setting this above 1.
435 	 */
436 	short cmd_per_lun;
437 
438 	/*
439 	 * present contains a counter indicating how many boards of this
440 	 * type were found when we did the scan.
441 	 */
442 	unsigned char present;
443 
444 	/*
445 	 * This specifies the mode that an LLD supports.
446 	 */
447 	unsigned supported_mode:2;
448 
449 	/*
450 	 * True if this host adapter uses unchecked DMA onto an ISA bus.
451 	 */
452 	unsigned unchecked_isa_dma:1;
453 
454 	/*
455 	 * True if this host adapter can make good use of clustering.
456 	 * I originally thought that if the tablesize was large that it
457 	 * was a waste of CPU cycles to prepare a cluster list, but
458 	 * it works out that the Buslogic is faster if you use a smaller
459 	 * number of segments (i.e. use clustering).  I guess it is
460 	 * inefficient.
461 	 */
462 	unsigned use_clustering:1;
463 
464 	/*
465 	 * True for emulated SCSI host adapters (e.g. ATAPI).
466 	 */
467 	unsigned emulated:1;
468 
469 	/*
470 	 * True if the low-level driver performs its own reset-settle delays.
471 	 */
472 	unsigned skip_settle_delay:1;
473 
474 	/*
475 	 * True if we are using ordered write support.
476 	 */
477 	unsigned ordered_tag:1;
478 
479 	/* True if the controller does not support WRITE SAME */
480 	unsigned no_write_same:1;
481 
482 	/*
483 	 * True if asynchronous aborts are not supported
484 	 */
485 	unsigned no_async_abort:1;
486 
487 	/*
488 	 * Countdown for host blocking with no commands outstanding.
489 	 */
490 	unsigned int max_host_blocked;
491 
492 	/*
493 	 * Default value for the blocking.  If the queue is empty,
494 	 * host_blocked counts down in the request_fn until it reaches zero,
495 	 * at which point host operations are restarted.
496 	 *
497 	 * FIXME: This should probably be a value in the template
498 	 */
499 #define SCSI_DEFAULT_HOST_BLOCKED	7
500 
501 	/*
502 	 * Pointer to the sysfs class properties for this host, NULL terminated.
503 	 */
504 	struct device_attribute **shost_attrs;
505 
506 	/*
507 	 * Pointer to the SCSI device properties for this host, NULL terminated.
508 	 */
509 	struct device_attribute **sdev_attrs;
510 
511 	/*
512 	 * List of hosts per template.
513 	 *
514 	 * This is only for use by scsi_module.c for legacy templates.
515 	 * For these access to it is synchronized implicitly by
516 	 * module_init/module_exit.
517 	 */
518 	struct list_head legacy_hosts;
519 
520 	/*
521 	 * Vendor Identifier associated with the host
522 	 *
523 	 * Note: When specifying vendor_id, be sure to read the
524 	 *   Vendor Type and ID formatting requirements specified in
525 	 *   scsi_netlink.h
526 	 */
527 	u64 vendor_id;
528 
529 	/*
530 	 * Additional per-command data allocated for the driver.
531 	 */
532 	unsigned int cmd_size;
533 	struct scsi_host_cmd_pool *cmd_pool;
534 };
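/*
 * Example (illustrative sketch, not part of this header): how a hypothetical
 * LLD might fill in a minimal scsi_host_template.  The "mydrv" names and the
 * numeric values are assumptions chosen for illustration only; the
 * mydrv_queuecommand handler is sketched after DEF_SCSI_QCMD below.
 *
 *	static struct scsi_host_template mydrv_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "mydrv",
 *		.proc_name		= "mydrv",
 *		.queuecommand		= mydrv_queuecommand,
 *		.eh_host_reset_handler	= mydrv_eh_host_reset,
 *		.slave_configure	= mydrv_slave_configure,
 *		.this_id		= -1,
 *		.can_queue		= 32,
 *		.cmd_per_lun		= 2,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 */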
535 
536 /*
537  * Temporary #define for host lock push down. Can be removed when all
538  * drivers have been updated to take advantage of unlocked
539  * queuecommand.
540  *
541  */
542 #define DEF_SCSI_QCMD(func_name) \
543 	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
544 	{								\
545 		unsigned long irq_flags;				\
546 		int rc;							\
547 		spin_lock_irqsave(shost->host_lock, irq_flags);		\
548 		scsi_cmd_get_serial(shost, cmd);			\
549 		rc = func_name##_lck (cmd, cmd->scsi_done);			\
550 		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
551 		return rc;						\
552 	}
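/*
 * Example (illustrative sketch, not part of this header): a hypothetical
 * driver "mydrv" that still relies on the host_lock can keep its old-style
 * handler and wrap it with DEF_SCSI_QCMD.  The handler below simply fails
 * every command with DID_NO_CONNECT; a real driver would hand the command to
 * its hardware and honour the queuecommand contract described above.
 *
 *	static int mydrv_queuecommand_lck(struct scsi_cmnd *cmd,
 *					  void (*done)(struct scsi_cmnd *))
 *	{
 *		cmd->result = DID_NO_CONNECT << 16;
 *		done(cmd);
 *		return 0;
 *	}
 *
 *	static DEF_SCSI_QCMD(mydrv_queuecommand)
 *
 * The resulting mydrv_queuecommand() is what is plugged into the
 * .queuecommand member of the scsi_host_template.
 */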
553 
554 
555 /*
556  * shost state: If you alter this, you also need to alter scsi_sysfs.c
557  * (for the ascii descriptions) and the state model enforcer:
558  * scsi_host_set_state()
559  */
560 enum scsi_host_state {
561 	SHOST_CREATED = 1,
562 	SHOST_RUNNING,
563 	SHOST_CANCEL,
564 	SHOST_DEL,
565 	SHOST_RECOVERY,
566 	SHOST_CANCEL_RECOVERY,
567 	SHOST_DEL_RECOVERY,
568 };
569 
570 struct Scsi_Host {
571 	/*
572 	 * __devices is protected by the host_lock, but you should
573 	 * usually use scsi_device_lookup / shost_for_each_device
574 	 * to access it and don't care about locking yourself.
575 	 * In the rare case of being in irq context you can use
576 	 * their __ prefixed variants with the lock held. NEVER
577 	 * access this list directly from a driver.
578 	 */
579 	struct list_head	__devices;
580 	struct list_head	__targets;
581 
582 	struct scsi_host_cmd_pool *cmd_pool;
583 	spinlock_t		free_list_lock;
584 	struct list_head	free_list; /* backup store of cmd structs */
585 	struct list_head	starved_list;
586 
587 	spinlock_t		default_lock;
588 	spinlock_t		*host_lock;
589 
590 	struct mutex		scan_mutex;/* serialize scanning activity */
591 
592 	struct list_head	eh_cmd_q;
593 	struct task_struct    * ehandler;  /* Error recovery thread. */
594 	struct completion     * eh_action; /* Wait for specific actions on the
595 					      host. */
596 	wait_queue_head_t       host_wait;
597 	struct scsi_host_template *hostt;
598 	struct scsi_transport_template *transportt;
599 
600 	/*
601 	 * Area to keep a shared tag map (if needed, will be
602 	 * NULL if not).
603 	 */
604 	struct blk_queue_tag	*bqt;
605 
606 	/*
607 	 * The following fields are protected with host_lock;
608 	 * however, eh routines can safely access during eh processing
609 	 * without acquiring the lock.
610 	 */
611 	unsigned int host_busy;		   /* commands actually active on low-level */
612 	unsigned int host_failed;	   /* commands that failed. */
613 	unsigned int host_eh_scheduled;    /* EH scheduled without command */
614 
615 	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
616 
617 	/* next two fields are used to bound the time spent in error handling */
618 	int eh_deadline;
619 	unsigned long last_reset;
620 
621 
622 	/*
623 	 * These three parameters can be used to allow for wide scsi,
624 	 * and for host adapters that support multiple busses.
625 	 * The first two should be set to 1 more than the actual max id
626 	 * or lun (i.e. 8 for normal systems).
627 	 */
628 	unsigned int max_id;
629 	unsigned int max_lun;
630 	unsigned int max_channel;
631 
632 	/*
633 	 * This is a unique identifier that must be assigned so that we
634 	 * have some way of identifying each detected host adapter properly
635 	 * and uniquely.  For hosts that do not support more than one card
636 	 * in the system at one time, this does not need to be set.  It is
637 	 * initialized to 0 in scsi_register.
638 	 */
639 	unsigned int unique_id;
640 
641 	/*
642 	 * The maximum length of SCSI commands that this host can accept.
643 	 * Probably 12 for most host adapters, but could be 16 for others,
644 	 * or 260 if the driver supports variable-length CDBs.
645 	 * For drivers that don't set this field, a value of 12 is
646 	 * assumed.
647 	 */
648 	unsigned short max_cmd_len;
649 
650 	int this_id;
651 	int can_queue;
652 	short cmd_per_lun;
653 	short unsigned int sg_tablesize;
654 	short unsigned int sg_prot_tablesize;
655 	short unsigned int max_sectors;
656 	unsigned long dma_boundary;
657 	/*
658 	 * Used to assign serial numbers to the cmds.
659 	 * Protected by the host lock.
660 	 */
661 	unsigned long cmd_serial_number;
662 
663 	unsigned active_mode:2;
664 	unsigned unchecked_isa_dma:1;
665 	unsigned use_clustering:1;
666 	unsigned use_blk_tcq:1;
667 
668 	/*
669 	 * Host has requested that no further requests come through for the
670 	 * time being.
671 	 */
672 	unsigned host_self_blocked:1;
673 
674 	/*
675 	 * Host uses correct SCSI ordering, not PC ordering. The bit is
676 	 * set for the minority of drivers whose authors actually read
677 	 * the spec ;).
678 	 */
679 	unsigned reverse_ordering:1;
680 
681 	/*
682 	 * Ordered write support
683 	 */
684 	unsigned ordered_tag:1;
685 
686 	/* Task mgmt function in progress */
687 	unsigned tmf_in_progress:1;
688 
689 	/* Asynchronous scan in progress */
690 	unsigned async_scan:1;
691 
692 	/* Don't resume host in EH */
693 	unsigned eh_noresume:1;
694 
695 	/* The controller does not support WRITE SAME */
696 	unsigned no_write_same:1;
697 
698 	/*
699 	 * Optional work queue to be utilized by the transport
700 	 */
701 	char work_q_name[20];
702 	struct workqueue_struct *work_q;
703 
704 	/*
705 	 * Task management function work queue
706 	 */
707 	struct workqueue_struct *tmf_work_q;
708 
709 	/*
710 	 * Host has rejected a command because it was busy.
711 	 */
712 	unsigned int host_blocked;
713 
714 	/*
715 	 * Value host_blocked counts down from
716 	 */
717 	unsigned int max_host_blocked;
718 
719 	/* Protection Information */
720 	unsigned int prot_capabilities;
721 	unsigned char prot_guard_type;
722 
723 	/*
724 	 * q used for scsi_tgt msgs, async events or any other requests that
725 	 * need to be processed in userspace
726 	 */
727 	struct request_queue *uspace_req_q;
728 
729 	/* legacy crap */
730 	unsigned long base;
731 	unsigned long io_port;
732 	unsigned char n_io_port;
733 	unsigned char dma_channel;
734 	unsigned int  irq;
735 
736 
737 	enum scsi_host_state shost_state;
738 
739 	/* ldm bits */
740 	struct device		shost_gendev, shost_dev;
741 
742 	/*
743 	 * List of hosts per template.
744 	 *
745 	 * This is only for use by scsi_module.c for legacy templates.
746 	 * For these access to it is synchronized implicitly by
747 	 * module_init/module_exit.
748 	 */
749 	struct list_head sht_legacy_list;
750 
751 	/*
752 	 * Points to the transport data (if any) which is allocated
753 	 * separately
754 	 */
755 	void *shost_data;
756 
757 	/*
758 	 * Points to the physical bus device we'd use to do DMA.
759 	 * Needed just in case we have virtual hosts.
760 	 */
761 	struct device *dma_dev;
762 
763 	/*
764 	 * We should ensure that this is aligned, both for better performance
765 	 * and also because some compilers (m68k) don't automatically force
766 	 * alignment to a long boundary.
767 	 */
768 	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
769 		__attribute__ ((aligned (sizeof(unsigned long))));
770 };
771 
772 #define		class_to_shost(d)	\
773 	container_of(d, struct Scsi_Host, shost_dev)
774 
775 #define shost_printk(prefix, shost, fmt, a...)	\
776 	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
777 
778 static inline void *shost_priv(struct Scsi_Host *shost)
779 {
780 	return (void *)shost->hostdata;
781 }
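/*
 * Example (illustrative sketch): the hostdata[] area at the end of struct
 * Scsi_Host normally holds a driver-private structure.  Its size is passed
 * to scsi_host_alloc() and the area is retrieved with shost_priv().  The
 * "mydrv_hba" type and field names are hypothetical.
 *
 *	struct mydrv_hba {
 *		void __iomem	*regs;
 *		spinlock_t	lock;
 *	};
 *
 *	struct Scsi_Host *shost;
 *	struct mydrv_hba *hba;
 *
 *	shost = scsi_host_alloc(&mydrv_template, sizeof(struct mydrv_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	hba = shost_priv(shost);
 *	shost_printk(KERN_INFO, shost, "mydrv HBA regs at %p\n", hba->regs);
 */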
782 
783 int scsi_is_host_device(const struct device *);
784 
785 static inline struct Scsi_Host *dev_to_shost(struct device *dev)
786 {
787 	while (!scsi_is_host_device(dev)) {
788 		if (!dev->parent)
789 			return NULL;
790 		dev = dev->parent;
791 	}
792 	return container_of(dev, struct Scsi_Host, shost_gendev);
793 }
794 
795 static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
796 {
797 	return shost->shost_state == SHOST_RECOVERY ||
798 		shost->shost_state == SHOST_CANCEL_RECOVERY ||
799 		shost->shost_state == SHOST_DEL_RECOVERY ||
800 		shost->tmf_in_progress;
801 }
802 
803 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
804 extern void scsi_flush_work(struct Scsi_Host *);
805 
806 extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
807 extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
808 					       struct device *,
809 					       struct device *);
810 extern void scsi_scan_host(struct Scsi_Host *);
811 extern void scsi_rescan_device(struct device *);
812 extern void scsi_remove_host(struct Scsi_Host *);
813 extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
814 extern void scsi_host_put(struct Scsi_Host *t);
815 extern struct Scsi_Host *scsi_host_lookup(unsigned short);
816 extern const char *scsi_host_state_name(enum scsi_host_state);
817 extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
818 
819 extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
820 
821 static inline int __must_check scsi_add_host(struct Scsi_Host *host,
822 					     struct device *dev)
823 {
824 	return scsi_add_host_with_dma(host, dev, dev);
825 }
826 
827 static inline struct device *scsi_get_device(struct Scsi_Host *shost)
828 {
829 	return shost->shost_gendev.parent;
830 }
831 
832 /**
833  * scsi_host_scan_allowed - Is scanning of this host allowed
834  * @shost:	Pointer to Scsi_Host.
835  **/
836 static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
837 {
838 	return shost->shost_state == SHOST_RUNNING ||
839 	       shost->shost_state == SHOST_RECOVERY;
840 }
841 
842 extern void scsi_unblock_requests(struct Scsi_Host *);
843 extern void scsi_block_requests(struct Scsi_Host *);
844 
845 struct class_container;
846 
847 extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
848 						void (*) (struct request_queue *));
849 /*
850  * These two functions are used to allocate and free a pseudo device
851  * which will connect to the host adapter itself rather than any
852  * physical device.  You must deallocate when you are done with the
853  * thing.  This physical pseudo-device isn't real and won't be available
854  * from any high-level drivers.
855  */
856 extern void scsi_free_host_dev(struct scsi_device *);
857 extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
858 
859 /*
860  * DIF defines the exchange of protection information between
861  * initiator and SBC block device.
862  *
863  * DIX defines the exchange of protection information between OS and
864  * initiator.
865  */
866 enum scsi_host_prot_capabilities {
867 	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
868 	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
869 	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
870 
871 	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
872 	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
873 	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
874 	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
875 };
876 
877 /*
878  * SCSI hosts which support the Data Integrity Extensions must
879  * indicate their capabilities by setting the prot_capabilities using
880  * this call.
881  */
882 static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
883 {
884 	shost->prot_capabilities = mask;
885 }
886 
887 static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
888 {
889 	return shost->prot_capabilities;
890 }
891 
892 static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
893 {
894 	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
895 }
896 
897 static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
898 {
899 	static unsigned char cap[] = { 0,
900 				       SHOST_DIF_TYPE1_PROTECTION,
901 				       SHOST_DIF_TYPE2_PROTECTION,
902 				       SHOST_DIF_TYPE3_PROTECTION };
903 
904 	if (target_type >= ARRAY_SIZE(cap))
905 		return 0;
906 
907 	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
908 }
909 
910 static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
911 {
912 #if defined(CONFIG_BLK_DEV_INTEGRITY)
913 	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
914 				       SHOST_DIX_TYPE1_PROTECTION,
915 				       SHOST_DIX_TYPE2_PROTECTION,
916 				       SHOST_DIX_TYPE3_PROTECTION };
917 
918 	if (target_type >= ARRAY_SIZE(cap))
919 		return 0;
920 
921 	return shost->prot_capabilities & cap[target_type];
922 #endif
923 	return 0;
924 }
925 
926 /*
927  * All DIX-capable initiators must support the T10-mandated CRC
928  * checksum.  Controllers can optionally implement the IP checksum
929  * scheme which has much lower impact on system performance.  Note
930  * that the main rationale for the checksum is to match integrity
931  * metadata with data.  Detecting bit errors is a job for ECC memory
932  * and buses.
933  */
934 
935 enum scsi_host_guard_type {
936 	SHOST_DIX_GUARD_CRC = 1 << 0,
937 	SHOST_DIX_GUARD_IP  = 1 << 1,
938 };
939 
940 static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
941 {
942 	shost->prot_guard_type = type;
943 }
944 
945 static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
946 {
947 	return shost->prot_guard_type;
948 }
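/*
 * Example (illustrative sketch): a DIF/DIX-capable driver would typically
 * advertise its protection capabilities and guard type after allocating the
 * host and before calling scsi_add_host().  The exact capability mix is an
 * assumption here.
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
 */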
949 
950 /* legacy interfaces */
951 extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
952 extern void scsi_unregister(struct Scsi_Host *);
953 extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
954 
955 #endif /* _SCSI_SCSI_HOST_H */
956