/*
 * Adaptec AIC7xxx device driver for Linux.
 *
 * Copyright (c) 1994 John Aycock
 *   The University of Calgary Department of Computer Science.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#151 $
 *
 */
#ifndef _AIC7XXX_LINUX_H_
#define _AIC7XXX_LINUX_H_

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
#include <linux/version.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <asm/io.h>

#include <linux/interrupt.h> /* For tasklet support. */
#include <linux/config.h>
#include <linux/slab.h>

/* Core SCSI definitions */
#define AIC_LIB_PREFIX ahc
#include "scsi.h"
#include <scsi/scsi_host.h>

/* Name space conflict with BSD queue macros */
#ifdef LIST_HEAD
#undef LIST_HEAD
#endif

#include "cam.h"
#include "queue.h"
#include "scsi_message.h"
#include "aiclib.h"

/*********************************** Debugging ********************************/
#ifdef CONFIG_AIC7XXX_DEBUG_ENABLE
#ifdef CONFIG_AIC7XXX_DEBUG_MASK
#define AHC_DEBUG 1
#define AHC_DEBUG_OPTS CONFIG_AIC7XXX_DEBUG_MASK
#else
/*
 * Compile in debugging code, but do not enable any printfs.
 */
#define AHC_DEBUG 1
#endif
/* No debugging code. */
#endif
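/*
 * Illustrative sketch (not part of the original header): when AHC_DEBUG is
 * defined, debug output in the core is typically guarded by a bit in the
 * ahc_debug mask, e.g.
 *
 *	#ifdef AHC_DEBUG
 *	if ((ahc_debug & AHC_SHOW_MISC) != 0)
 *		printf("%s: ...\n", ahc_name(ahc));
 *	#endif
 *
 * ahc_debug, AHC_SHOW_MISC and ahc_name() come from aic7xxx.h; treat the
 * exact guard shown here as an assumption, not a definition.
 */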

/************************* Forward Declarations *******************************/
struct ahc_softc;
typedef struct pci_dev *ahc_dev_softc_t;
typedef Scsi_Cmnd      *ahc_io_ctx_t;

/******************************* Byte Order ***********************************/
#define ahc_htobe16(x)	cpu_to_be16(x)
#define ahc_htobe32(x)	cpu_to_be32(x)
#define ahc_htobe64(x)	cpu_to_be64(x)
#define ahc_htole16(x)	cpu_to_le16(x)
#define ahc_htole32(x)	cpu_to_le32(x)
#define ahc_htole64(x)	cpu_to_le64(x)

#define ahc_be16toh(x)	be16_to_cpu(x)
#define ahc_be32toh(x)	be32_to_cpu(x)
#define ahc_be64toh(x)	be64_to_cpu(x)
#define ahc_le16toh(x)	le16_to_cpu(x)
#define ahc_le32toh(x)	le32_to_cpu(x)
#define ahc_le64toh(x)	le64_to_cpu(x)
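/*
 * Illustrative sketch (an assumption, not part of the original header):
 * these wrappers are used when exchanging multi-byte fields with the
 * controller, which expects little-endian values regardless of host byte
 * order, e.g.
 *
 *	sg->addr = ahc_htole32(busaddr);
 *	sg->len  = ahc_htole32(xfer_len);
 *	residual = ahc_le32toh(sg->len);
 *
 * The field names above are hypothetical; see aic7xxx.h for the real
 * S/G element layout.
 */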

#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN 1234
#endif

#ifndef BIG_ENDIAN
#define BIG_ENDIAN 4321
#endif

#ifndef BYTE_ORDER
#if defined(__BIG_ENDIAN)
#define BYTE_ORDER BIG_ENDIAN
#endif
#if defined(__LITTLE_ENDIAN)
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#endif /* BYTE_ORDER */

/************************* Configuration Data *********************************/
extern u_int aic7xxx_no_probe;
extern u_int aic7xxx_allow_memio;
extern int aic7xxx_detect_complete;
extern Scsi_Host_Template aic7xxx_driver_template;

/***************************** Bus Space/DMA **********************************/

typedef uint32_t bus_size_t;

typedef enum {
	BUS_SPACE_MEMIO,
	BUS_SPACE_PIO
} bus_space_tag_t;

typedef union {
	u_long		  ioport;
	volatile uint8_t __iomem *maddr;
} bus_space_handle_t;

typedef struct bus_dma_segment
{
	dma_addr_t	ds_addr;
	bus_size_t	ds_len;
} bus_dma_segment_t;

struct ahc_linux_dma_tag
{
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_size_t	maxsize;
};
typedef struct ahc_linux_dma_tag* bus_dma_tag_t;

struct ahc_linux_dmamap
{
	dma_addr_t	bus_addr;
};
typedef struct ahc_linux_dmamap* bus_dmamap_t;

typedef int bus_dma_filter_t(void*, dma_addr_t);
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);

#define BUS_DMA_WAITOK		0x0
#define BUS_DMA_NOWAIT		0x1
#define BUS_DMA_ALLOCNOW	0x2
#define BUS_DMA_LOAD_SEGS	0x4	/*
					 * Argument is an S/G list not
					 * a single buffer.
					 */

#define BUS_SPACE_MAXADDR	0xFFFFFFFF
#define BUS_SPACE_MAXADDR_32BIT	0xFFFFFFFF
#define BUS_SPACE_MAXSIZE_32BIT	0xFFFFFFFF

int	ahc_dma_tag_create(struct ahc_softc *, bus_dma_tag_t /*parent*/,
			   bus_size_t /*alignment*/, bus_size_t /*boundary*/,
			   dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
			   bus_dma_filter_t*/*filter*/, void */*filterarg*/,
			   bus_size_t /*maxsize*/, int /*nsegments*/,
			   bus_size_t /*maxsegsz*/, int /*flags*/,
			   bus_dma_tag_t */*dma_tagp*/);

void	ahc_dma_tag_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/);

int	ahc_dmamem_alloc(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
			 void** /*vaddr*/, int /*flags*/,
			 bus_dmamap_t* /*mapp*/);

void	ahc_dmamem_free(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
			void* /*vaddr*/, bus_dmamap_t /*map*/);

void	ahc_dmamap_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/,
			   bus_dmamap_t /*map*/);

int	ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t /*dmat*/,
			bus_dmamap_t /*map*/, void * /*buf*/,
			bus_size_t /*buflen*/, bus_dmamap_callback_t *,
			void */*callback_arg*/, int /*flags*/);

int	ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
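/*
 * Illustrative call sequence (a sketch based only on the prototypes above;
 * error handling is omitted and "parent_dmat", "size" and "callback" are
 * assumptions, not names defined by this header):
 *
 *	bus_dma_tag_t	tag;
 *	bus_dmamap_t	map;
 *	void		*vaddr;
 *
 *	ahc_dma_tag_create(ahc, parent_dmat, 1, 0,
 *			   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *			   NULL, NULL, size, 1,
 *			   BUS_SPACE_MAXSIZE_32BIT, 0, &tag);
 *	ahc_dmamem_alloc(ahc, tag, &vaddr, BUS_DMA_NOWAIT, &map);
 *	ahc_dmamap_load(ahc, tag, map, vaddr, size, callback, ahc, 0);
 */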

/*
 * Operations performed by ahc_dmamap_sync().
 */
#define BUS_DMASYNC_PREREAD	0x01	/* pre-read synchronization */
#define BUS_DMASYNC_POSTREAD	0x02	/* post-read synchronization */
#define BUS_DMASYNC_PREWRITE	0x04	/* pre-write synchronization */
#define BUS_DMASYNC_POSTWRITE	0x08	/* post-write synchronization */

/*
 * XXX
 * ahc_dmamap_sync is only used on buffers allocated with
 * the pci_alloc_consistent() API.  Although I'm not sure how
 * this works on architectures with a write buffer, Linux does
 * not have an API to sync "coherent" memory.  Perhaps we need
 * to do an mb()?
 */
#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op)
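/*
 * A hedged sketch of the alternative the XXX comment above alludes to
 * (an assumption, not the driver's current behaviour): if a memory
 * barrier ever proves necessary on a write-buffered architecture, the
 * no-op macro could instead expand to one, e.g.
 *
 *	#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op) mb()
 */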

/************************** Timer DataStructures ******************************/
typedef struct timer_list ahc_timer_t;

/********************************** Includes **********************************/
#ifdef CONFIG_AIC7XXX_REG_PRETTY_PRINT
#define AIC_DEBUG_REGISTERS 1
#else
#define AIC_DEBUG_REGISTERS 0
#endif
#include "aic7xxx.h"

/***************************** Timer Facilities *******************************/
#define ahc_timer_init init_timer
#define ahc_timer_stop del_timer_sync
typedef void ahc_linux_callback_t (u_long);
static __inline void ahc_timer_reset(ahc_timer_t *timer, int usec,
				     ahc_callback_t *func, void *arg);
static __inline void ahc_scb_timer_reset(struct scb *scb, u_int usec);

static __inline void
ahc_timer_reset(ahc_timer_t *timer, int usec, ahc_callback_t *func, void *arg)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	del_timer(timer);
	timer->data = (u_long)arg;
	timer->expires = jiffies + (usec * HZ)/1000000;
	timer->function = (ahc_linux_callback_t*)func;
	add_timer(timer);
}

static __inline void
ahc_scb_timer_reset(struct scb *scb, u_int usec)
{
	mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
}
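/*
 * Illustrative usage (a sketch; the callback name is hypothetical and the
 * timer field comes from struct ahc_platform_data defined later in this
 * header): rearm the per-softc completion queue timer for roughly 10ms.
 *
 *	ahc_timer_reset(&ahc->platform_data->completeq_timer,
 *			10000, my_completeq_callback, ahc);
 */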

/***************************** SMP support ************************************/
#include <linux/spinlock.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
#define AHC_SCSI_HAS_HOST_LOCK 1
#else
#define AHC_SCSI_HAS_HOST_LOCK 0
#endif

#define AIC7XXX_DRIVER_VERSION "6.2.36"

/**************************** Front End Queues ********************************/
/*
 * Data structure used to cast the Linux struct scsi_cmnd to something
 * that allows us to use the queue macros.  The Linux structure has
 * plenty of space to hold the links fields as required by the queue
 * macros, but the queue macros require them to have the correct type.
 */
struct ahc_cmd_internal {
	/* Area owned by the Linux scsi layer. */
	uint8_t	private[offsetof(struct scsi_cmnd, SCp.Status)];
	union {
		STAILQ_ENTRY(ahc_cmd)	ste;
		LIST_ENTRY(ahc_cmd)	le;
		TAILQ_ENTRY(ahc_cmd)	tqe;
	} links;
	uint32_t			end;
};

struct ahc_cmd {
	union {
		struct ahc_cmd_internal	icmd;
		struct scsi_cmnd	scsi_cmd;
	} un;
};

#define acmd_icmd(cmd) ((cmd)->un.icmd)
#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
#define acmd_links un.icmd.links
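/*
 * Illustrative usage (a sketch of how the overlay above is exercised in
 * aic7xxx_osm.c; treat the surrounding code as an assumption): a struct
 * scsi_cmnd handed to us by the midlayer is viewed as a struct ahc_cmd
 * so that the BSD queue macros can link it onto a per-device busy queue.
 *
 *	struct ahc_cmd *acmd = (struct ahc_cmd *)cmd;
 *
 *	TAILQ_INSERT_TAIL(&dev->busyq, acmd, acmd_links.tqe);
 */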

/*************************** Device Data Structures ***************************/
/*
 * A per probed device structure used to deal with some error recovery
 * scenarios that the Linux mid-layer code just doesn't know how to
 * handle.  The structure allocated for a device only becomes persistent
 * after a successfully completed inquiry command to the target when
 * that inquiry data indicates a lun is present.
 */
TAILQ_HEAD(ahc_busyq, ahc_cmd);
typedef enum {
	AHC_DEV_UNCONFIGURED	 = 0x01,
	AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
	AHC_DEV_TIMER_ACTIVE	 = 0x04, /* Our timer is active */
	AHC_DEV_ON_RUN_LIST	 = 0x08, /* Queued to be run later */
	AHC_DEV_Q_BASIC		 = 0x10, /* Allow basic device queuing */
	AHC_DEV_Q_TAGGED	 = 0x20, /* Allow full SCSI2 command queueing */
	AHC_DEV_PERIODIC_OTAG	 = 0x40, /* Send OTAG to prevent starvation */
	AHC_DEV_SLAVE_CONFIGURED = 0x80	 /* slave_configure() has been called */
} ahc_linux_dev_flags;

struct ahc_linux_target;
struct ahc_linux_device {
	TAILQ_ENTRY(ahc_linux_device) links;
	struct		ahc_busyq busyq;

	/*
	 * The number of transactions currently
	 * queued to the device.
	 */
	int			active;

	/*
	 * The currently allowed number of
	 * transactions that can be queued to
	 * the device.  Must be signed for
	 * conversion from tagged to untagged
	 * mode where the device may have more
	 * than one outstanding active transaction.
	 */
	int			openings;

	/*
	 * A positive count indicates that this
	 * device's queue is halted.
	 */
	u_int			qfrozen;

	/*
	 * Cumulative command counter.
	 */
	u_long			commands_issued;

	/*
	 * The number of tagged transactions when
	 * running at our current opening level
	 * that have been successfully received by
	 * this device since the last QUEUE FULL.
	 */
	u_int			tag_success_count;
#define AHC_TAG_SUCCESS_INTERVAL 50

	ahc_linux_dev_flags	flags;

	/*
	 * Per device timer.
	 */
	struct timer_list	timer;

	/*
	 * The high limit for the tags variable.
	 */
	u_int			maxtags;

	/*
	 * The computed number of tags outstanding
	 * at the time of the last QUEUE FULL event.
	 */
	u_int			tags_on_last_queuefull;

	/*
	 * How many times we have seen a queue full
	 * with the same number of tags.  This is used
	 * to stop our adaptive queue depth algorithm
	 * on devices with a fixed number of tags.
	 */
	u_int			last_queuefull_same_count;
#define AHC_LOCK_TAGS_COUNT 50

	/*
	 * How many transactions have been queued
	 * without the device going idle.  We use
	 * this statistic to determine when to issue
	 * an ordered tag to prevent transaction
	 * starvation.  This statistic is only updated
	 * if the AHC_DEV_PERIODIC_OTAG flag is set
	 * on this device.
	 */
	u_int			commands_since_idle_or_otag;
#define AHC_OTAG_THRESH	500

	int			lun;
	Scsi_Device	       *scsi_device;
	struct			ahc_linux_target *target;
};
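/*
 * Sketch of the ordered-tag starvation check that
 * commands_since_idle_or_otag feeds (a simplified illustration of logic
 * that lives in aic7xxx_osm.c, not a definition made by this header):
 *
 *	if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0
 *	 && dev->commands_since_idle_or_otag == AHC_OTAG_THRESH) {
 *		... queue this command with an ordered tag message
 *		    instead of a simple tag, then reset the counter ...
 *	}
 */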

typedef enum {
	AHC_DV_REQUIRED		 = 0x01,
	AHC_INQ_VALID		 = 0x02,
	AHC_BASIC_DV		 = 0x04,
	AHC_ENHANCED_DV		 = 0x08
} ahc_linux_targ_flags;

/* DV States */
typedef enum {
	AHC_DV_STATE_EXIT = 0,
	AHC_DV_STATE_INQ_SHORT_ASYNC,
	AHC_DV_STATE_INQ_ASYNC,
	AHC_DV_STATE_INQ_ASYNC_VERIFY,
	AHC_DV_STATE_TUR,
	AHC_DV_STATE_REBD,
	AHC_DV_STATE_INQ_VERIFY,
	AHC_DV_STATE_WEB,
	AHC_DV_STATE_REB,
	AHC_DV_STATE_SU,
	AHC_DV_STATE_BUSY
} ahc_dv_state;

struct ahc_linux_target {
	struct ahc_linux_device	 *devices[AHC_NUM_LUNS];
	int			  channel;
	int			  target;
	int			  refcount;
	struct ahc_transinfo	  last_tinfo;
	struct ahc_softc	 *ahc;
	ahc_linux_targ_flags	  flags;
	struct scsi_inquiry_data *inq_data;
	/*
	 * The next "fallback" period to use for narrow/wide transfers.
	 */
	uint8_t			  dv_next_narrow_period;
	uint8_t			  dv_next_wide_period;
	uint8_t			  dv_max_width;
	uint8_t			  dv_max_ppr_options;
	uint8_t			  dv_last_ppr_options;
	u_int			  dv_echo_size;
	ahc_dv_state		  dv_state;
	u_int			  dv_state_retry;
	char			 *dv_buffer;
	char			 *dv_buffer1;
};

/********************* Definitions Required by the Core ***********************/
/*
 * Number of SG segments we require.  So long as the S/G segments for
 * a particular transaction are allocated in a physically contiguous
 * manner and are allocated below 4GB, the number of S/G segments is
 * unrestricted.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * We dynamically adjust the number of segments in pre-2.5 kernels to
 * avoid fragmentation issues in the SCSI mid-layer's private memory
 * allocator.  See aic7xxx_osm.c ahc_linux_size_nseg() for details.
 */
extern u_int ahc_linux_nseg;
#define	AHC_NSEG ahc_linux_nseg
#define	AHC_LINUX_MIN_NSEG 64
#else
#define	AHC_NSEG 128
#endif

/*
 * Per-SCB OSM storage.
 */
typedef enum {
	AHC_UP_EH_SEMAPHORE = 0x1
} ahc_linux_scb_flags;

struct scb_platform_data {
	struct ahc_linux_device	*dev;
	dma_addr_t		 buf_busaddr;
	uint32_t		 xfer_len;
	uint32_t		 sense_resid;	/* Auto-Sense residual */
	ahc_linux_scb_flags	 flags;
};

/*
 * Define a structure used for each host adapter.  All members are
 * aligned on a boundary >= the size of the member to honor the
 * alignment restrictions of the various platforms supported by
 * this driver.
 */
typedef enum {
	AHC_DV_WAIT_SIMQ_EMPTY	 = 0x01,
	AHC_DV_WAIT_SIMQ_RELEASE = 0x02,
	AHC_DV_ACTIVE		 = 0x04,
	AHC_DV_SHUTDOWN		 = 0x08,
	AHC_RUN_CMPLT_Q_TIMER	 = 0x10
} ahc_linux_softc_flags;

TAILQ_HEAD(ahc_completeq, ahc_cmd);

struct ahc_platform_data {
	/*
	 * Fields accessed from interrupt context.
	 */
	struct ahc_linux_target *targets[AHC_NUM_TARGETS];
	TAILQ_HEAD(, ahc_linux_device) device_runq;
	struct ahc_completeq	 completeq;

	spinlock_t		 spin_lock;
	struct tasklet_struct	 runq_tasklet;
	u_int			 qfrozen;
	pid_t			 dv_pid;
	struct timer_list	 completeq_timer;
	struct timer_list	 reset_timer;
	struct semaphore	 eh_sem;
	struct semaphore	 dv_sem;
	struct semaphore	 dv_cmd_sem;	/* XXX This needs to be in
						 * the target struct
						 */
	struct scsi_device	*dv_scsi_dev;
	struct Scsi_Host        *host;		/* pointer to scsi host */
#define AHC_LINUX_NOIRQ	((uint32_t)~0)
	uint32_t		 irq;		/* IRQ for this adapter */
	uint32_t		 bios_address;
	uint32_t		 mem_busaddr;	/* Mem Base Addr */
	uint64_t		 hw_dma_mask;
	ahc_linux_softc_flags	 flags;
};

/************************** OS Utility Wrappers *******************************/
#define printf printk
#define M_NOWAIT GFP_ATOMIC
#define M_WAITOK 0
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)

static __inline void ahc_delay(long);
static __inline void
ahc_delay(long usec)
{
	/*
	 * udelay on Linux can have problems for
	 * multi-millisecond waits.  Wait at most
	 * 1024us per call.
	 */
	while (usec > 0) {
		udelay(usec % 1024);
		usec -= 1024;
	}
}
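/*
 * Illustrative usage (sketch): busy-wait roughly half a millisecond for
 * the chip to settle after a register access.  For waits beyond a few
 * milliseconds a sleeping primitive would normally be preferred, but the
 * helper above at least keeps each individual udelay() call bounded.
 *
 *	ahc_delay(500);
 */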


/***************************** Low Level I/O **********************************/
static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);
static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
static __inline void ahc_outsb(struct ahc_softc * ahc, long port,
			       uint8_t *, int count);
static __inline void ahc_insb(struct ahc_softc * ahc, long port,
			       uint8_t *, int count);

static __inline uint8_t
ahc_inb(struct ahc_softc * ahc, long port)
{
	uint8_t x;

	if (ahc->tag == BUS_SPACE_MEMIO) {
		x = readb(ahc->bsh.maddr + port);
	} else {
		x = inb(ahc->bsh.ioport + port);
	}
	mb();
	return (x);
}

static __inline void
ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
{
	if (ahc->tag == BUS_SPACE_MEMIO) {
		writeb(val, ahc->bsh.maddr + port);
	} else {
		outb(val, ahc->bsh.ioport + port);
	}
	mb();
}

static __inline void
ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
{
	int i;

	/*
	 * There is probably a more efficient way to do this on Linux
	 * but we don't use this for anything speed critical and this
	 * should work.
	 */
	for (i = 0; i < count; i++)
		ahc_outb(ahc, port, *array++);
}

static __inline void
ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
{
	int i;

	/*
	 * There is probably a more efficient way to do this on Linux
	 * but we don't use this for anything speed critical and this
	 * should work.
	 */
	for (i = 0; i < count; i++)
		*array++ = ahc_inb(ahc, port);
}

/**************************** Initialization **********************************/
int		ahc_linux_register_host(struct ahc_softc *,
					Scsi_Host_Template *);

uint64_t	ahc_linux_get_memsize(void);

/*************************** Pretty Printing **********************************/
struct info_str {
	char *buffer;
	int length;
	off_t offset;
	int pos;
};

void	ahc_format_transinfo(struct info_str *info,
			     struct ahc_transinfo *tinfo);

/******************************** Locking *************************************/
/* Lock protecting internal data structures */
static __inline void ahc_lockinit(struct ahc_softc *);
static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags);
static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags);

/* Lock acquisition and release of the above lock in midlayer entry points. */
static __inline void ahc_midlayer_entrypoint_lock(struct ahc_softc *,
						  unsigned long *flags);
static __inline void ahc_midlayer_entrypoint_unlock(struct ahc_softc *,
						    unsigned long *flags);
/* Lock held during command completion to the upper layer */
static __inline void ahc_done_lockinit(struct ahc_softc *);
static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags);
static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags);

/* Lock held during ahc_list manipulation and ahc softc frees */
extern spinlock_t ahc_list_spinlock;
static __inline void ahc_list_lockinit(void);
static __inline void ahc_list_lock(unsigned long *flags);
static __inline void ahc_list_unlock(unsigned long *flags);

static __inline void
ahc_lockinit(struct ahc_softc *ahc)
{
	spin_lock_init(&ahc->platform_data->spin_lock);
}

static __inline void
ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
{
	spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
}

static __inline void
ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
	spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
}
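/*
 * Typical usage pattern (an illustrative sketch, mirroring how the OSM
 * entry points protect per-softc state):
 *
 *	unsigned long flags;
 *
 *	ahc_lock(ahc, &flags);
 *	... manipulate ahc and platform_data fields ...
 *	ahc_unlock(ahc, &flags);
 */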

static __inline void
ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
{
	/*
	 * In 2.5.X and some 2.4.X versions, the midlayer takes our
	 * lock just before calling us, so we avoid locking again.
	 * For other kernel versions, the io_request_lock is taken
	 * just before our entry point is called.  In this case, we
	 * trade the io_request_lock for our per-softc lock.
	 */
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_unlock(&io_request_lock);
	spin_lock(&ahc->platform_data->spin_lock);
#endif
}

static __inline void
ahc_midlayer_entrypoint_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_unlock(&ahc->platform_data->spin_lock);
	spin_lock(&io_request_lock);
#endif
}

static __inline void
ahc_done_lockinit(struct ahc_softc *ahc)
{
	/*
	 * In 2.5.X, our own lock is held during completions.
	 * In previous versions, the io_request_lock is used.
	 * In either case, we can't initialize this lock again.
	 */
}

static __inline void
ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_lock_irqsave(&io_request_lock, *flags);
#endif
}

static __inline void
ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_unlock_irqrestore(&io_request_lock, *flags);
#endif
}

static __inline void
ahc_list_lockinit(void)
{
	spin_lock_init(&ahc_list_spinlock);
}

static __inline void
ahc_list_lock(unsigned long *flags)
{
	spin_lock_irqsave(&ahc_list_spinlock, *flags);
}

static __inline void
ahc_list_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&ahc_list_spinlock, *flags);
}

/******************************* PCI Definitions ******************************/
/*
 * PCIM_xxx: mask to locate subfield in register
 * PCIR_xxx: config register offset
 * PCIC_xxx: device class
 * PCIS_xxx: device subclass
 * PCIP_xxx: device programming interface
 * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
 * PCID_xxx: device ID
 */
#define PCIR_DEVVENDOR		0x00
#define PCIR_VENDOR		0x00
#define PCIR_DEVICE		0x02
#define PCIR_COMMAND		0x04
#define PCIM_CMD_PORTEN		0x0001
#define PCIM_CMD_MEMEN		0x0002
#define PCIM_CMD_BUSMASTEREN	0x0004
#define PCIM_CMD_MWRICEN	0x0010
#define PCIM_CMD_PERRESPEN	0x0040
#define	PCIM_CMD_SERRESPEN	0x0100
#define PCIR_STATUS		0x06
#define PCIR_REVID		0x08
#define PCIR_PROGIF		0x09
#define PCIR_SUBCLASS		0x0a
#define PCIR_CLASS		0x0b
#define PCIR_CACHELNSZ		0x0c
#define PCIR_LATTIMER		0x0d
#define PCIR_HEADERTYPE		0x0e
#define PCIM_MFDEV		0x80
#define PCIR_BIST		0x0f
#define PCIR_CAP_PTR		0x34

/* config registers for header type 0 devices */
#define PCIR_MAPS	0x10
#define PCIR_SUBVEND_0	0x2c
#define PCIR_SUBDEV_0	0x2e

extern struct pci_driver aic7xxx_pci_driver;

typedef enum
{
	AHC_POWER_STATE_D0,
	AHC_POWER_STATE_D1,
	AHC_POWER_STATE_D2,
	AHC_POWER_STATE_D3
} ahc_power_state;

/**************************** VL/EISA Routines ********************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) \
  && (defined(__i386__) || defined(__alpha__)) \
  && (!defined(CONFIG_EISA)))
#define CONFIG_EISA
#endif

#ifdef CONFIG_EISA
extern uint32_t aic7xxx_probe_eisa_vl;
int			 ahc_linux_eisa_init(void);
void			 ahc_linux_eisa_exit(void);
int			 aic7770_map_registers(struct ahc_softc *ahc,
					       u_int port);
int			 aic7770_map_int(struct ahc_softc *ahc, u_int irq);
#else
static inline int	ahc_linux_eisa_init(void) {
	return -ENODEV;
}
static inline void	ahc_linux_eisa_exit(void) {
}
#endif

/******************************* PCI Routines *********************************/
#ifdef CONFIG_PCI
int			 ahc_linux_pci_init(void);
void			 ahc_linux_pci_exit(void);
int			 ahc_pci_map_registers(struct ahc_softc *ahc);
int			 ahc_pci_map_int(struct ahc_softc *ahc);

static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
					     int reg, int width);

static __inline uint32_t
ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
{
	switch (width) {
	case 1:
	{
		uint8_t retval;

		pci_read_config_byte(pci, reg, &retval);
		return (retval);
	}
	case 2:
	{
		uint16_t retval;
		pci_read_config_word(pci, reg, &retval);
		return (retval);
	}
	case 4:
	{
		uint32_t retval;
		pci_read_config_dword(pci, reg, &retval);
		return (retval);
	}
	default:
		panic("ahc_pci_read_config: Read size too big");
		/* NOTREACHED */
		return (0);
	}
}

static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
					  int reg, uint32_t value,
					  int width);

static __inline void
ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
{
	switch (width) {
	case 1:
		pci_write_config_byte(pci, reg, value);
		break;
	case 2:
		pci_write_config_word(pci, reg, value);
		break;
	case 4:
		pci_write_config_dword(pci, reg, value);
		break;
	default:
		panic("ahc_pci_write_config: Write size too big");
		/* NOTREACHED */
	}
}
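/*
 * Illustrative usage (a sketch using only the wrappers and register
 * offsets defined above; width is in bytes, and ahc->dev_softc is assumed
 * to hold the adapter's struct pci_dev pointer):
 *
 *	uint32_t devvendor;
 *	uint16_t command;
 *
 *	devvendor = ahc_pci_read_config(ahc->dev_softc, PCIR_DEVVENDOR, 4);
 *	command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 2);
 *	command |= PCIM_CMD_BUSMASTEREN;
 *	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 2);
 */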

static __inline int ahc_get_pci_function(ahc_dev_softc_t);
static __inline int
ahc_get_pci_function(ahc_dev_softc_t pci)
{
	return (PCI_FUNC(pci->devfn));
}

static __inline int ahc_get_pci_slot(ahc_dev_softc_t);
static __inline int
ahc_get_pci_slot(ahc_dev_softc_t pci)
{
	return (PCI_SLOT(pci->devfn));
}

static __inline int ahc_get_pci_bus(ahc_dev_softc_t);
static __inline int
ahc_get_pci_bus(ahc_dev_softc_t pci)
{
	return (pci->bus->number);
}
#else
static inline int ahc_linux_pci_init(void) {
	return 0;
}
static inline void ahc_linux_pci_exit(void) {
}
#endif

static __inline void ahc_flush_device_writes(struct ahc_softc *);
static __inline void
ahc_flush_device_writes(struct ahc_softc *ahc)
{
	/* XXX Is this sufficient for all architectures??? */
	ahc_inb(ahc, INTSTAT);
}

/**************************** Proc FS Support *********************************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
int	ahc_linux_proc_info(char *, char **, off_t, int, int, int);
#else
int	ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
			    off_t, int, int);
#endif

/*************************** Domain Validation ********************************/
#define AHC_DV_CMD(cmd) ((cmd)->scsi_done == ahc_linux_dv_complete)
#define AHC_DV_SIMQ_FROZEN(ahc)					\
	((((ahc)->platform_data->flags & AHC_DV_ACTIVE) != 0)	\
	 && (ahc)->platform_data->qfrozen == 1)

/*********************** Transaction Access Wrappers *************************/
static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
static __inline void ahc_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
static __inline void ahc_set_scsi_status(struct scb *, uint32_t);
static __inline uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd);
static __inline uint32_t ahc_get_transaction_status(struct scb *);
static __inline uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd);
static __inline uint32_t ahc_get_scsi_status(struct scb *);
static __inline void ahc_set_transaction_tag(struct scb *, int, u_int);
static __inline u_long ahc_get_transfer_length(struct scb *);
static __inline int ahc_get_transfer_dir(struct scb *);
static __inline void ahc_set_residual(struct scb *, u_long);
static __inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
static __inline u_long ahc_get_residual(struct scb *);
static __inline u_long ahc_get_sense_residual(struct scb *);
static __inline int ahc_perform_autosense(struct scb *);
static __inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
					       struct scb *);
static __inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
						     struct ahc_devinfo *);
static __inline void ahc_platform_scb_free(struct ahc_softc *ahc,
					   struct scb *scb);
static __inline void ahc_freeze_scb(struct scb *scb);

static __inline
void ahc_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
{
	cmd->result &= ~(CAM_STATUS_MASK << 16);
	cmd->result |= status << 16;
}

static __inline
void ahc_set_transaction_status(struct scb *scb, uint32_t status)
{
	ahc_cmd_set_transaction_status(scb->io_ctx,status);
}

static __inline
void ahc_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
{
	cmd->result &= ~0xFFFF;
	cmd->result |= status;
}

static __inline
void ahc_set_scsi_status(struct scb *scb, uint32_t status)
{
	ahc_cmd_set_scsi_status(scb->io_ctx, status);
}

static __inline
uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd)
{
	return ((cmd->result >> 16) & CAM_STATUS_MASK);
}

static __inline
uint32_t ahc_get_transaction_status(struct scb *scb)
{
	return (ahc_cmd_get_transaction_status(scb->io_ctx));
}

static __inline
uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd)
{
	return (cmd->result & 0xFFFF);
}

static __inline
uint32_t ahc_get_scsi_status(struct scb *scb)
{
	return (ahc_cmd_get_scsi_status(scb->io_ctx));
}
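/*
 * Illustrative sketch of how the wrappers above partition cmd->result:
 * the CAM transaction status occupies the bits selected by
 * CAM_STATUS_MASK << 16, while the SCSI status byte lives in the low 16
 * bits.  The status constants below come from cam.h/aiclib.h (an
 * assumption of this example, not something this header defines):
 *
 *	ahc_set_transaction_status(scb, CAM_REQ_CMP);
 *	ahc_set_scsi_status(scb, SCSI_STATUS_OK);
 *	if (ahc_get_transaction_status(scb) != CAM_REQ_CMP)
 *		... handle the error path ...
 */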

static __inline
void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type)
{
	/*
	 * Nothing to do for linux as the incoming transaction
	 * has no concept of tag/non tagged, etc.
	 */
}

static __inline
u_long ahc_get_transfer_length(struct scb *scb)
{
	return (scb->platform_data->xfer_len);
}

static __inline
int ahc_get_transfer_dir(struct scb *scb)
{
	return (scb->io_ctx->sc_data_direction);
}

static __inline
void ahc_set_residual(struct scb *scb, u_long resid)
{
	scb->io_ctx->resid = resid;
}

static __inline
void ahc_set_sense_residual(struct scb *scb, u_long resid)
{
	scb->platform_data->sense_resid = resid;
}

static __inline
u_long ahc_get_residual(struct scb *scb)
{
	return (scb->io_ctx->resid);
}

static __inline
u_long ahc_get_sense_residual(struct scb *scb)
{
	return (scb->platform_data->sense_resid);
}

static __inline
int ahc_perform_autosense(struct scb *scb)
{
	/*
	 * We always perform autosense in Linux.
	 * On other platforms this is set on a
	 * per-transaction basis.
	 */
	return (1);
}

static __inline uint32_t
ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb)
{
	return (sizeof(struct scsi_sense_data));
}

static __inline void
ahc_notify_xfer_settings_change(struct ahc_softc *ahc,
				struct ahc_devinfo *devinfo)
{
	/* Nothing to do here for linux */
}

static __inline void
ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb)
{
	ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
}

int	ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg);
void	ahc_platform_free(struct ahc_softc *ahc);
void	ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb);

static __inline void
ahc_freeze_scb(struct scb *scb)
{
	if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
		scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
		scb->platform_data->dev->qfrozen++;
	}
}

void	ahc_platform_set_tags(struct ahc_softc *ahc,
			      struct ahc_devinfo *devinfo, ahc_queue_alg);
int	ahc_platform_abort_scbs(struct ahc_softc *ahc, int target,
				char channel, int lun, u_int tag,
				role_t role, uint32_t status);
irqreturn_t
	ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
void	ahc_platform_flushwork(struct ahc_softc *ahc);
int	ahc_softc_comp(struct ahc_softc *, struct ahc_softc *);
void	ahc_done(struct ahc_softc*, struct scb*);
void	ahc_send_async(struct ahc_softc *, char channel,
		       u_int target, u_int lun, ac_code, void *);
void	ahc_print_path(struct ahc_softc *, struct scb *);
void	ahc_platform_dump_card_state(struct ahc_softc *ahc);

#ifdef CONFIG_PCI
#define AHC_PCI_CONFIG 1
#else
#define AHC_PCI_CONFIG 0
#endif
#define bootverbose aic7xxx_verbose
extern u_int aic7xxx_verbose;
#endif /* _AIC7XXX_LINUX_H_ */