/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>

/*
 * Virtual -> physical identity mapping starts at this offset.
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif
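/*
 * Worked example (illustrative only): with the default 43-bit KSEG,
 * physical address 0x1000 is reached through the kernel virtual
 * address IDENT_ADDR + 0x1000 == 0xfffffc0000001000, and subtracting
 * IDENT_ADDR recovers the physical address again.
 */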

/*
 * We try to avoid hae updates (hence the cache), but when we do need
 * to update the hae we must do it atomically, so that an interrupt
 * can never see the cached hae value out of sync with the hardware
 * register.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);
	barrier();
}

extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(volatile void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(volatile void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41.  */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor.  */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif

#define virt_to_phys virt_to_phys
#define phys_to_virt phys_to_virt
#define page_to_phys(page)	page_to_pa(page)
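/*
 * Worked example for the non-48-bit path above (illustrative, assuming
 * a CPU reporting hwrpb->pa_bits == 40): for the KSEG address
 * 0xfffffc0000001000, the shift-left/arithmetic-shift-right pair
 * sign-extends away the KSEG bits and the pa_bits mask leaves the
 * physical address 0x1000; phys_to_virt(0x1000) maps straight back.
 */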

/* Maximum PIO space address supported.  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus isa_virt_to_bus

static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved).  */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt isa_bus_to_virt
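/*
 * Sketch of the window arithmetic above, with made-up numbers (the
 * real __direct_map_base/__direct_map_size are set up by the platform
 * code): if __direct_map_base were 0x40000000, a buffer at physical
 * address 0x1000 would appear on the bus at 0x40001000, and
 * isa_bus_to_virt(0x40001000) would return its kernel virtual address.
 */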

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b
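/*
 * IO_CONCAT() pastes a chipset prefix onto an accessor name after
 * macro expansion.  For example, with __IO_PREFIX defined as generic
 * (as below), IO_CONCAT(__IO_PREFIX,ioread8) expands to
 * generic_ioread8; a chipset header defining __IO_PREFIX as, say,
 * cia would yield cia_ioread8 instead.
 */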

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}
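/*
 * For reference, REMAP1(u8, readb, const volatile) expands to:
 *
 *	static inline u8 generic_readb(const volatile void __iomem *addr)
 *	{
 *		return alpha_mv.mv_readb(addr);
 *	}
 *
 * i.e. each generic_* accessor is simply an indirect call through the
 * machine vector.
 */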

REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u64, ioread64, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u64, iowrite64, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX		generic
#define generic_trivial_rw_bw	0
#define generic_trivial_rw_lq	0
#define generic_trivial_io_bw	0
#define generic_trivial_io_lq	0
#define generic_trivial_iounmap	0
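/*
 * All of the trivial_* flags are zero for the generic kernel: the
 * chipset is unknown at compile time, so the inline fast paths guarded
 * by IO_CONCAT(__IO_PREFIX,trivial_*) below are disabled and every
 * access goes through the machine vector.  Chipset-specific headers
 * set these flags nonzero to enable the inline expansions.
 */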

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8 inb(unsigned long port);
extern u16 inw(unsigned long port);
extern u32 inl(unsigned long port);
extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);
#define inb inb
#define inw inw
#define inl inl
#define outb outb
#define outw outw
#define outl outl

extern u8 readb(const volatile void __iomem *addr);
extern u16 readw(const volatile void __iomem *addr);
extern u32 readl(const volatile void __iomem *addr);
extern u64 readq(const volatile void __iomem *addr);
extern void writeb(u8 b, volatile void __iomem *addr);
extern void writew(u16 b, volatile void __iomem *addr);
extern void writel(u32 b, volatile void __iomem *addr);
extern void writeq(u64 b, volatile void __iomem *addr);
#define readb readb
#define readw readw
#define readl readl
#define readq readq
#define writeb writeb
#define writew writew
#define writel writel
#define writeq writeq

extern u8 __raw_readb(const volatile void __iomem *addr);
extern u16 __raw_readw(const volatile void __iomem *addr);
extern u32 __raw_readl(const volatile void __iomem *addr);
extern u64 __raw_readq(const volatile void __iomem *addr);
extern void __raw_writeb(u8 b, volatile void __iomem *addr);
extern void __raw_writew(u16 b, volatile void __iomem *addr);
extern void __raw_writel(u32 b, volatile void __iomem *addr);
extern void __raw_writeq(u64 b, volatile void __iomem *addr);
#define __raw_readb __raw_readb
#define __raw_readw __raw_readw
#define __raw_readl __raw_readl
#define __raw_readq __raw_readq
#define __raw_writeb __raw_writeb
#define __raw_writew __raw_writew
#define __raw_writel __raw_writel
#define __raw_writeq __raw_writeq

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

#define ioport_map ioport_map
#define ioport_unmap ioport_unmap

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

#define ioremap_wc ioremap
#define ioremap_uc ioremap

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a)	__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}

/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif
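/*
 * Note the barrier discipline used by these trivial expansions: reads
 * are bracketed by mb() on both sides, writes are preceded by one.
 * The *_relaxed variants near the end of this file keep only the
 * leading barrier.
 */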

#define ioread8 ioread8
#define ioread16 ioread16
#define iowrite8 iowrite8
#define iowrite16 iowrite16

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline u64 ioread64(const void __iomem *addr)
{
	u64 ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline void iowrite64(u64 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr);
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#define ioread32 ioread32
#define ioread64 ioread64
#define iowrite32 iowrite32
#define iowrite64 iowrite64

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
#endif

#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))

#define inb_p	inb
#define inw_p	inw
#define inl_p	inl
#define outb_p	outb
#define outw_p	outw
#define outl_p	outl

extern u8 readb_relaxed(const volatile void __iomem *addr);
extern u16 readw_relaxed(const volatile void __iomem *addr);
extern u32 readl_relaxed(const volatile void __iomem *addr);
extern u64 readq_relaxed(const volatile void __iomem *addr);
#define readb_relaxed readb_relaxed
#define readw_relaxed readw_relaxed
#define readl_relaxed readl_relaxed
#define readq_relaxed readq_relaxed

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}
#endif

#define writeb_relaxed	writeb
#define writew_relaxed	writew
#define writel_relaxed	writel
#define writeq_relaxed	writeq

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}
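/*
 * The multiplications above just replicate the fill value across a
 * 64-bit word, e.g. memset_io(addr, 0xab, len) hands
 * 0xabababababababab to _memset_c_io, and memsetw_io(addr, 0x1234, len)
 * hands it 0x1234123412341234.
 */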

#define memset_io memset_io
#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

#define insb insb
#define insw insw
#define insl insl
#define outsb outsb
#define outsw outsw
#define outsl outsl

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70.  Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries.
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170 + (x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0
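/*
 * For example, RTC_PORT(0)/RTC_PORT(1) form the usual CMOS index/data
 * pair: 0x70/0x71 on most platforms, 0x170/0x171 on Jensen, and
 * whatever the machine vector reports on a generic kernel.
 */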

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * These get provided from <asm-generic/iomap.h> since alpha does not
 * select GENERIC_IOMAP.
 */
#define ioread64 ioread64
#define iowrite64 iowrite64
#define ioread64be ioread64be
#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
#define iowrite8_rep iowrite8_rep
#define iowrite16_rep iowrite16_rep
#define iowrite32_rep iowrite32_rep
#define pci_iounmap pci_iounmap

#include <asm-generic/io.h>

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */