/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br()      barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)      rmb()
#else
#define __io_ar(v)      barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()      wmb()
#else
#define __io_bw()      barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()      mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()     __io_bw()
#endif

#ifndef __io_paw
#define __io_paw()     __io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()     __io_br()
#endif

#ifndef __io_par
#define __io_par(v)     __io_ar(v)
#endif
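
/*
 * Example (illustrative sketch, not part of this header): an architecture
 * that needs real ordering can define these hooks in its own <asm/io.h>
 * before including this file; anything left undefined keeps the defaults
 * above. The arch name and barrier choices below are hypothetical:
 *
 *	// arch/foo/include/asm/io.h (hypothetical)
 *	#define __io_br()	do { } while (0)	// no lead-in barrier needed
 *	#define __io_ar(v)	rmb()	// order the read before later DMA accesses
 *	#define __io_bw()	wmb()	// flush prior stores before the write
 *	#include <asm-generic/io.h>
 */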

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
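
/*
 * Usage sketch (illustrative only): the __raw_*() accessors suit callers
 * that handle ordering and endianness themselves, e.g. bulk-filling device
 * SRAM before one ordered "go" write. All FOO_* names are hypothetical:
 *
 *	static void foo_load_image(void __iomem *sram, const u32 *img,
 *				   unsigned int words)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < words; i++)
 *			__raw_writel(img[i], sram + 4 * i);	// no per-word barrier
 *		writel(FOO_GO, sram + FOO_CTRL);	// ordered write closes the batch
 *	}
 */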

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	__io_br();
	val = __le16_to_cpu(__raw_readw(addr));
	__io_ar(val);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	__io_br();
	val = __le32_to_cpu(__raw_readl(addr));
	__io_ar(val);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	__io_br();
	val = __le64_to_cpu(__raw_readq(addr));
	__io_ar(val);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writew(cpu_to_le16(value), addr);
	__io_aw();
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writel(__cpu_to_le32(value), addr);
	__io_aw();
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeq(__cpu_to_le64(value), addr);
	__io_aw();
}
#endif
#endif /* CONFIG_64BIT */
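
/*
 * Usage sketch (illustrative only): the ordered accessors above are the
 * normal way for a portable driver to touch a little-endian register
 * block. All FOO_* names are hypothetical:
 *
 *	void __iomem *regs = ioremap(FOO_PHYS_BASE, FOO_REG_SIZE);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(FOO_RESET, regs + FOO_CTRL);	// __io_bw() flushes prior DMA stores
 *	status = readl(regs + FOO_STAT);	// __io_ar() orders the result
 *	iounmap(regs);
 */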

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	__raw_writeb(value, addr);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_le32(value), addr);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	__raw_writeq(__cpu_to_le64(value), addr);
}
#endif
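
/*
 * Usage sketch (illustrative only): the _relaxed() variants fit hot paths
 * where a single ordered access can close a batch, e.g. an interrupt
 * handler sampling several status words. FOO_* names are hypothetical:
 *
 *	u32 a = readl_relaxed(regs + FOO_STAT0);	// no barrier per read
 *	u32 b = readl_relaxed(regs + FOO_STAT1);
 *
 *	writel(a | b, regs + FOO_ACK);	// one ordered write ends the batch
 */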

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
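
/*
 * Usage sketch (illustrative only): the string accessors are meant for
 * data FIFOs, where every iteration must target the same register instead
 * of walking through memory. A hypothetical 16-bit receive FIFO:
 *
 *	u16 buf[64];
 *
 *	readsw(regs + FOO_RX_FIFO, buf, ARRAY_SIZE(buf));	// 64 reads, one address
 */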

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

#include <linux/logic_pio.h>

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */

#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif
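
/*
 * Usage sketch (illustrative only): the port accessors take a port number
 * rather than a pointer, emulated here as an offset from PCI_IOBASE. A
 * classic scratch-register probe; the register offset and magic value are
 * hypothetical:
 *
 *	outb(FOO_SCRATCH_VAL, port + FOO_SCR);
 *	if (inb(port + FOO_SCR) != FOO_SCRATCH_VAL)
 *		return -ENODEV;	// nothing answering at this port
 */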

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access
 * a single I/O port.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif
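
/*
 * Usage sketch (illustrative only): the string port accessors mirror
 * reads{b,w,l}() for port space, e.g. pulling a 512-byte sector through a
 * hypothetical 16-bit data port:
 *
 *	u16 sector[256];
 *
 *	insw(io_base + FOO_DATA, sector, ARRAY_SIZE(sector));	// 256 reads, one port
 */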

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */
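
/*
 * Usage sketch (illustrative only): the *be accessors are for registers
 * that are big-endian on the bus regardless of CPU endianness, as often
 * found on IP blocks inherited from big-endian SoCs. FOO_* names are
 * hypothetical:
 *
 *	u32 id = ioread32be(regs + FOO_ID);	// swapped relative to ioread32 on LE
 *	iowrite32be(FOO_EN, regs + FOO_CTRL);
 */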

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * If you have an MMU, your architecture is expected to implement both
 * ioremap() and iounmap(); otherwise the asm-generic helpers below provide
 * a direct (one-to-one) mapping.
 *
 * The ioremap_*() variants follow the same rule: with no MMU they all
 * default to the direct mapping, and each default can be overridden. If
 * you have an MMU you are strongly encouraged to provide your own
 * implementations, as there is currently no safe architecture-agnostic
 * default. To avoid misbehaviour, the default asm-generic ioremap_*()
 * variants return NULL when an MMU is available. Once you define your own
 * ioremap_*() variant, also define the macro of the same name to itself so
 * that the NULL-returning default is not used.
 */
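
/*
 * Sketch of the override pattern described above (the arch and prototype
 * are hypothetical): declare your variant and define the macro to itself
 * so the NULL-returning default further down is skipped:
 *
 *	// arch/foo/include/asm/io.h (hypothetical)
 *	void __iomem *ioremap_wc(phys_addr_t offset, size_t size);
 *	#define ioremap_wc ioremap_wc
 */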

#ifdef CONFIG_MMU

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#else /* !CONFIG_MMU */

/*
 * With no MMU, ioremap() is a direct one-to-one mapping: the physical
 * address is handed back as the I/O cookie unchanged.
 *
 * This implementation is for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */

#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef __ioremap
#define __ioremap __ioremap
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
				      unsigned long flags)
{
	return ioremap(offset, size);
}
#endif

#ifndef iounmap
#define iounmap iounmap

static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */
#ifndef ioremap_nocache
void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
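
/*
 * Usage sketch (illustrative only): ioport_map() turns a legacy port range
 * into a cookie usable with the ioread*()/iowrite*() family, letting one
 * driver body serve both port- and memory-mapped devices. The range below
 * is hypothetical:
 *
 *	void __iomem *base = ioport_map(io_start, 8);	// map 8 ports
 *
 *	if (!base)
 *		return -ENODEV;
 *	iowrite8(val, base + 1);
 *	ioport_unmap(base);
 */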

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr:	The beginning of the I/O-memory range to set
 * @value:	The value to set the memory to
 * @size:	The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer:	The (RAM) destination for the copy
 * @addr:	The (I/O memory) source for the data
 * @size:	The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr:	The (I/O memory) destination for the copy
 * @buffer:	The (RAM) source for the data
 * @size:	The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
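
/*
 * Usage sketch (illustrative only): a mailbox-style exchange built on the
 * block helpers above; the FOO_* offsets and layout are hypothetical:
 *
 *	memcpy_toio(mbox + FOO_TX_OFF, req, sizeof(*req));	// request into device RAM
 *	writel(FOO_DOORBELL, regs + FOO_DB);	// ordered kick
 *	// ...wait for the completion interrupt...
 *	memcpy_fromio(resp, mbox + FOO_RX_OFF, sizeof(*resp));
 */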

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */