1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Alpha IO and memory functions.
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <linux/string.h>
9 #include <linux/module.h>
10 #include <asm/io.h>
11
12 /* Out-of-line versions of the i/o routines that redirect into the
13 platform-specific version. Note that "platform-specific" may mean
14 "generic", which bumps through the machine vector. */
15
16 unsigned int
ioread8(const void __iomem * addr)17 ioread8(const void __iomem *addr)
18 {
19 unsigned int ret;
20 mb();
21 ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
22 mb();
23 return ret;
24 }
25
unsigned int ioread16(const void __iomem *addr)
{
	unsigned int data;

	/* Barrier-bracketed 16-bit MMIO read via the platform hook. */
	mb();
	data = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return data;
}
34
unsigned int ioread32(const void __iomem *addr)
{
	unsigned int data;

	/* Barrier-bracketed 32-bit MMIO read via the platform hook. */
	mb();
	data = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return data;
}
43
ioread64(const void __iomem * addr)44 u64 ioread64(const void __iomem *addr)
45 {
46 unsigned int ret;
47 mb();
48 ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
49 mb();
50 return ret;
51 }
52
void iowrite8(u8 b, void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
}
58
void iowrite16(u16 b, void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
}
64
void iowrite32(u32 b, void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
}
70
void iowrite64(u64 b, void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite64)(b, addr);
}
76
77 EXPORT_SYMBOL(ioread8);
78 EXPORT_SYMBOL(ioread16);
79 EXPORT_SYMBOL(ioread32);
80 EXPORT_SYMBOL(ioread64);
81 EXPORT_SYMBOL(iowrite8);
82 EXPORT_SYMBOL(iowrite16);
83 EXPORT_SYMBOL(iowrite32);
84 EXPORT_SYMBOL(iowrite64);
85
inb(unsigned long port)86 u8 inb(unsigned long port)
87 {
88 return ioread8(ioport_map(port, 1));
89 }
90
inw(unsigned long port)91 u16 inw(unsigned long port)
92 {
93 return ioread16(ioport_map(port, 2));
94 }
95
inl(unsigned long port)96 u32 inl(unsigned long port)
97 {
98 return ioread32(ioport_map(port, 4));
99 }
100
void outb(u8 b, unsigned long port)
{
	/* Legacy PIO write via the iomap cookie for PORT. */
	void __iomem *addr = ioport_map(port, 1);

	iowrite8(b, addr);
}
105
void outw(u16 b, unsigned long port)
{
	/* Legacy PIO write via the iomap cookie for PORT. */
	void __iomem *addr = ioport_map(port, 2);

	iowrite16(b, addr);
}
110
void outl(u32 b, unsigned long port)
{
	/* Legacy PIO write via the iomap cookie for PORT. */
	void __iomem *addr = ioport_map(port, 4);

	iowrite32(b, addr);
}
115
116 EXPORT_SYMBOL(inb);
117 EXPORT_SYMBOL(inw);
118 EXPORT_SYMBOL(inl);
119 EXPORT_SYMBOL(outb);
120 EXPORT_SYMBOL(outw);
121 EXPORT_SYMBOL(outl);
122
u8 __raw_readb(const volatile void __iomem *addr)
{
	/* Raw (no barrier) byte read, redirected to the platform hook. */
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}
127
u16 __raw_readw(const volatile void __iomem *addr)
{
	/* Raw (no barrier) 16-bit read, redirected to the platform hook. */
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}
132
u32 __raw_readl(const volatile void __iomem *addr)
{
	/* Raw (no barrier) 32-bit read, redirected to the platform hook. */
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}
137
u64 __raw_readq(const volatile void __iomem *addr)
{
	/* Raw (no barrier) 64-bit read, redirected to the platform hook. */
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}
142
void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	/* Raw (no barrier) byte write, redirected to the platform hook. */
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}
147
void __raw_writew(u16 b, volatile void __iomem *addr)
{
	/* Raw (no barrier) 16-bit write, redirected to the platform hook. */
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}
152
void __raw_writel(u32 b, volatile void __iomem *addr)
{
	/* Raw (no barrier) 32-bit write, redirected to the platform hook. */
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}
157
void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	/* Raw (no barrier) 64-bit write, redirected to the platform hook. */
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}
162
163 EXPORT_SYMBOL(__raw_readb);
164 EXPORT_SYMBOL(__raw_readw);
165 EXPORT_SYMBOL(__raw_readl);
166 EXPORT_SYMBOL(__raw_readq);
167 EXPORT_SYMBOL(__raw_writeb);
168 EXPORT_SYMBOL(__raw_writew);
169 EXPORT_SYMBOL(__raw_writel);
170 EXPORT_SYMBOL(__raw_writeq);
171
readb(const volatile void __iomem * addr)172 u8 readb(const volatile void __iomem *addr)
173 {
174 u8 ret;
175 mb();
176 ret = __raw_readb(addr);
177 mb();
178 return ret;
179 }
180
readw(const volatile void __iomem * addr)181 u16 readw(const volatile void __iomem *addr)
182 {
183 u16 ret;
184 mb();
185 ret = __raw_readw(addr);
186 mb();
187 return ret;
188 }
189
readl(const volatile void __iomem * addr)190 u32 readl(const volatile void __iomem *addr)
191 {
192 u32 ret;
193 mb();
194 ret = __raw_readl(addr);
195 mb();
196 return ret;
197 }
198
readq(const volatile void __iomem * addr)199 u64 readq(const volatile void __iomem *addr)
200 {
201 u64 ret;
202 mb();
203 ret = __raw_readq(addr);
204 mb();
205 return ret;
206 }
207
void writeb(u8 b, volatile void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	__raw_writeb(b, addr);
}
213
void writew(u16 b, volatile void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	__raw_writew(b, addr);
}
219
void writel(u32 b, volatile void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	__raw_writel(b, addr);
}
225
void writeq(u64 b, volatile void __iomem *addr)
{
	/* Order the MMIO store after all preceding memory accesses. */
	mb();
	__raw_writeq(b, addr);
}
231
232 EXPORT_SYMBOL(readb);
233 EXPORT_SYMBOL(readw);
234 EXPORT_SYMBOL(readl);
235 EXPORT_SYMBOL(readq);
236 EXPORT_SYMBOL(writeb);
237 EXPORT_SYMBOL(writew);
238 EXPORT_SYMBOL(writel);
239 EXPORT_SYMBOL(writeq);
240
241 /*
242 * The _relaxed functions must be ordered w.r.t. each other, but they don't
243 * have to be ordered w.r.t. other memory accesses.
244 */
u8 readb_relaxed(const volatile void __iomem *addr)
{
	/* Single leading barrier: orders _relaxed accessors against
	   each other, but not against other memory accesses. */
	mb();
	return __raw_readb(addr);
}
250
u16 readw_relaxed(const volatile void __iomem *addr)
{
	/* Single leading barrier: orders _relaxed accessors against
	   each other, but not against other memory accesses. */
	mb();
	return __raw_readw(addr);
}
256
u32 readl_relaxed(const volatile void __iomem *addr)
{
	/* Single leading barrier: orders _relaxed accessors against
	   each other, but not against other memory accesses. */
	mb();
	return __raw_readl(addr);
}
262
u64 readq_relaxed(const volatile void __iomem *addr)
{
	/* Single leading barrier: orders _relaxed accessors against
	   each other, but not against other memory accesses. */
	mb();
	return __raw_readq(addr);
}
268
269 EXPORT_SYMBOL(readb_relaxed);
270 EXPORT_SYMBOL(readw_relaxed);
271 EXPORT_SYMBOL(readl_relaxed);
272 EXPORT_SYMBOL(readq_relaxed);
273
/*
 * Read COUNT 8-bit bytes from port PORT into memory starting at DST.
 */
void ioread8_rep(const void __iomem *port, void *dst, unsigned long count)
{
	/* Lead-in: read single bytes until DST is 32-bit aligned
	   (or COUNT is exhausted). */
	while ((unsigned long)dst & 0x3) {
		if (!count)
			return;
		count--;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}

	/* Main loop: assemble four port bytes into one aligned
	   32-bit store, low byte first. */
	while (count >= 4) {
		unsigned int w;
		count -= 4;
		w = ioread8(port);
		w |= ioread8(port) << 8;
		w |= ioread8(port) << 16;
		w |= ioread8(port) << 24;
		*(unsigned int *)dst = w;
		dst += 4;
	}

	/* Tail: up to three remaining bytes, one at a time. */
	while (count) {
		--count;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}
}
304
insb(unsigned long port,void * dst,unsigned long count)305 void insb(unsigned long port, void *dst, unsigned long count)
306 {
307 ioread8_rep(ioport_map(port, 1), dst, count);
308 }
309
310 EXPORT_SYMBOL(ioread8_rep);
311 EXPORT_SYMBOL(insb);
312
/*
 * Read COUNT 16-bit words from port PORT into memory starting at
 * DST.  DST must be at least short aligned.  This is used by the
 * IDE driver to read disk sectors.  Performance is important, but
 * the interfaces seem to be slow: just using the inlined version
 * of the inw() breaks things.
 */
void ioread16_rep(const void __iomem *port, void *dst, unsigned long count)
{
	/* If DST is not longword aligned, do one leading 16-bit read
	   to align it; DST must still be at least short aligned. */
	if (unlikely((unsigned long)dst & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)dst & 0x1);
		count--;
		*(unsigned short *)dst = ioread16(port);
		dst += 2;
	}

	/* Main loop: combine two port words into one aligned 32-bit
	   store, low word first. */
	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = ioread16(port);
		w |= ioread16(port) << 16;
		*(unsigned int *)dst = w;
		dst += 4;
	}

	/* At most one trailing word remains. */
	if (count) {
		*(unsigned short*)dst = ioread16(port);
	}
}
344
insw(unsigned long port,void * dst,unsigned long count)345 void insw(unsigned long port, void *dst, unsigned long count)
346 {
347 ioread16_rep(ioport_map(port, 2), dst, count);
348 }
349
350 EXPORT_SYMBOL(ioread16_rep);
351 EXPORT_SYMBOL(insw);
352
353
/*
 * Read COUNT 32-bit words from port PORT into memory starting at
 * DST.  Now works with any alignment in DST.  Performance is important,
 * but the interfaces seem to be slow: just using the inlined version
 * of the inl() breaks things.
 */
void ioread32_rep(const void __iomem *port, void *dst, unsigned long count)
{
	if (unlikely((unsigned long)dst & 0x3)) {
		/* Unaligned buffer: store through a packed struct so
		   the compiler emits safe unaligned 32-bit stores. */
		while (count--) {
			struct S { int x __attribute__((packed)); };
			((struct S *)dst)->x = ioread32(port);
			dst += 4;
		}
	} else {
		/* Buffer 32-bit aligned. */
		while (count--) {
			*(unsigned int *)dst = ioread32(port);
			dst += 4;
		}
	}
}
376
insl(unsigned long port,void * dst,unsigned long count)377 void insl(unsigned long port, void *dst, unsigned long count)
378 {
379 ioread32_rep(ioport_map(port, 4), dst, count);
380 }
381
382 EXPORT_SYMBOL(ioread32_rep);
383 EXPORT_SYMBOL(insl);
384
385
386 /*
387 * Like insb but in the opposite direction.
388 * Don't worry as much about doing aligned memory transfers:
389 * doing byte reads the "slow" way isn't nearly as slow as
390 * doing byte writes the slow way (no r-m-w cycle).
391 */
void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
{
	/* Byte writes need no alignment fix-ups: push COUNT bytes
	   out one at a time. */
	const unsigned char *p = xsrc;

	for (; count != 0; count--) {
		iowrite8(*p, port);
		p++;
	}
}
398
outsb(unsigned long port,const void * src,unsigned long count)399 void outsb(unsigned long port, const void *src, unsigned long count)
400 {
401 iowrite8_rep(ioport_map(port, 1), src, count);
402 }
403
404 EXPORT_SYMBOL(iowrite8_rep);
405 EXPORT_SYMBOL(outsb);
406
407
/*
 * Like insw but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Performance is important, but the
 * interfaces seem to be slow: just using the inlined version of the
 * outw() breaks things.
 */
void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
{
	/* If SRC is not longword aligned, emit one leading 16-bit
	   write to align it; SRC must still be short aligned. */
	if (unlikely((unsigned long)src & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)src & 0x1);
		iowrite16(*(unsigned short *)src, port);
		src += 2;
		--count;
	}

	/* Main loop: one aligned 32-bit load feeds two 16-bit port
	   writes, low half first. */
	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = *(unsigned int *)src;
		src += 4;
		iowrite16(w >> 0, port);
		iowrite16(w >> 16, port);
	}

	/* At most one trailing word remains. */
	if (count) {
		iowrite16(*(unsigned short *)src, port);
	}
}
438
outsw(unsigned long port,const void * src,unsigned long count)439 void outsw(unsigned long port, const void *src, unsigned long count)
440 {
441 iowrite16_rep(ioport_map(port, 2), src, count);
442 }
443
444 EXPORT_SYMBOL(iowrite16_rep);
445 EXPORT_SYMBOL(outsw);
446
447
/*
 * Like insl but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Works with any alignment in SRC.
 * Performance is important, but the interfaces seem to be slow:
 * just using the inlined version of the outl() breaks things.
 */
void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
{
	if (unlikely((unsigned long)src & 0x3)) {
		/* Unaligned buffer: load through a packed struct so
		   the compiler emits safe unaligned 32-bit loads. */
		while (count--) {
			struct S { int x __attribute__((packed)); };
			iowrite32(((struct S *)src)->x, port);
			src += 4;
		}
	} else {
		/* Buffer 32-bit aligned. */
		while (count--) {
			iowrite32(*(unsigned int *)src, port);
			src += 4;
		}
	}
}
470
outsl(unsigned long port,const void * src,unsigned long count)471 void outsl(unsigned long port, const void *src, unsigned long count)
472 {
473 iowrite32_rep(ioport_map(port, 4), src, count);
474 }
475
476 EXPORT_SYMBOL(iowrite32_rep);
477 EXPORT_SYMBOL(outsl);
478
479
480 /*
481 * Copy data from IO memory space to "real" memory space.
482 * This needs to be optimized.
483 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time.  COUNT is in bytes. */

	/* Quadword loop, taken only when both pointers share the same
	   offset within a quadword.  COUNT is pre-biased by -8 so the
	   loop can test >= 0, then re-biased afterwards. */
	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			*(u64 *)to = __raw_readq(from);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	/* Same biasing scheme at longword granularity. */
	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			*(u32 *)to = __raw_readl(from);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	/* ...and at word granularity. */
	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			*(u16 *)to = __raw_readw(from);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	/* Trailing (or wholly misaligned) bytes. */
	while (count > 0) {
		*(u8 *) to = __raw_readb(from);
		count--;
		to++;
		from++;
	}
	/* One trailing barrier covers the whole raw-access sequence. */
	mb();
}
530
531 EXPORT_SYMBOL(memcpy_fromio);
532
533
534 /*
535 * Copy data from "real" memory space to IO memory space.
536 * This needs to be optimized.
537 */
void memcpy_toio(volatile void __iomem *to, const void *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time.  COUNT is in bytes. */
	/* FIXME -- align FROM. */

	/* Quadword loop, taken only when both pointers share the same
	   offset within a quadword.  COUNT is pre-biased by -8 so the
	   loop can test >= 0, then re-biased afterwards. */
	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			__raw_writeq(*(const u64 *)from, to);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	/* Same biasing scheme at longword granularity. */
	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			__raw_writel(*(const u32 *)from, to);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	/* ...and at word granularity. */
	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			__raw_writew(*(const u16 *)from, to);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	/* Trailing (or wholly misaligned) bytes. */
	while (count > 0) {
		__raw_writeb(*(const u8 *) from, to);
		count--;
		to++;
		from++;
	}
	/* One trailing barrier covers the whole raw-access sequence. */
	mb();
}
585
586 EXPORT_SYMBOL(memcpy_toio);
587
588
589 /*
590 * "memset" on IO memory space.
591 */
void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
{
	/* Fill COUNT bytes of IO space at TO.  The same C is stored
	   at every access width, so it must already hold the byte
	   pattern replicated across 64 bits. */

	/* Handle any initial odd byte */
	if (count > 0 && ((u64)to & 1)) {
		__raw_writeb(c, to);
		to++;
		count--;
	}

	/* Handle any initial odd halfword */
	if (count >= 2 && ((u64)to & 2)) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* Handle any initial odd word */
	if (count >= 4 && ((u64)to & 4)) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* Handle all full-sized quadwords: we're aligned
	   (or have a small count).  COUNT is biased by -8 so the
	   loop can test >= 0, then re-biased afterwards. */
	count -= 8;
	if (count >= 0) {
		do {
			__raw_writeq(c, to);
			to += 8;
			count -= 8;
		} while (count >= 0);
	}
	count += 8;

	/* The tail is word-aligned if we still have count >= 4 */
	if (count >= 4) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* The tail is half-word aligned if we have count >= 2 */
	if (count >= 2) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* And finally, one last byte.. */
	if (count) {
		__raw_writeb(c, to);
	}
	/* One trailing barrier covers the whole raw-access sequence. */
	mb();
}
647
648 EXPORT_SYMBOL(_memset_c_io);
649
650 /* A version of memcpy used by the vga console routines to move data around
651 arbitrarily between screen and main memory. */
652
void
scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
	/* COUNT is in bytes.  Either pointer may actually refer to
	   IO space (e.g. the screen buffer); dispatch on which
	   side(s) are IO addresses. */
	const u16 __iomem *ios = (const u16 __iomem *) s;
	u16 __iomem *iod = (u16 __iomem *) d;
	int s_isio = __is_ioaddr(s);
	int d_isio = __is_ioaddr(d);

	if (s_isio) {
		if (d_isio) {
			/* FIXME: Should handle unaligned ops and
			   operation widening. */

			/* IO -> IO: copy 16 bits at a time. */
			count /= 2;
			while (count--) {
				u16 tmp = __raw_readw(ios++);
				__raw_writew(tmp, iod++);
			}
		}
		else
			memcpy_fromio(d, ios, count);
	} else {
		if (d_isio)
			memcpy_toio(iod, s, count);
		else
			memcpy(d, s, count);
	}
}
681
682 EXPORT_SYMBOL(scr_memcpyw);
683
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	/* SIZE is unused here: the platform hook only needs the port
	   number to produce the iomap cookie. */
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}
688
void ioport_unmap(void __iomem *addr)
{
	/* Intentionally empty: cookies from ioport_map need no
	   teardown here. */
}
692
693 EXPORT_SYMBOL(ioport_map);
694 EXPORT_SYMBOL(ioport_unmap);
695