/* Generic I/O port emulation, based on MN10300 code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm-generic/pci_iomap.h>

#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
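
/*
 * Illustrative sketch only (not part of this header): the __raw_* accessors
 * perform no byte swapping and give no ordering guarantees beyond the
 * compiler's volatile semantics.  "regs" and DESC_WORD0 below are
 * hypothetical names.
 *
 *	void __iomem *regs;			// e.g. obtained via ioremap()
 *	u32 w0 = __raw_readl(regs + DESC_WORD0);
 *	__raw_writel(w0 | 0x1, regs + DESC_WORD0);
 */
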
/*
 * {read,write}{b,w,l,q}() access little endian memory and return the result
 * in native endianness.
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	__raw_writeb(value, addr);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_le32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	__raw_writeq(__cpu_to_le64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
#ifndef readb_relaxed
#define readb_relaxed readb
#endif

#ifndef readw_relaxed
#define readw_relaxed readw
#endif

#ifndef readl_relaxed
#define readl_relaxed readl
#endif

#ifndef readq_relaxed
#define readq_relaxed readq
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb
#endif

#ifndef writew_relaxed
#define writew_relaxed writew
#endif

#ifndef writel_relaxed
#define writel_relaxed writel
#endif

#ifndef writeq_relaxed
#define writeq_relaxed writeq
#endif
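
/*
 * Illustrative sketch only: readl()/writel() are the safe defaults for MMIO;
 * the _relaxed() forms may be used where the caller supplies any ordering it
 * needs.  "regs", STATUS and READY are hypothetical names.
 *
 *	while (!(readl(regs + STATUS) & READY))
 *		cpu_relax();
 */
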
/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
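
/*
 * Illustrative sketch only: the string accessors are meant for FIFO-style
 * registers, so the I/O address is not advanced between accesses and no
 * byte swapping is performed.  "regs", FIFO_DATA and "len" are hypothetical.
 *
 *	u16 buf[64];
 *	readsw(regs + FIFO_DATA, buf, len);	// read "len" 16-bit words
 */
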
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O ports.  The {in,out}{b,w,l}_p()
 * variants can be implemented by architectures whose hardware needs an
 * additional delay for I/O accesses to take effect.
 */

#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
	return readb(PCI_IOBASE + addr);
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
	return readw(PCI_IOBASE + addr);
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
	return readl(PCI_IOBASE + addr);
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
	writeb(value, PCI_IOBASE + addr);
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
	writew(value, PCI_IOBASE + addr);
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
	writel(value, PCI_IOBASE + addr);
}
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif
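
/*
 * Illustrative sketch only: with this generic implementation a port number
 * is simply an offset from PCI_IOBASE, so port I/O degenerates to MMIO.
 * "port_base", CTRL_REG and STATUS_REG below are hypothetical names.
 *
 *	outb(0x03, port_base + CTRL_REG);
 *	status = inb(port_base + STATUS_REG);
 */
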
/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access
 * a single I/O port.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif
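
/*
 * Illustrative sketch only: the ioread*()/iowrite*() accessors are intended
 * for cookies returned by pci_iomap()/ioport_map(); with CONFIG_GENERIC_IOMAP
 * they dispatch to port or MMIO accesses, while the fallbacks above are plain
 * MMIO wrappers.  "pdev" and CTRL_REG are hypothetical.
 *
 *	void __iomem *bar = pci_iomap(pdev, 0, 0);
 *	if (bar)
 *		iowrite32(0x1, bar + CTRL_REG);
 */
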
#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return __be16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return __be32_to_cpu(__raw_readl(addr));
}
#endif

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	__raw_writew(__cpu_to_be16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_be32(value), addr);
}
#endif

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
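
/*
 * Illustrative sketch only: these conversions are only meaningful for
 * directly-mapped kernel memory (e.g. a kmalloc() buffer), not for vmalloc()
 * or ioremap() addresses.
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *	WARN_ON(buf && phys_to_virt(phys) != buf);
 */
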
/*
 * Map a physical address range into the kernel's virtual I/O space.
 *
 * These implementations are for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */

#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef __ioremap
#define __ioremap __ioremap
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
				      unsigned long flags)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_nocache
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef iounmap
#define iounmap iounmap

static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
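
/*
 * Illustrative sketch only: a typical mapping lifecycle using the helpers
 * above.  The physical address, length and offsets are hypothetical.
 *
 *	void __iomem *base = ioremap(0x10000000, 0x1000);
 *	if (base) {
 *		u32 id;
 *
 *		memset_io(base + SRAM_OFF, 0, 256);
 *		memcpy_fromio(&id, base + ID_OFF, sizeof(id));
 *		iounmap(base);
 *	}
 */
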
#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */