/*
 * arch/sh/lib/io.c - SH32 optimized I/O routines
 *
 * Copyright (C) 2000 Stuart Menefy
 * Copyright (C) 2005 Paul Mundt
 *
 * Provide real functions which expand to whatever the header file defined.
 * Also definitions of machine independent IO functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/io.h>

void __raw_readsl(unsigned long addr, void *datap, int len)
{
	u32 *data;

	/* Read single longwords until the destination is 32-byte aligned. */
	for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
		*data++ = ctrl_inl(addr);

	/* Copy a full cache line (eight longwords) per iteration. */
	if (likely(len >= (0x20 >> 2))) {
		int tmp2, tmp3, tmp4, tmp5, tmp6;

		__asm__ __volatile__(
			"1:			\n\t"
			"mov.l	@%7, r0		\n\t"
			"mov.l	@%7, %2		\n\t"
#ifdef CONFIG_CPU_SH4
			"movca.l	r0, @%0	\n\t"
#else
			"mov.l	r0, @%0		\n\t"
#endif
			"mov.l	@%7, %3		\n\t"
			"mov.l	@%7, %4		\n\t"
			"mov.l	@%7, %5		\n\t"
			"mov.l	@%7, %6		\n\t"
			"mov.l	@%7, r7		\n\t"
			"mov.l	@%7, r0		\n\t"
			"mov.l	%2, @(0x04,%0)	\n\t"
			"mov	#0x20>>2, %2	\n\t"
			"mov.l	%3, @(0x08,%0)	\n\t"
			"sub	%2, %1		\n\t"
			"mov.l	%4, @(0x0c,%0)	\n\t"
			"cmp/hi	%1, %2		! T if 32 > len	\n\t"
			"mov.l	%5, @(0x10,%0)	\n\t"
			"mov.l	%6, @(0x14,%0)	\n\t"
			"mov.l	r7, @(0x18,%0)	\n\t"
			"mov.l	r0, @(0x1c,%0)	\n\t"
			"bf.s	1b		\n\t"
			" add	#0x20, %0	\n\t"
			: "=&r" (data), "=&r" (len),
			  "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
			  "=&r" (tmp5), "=&r" (tmp6)
			: "r"(addr), "0" (data), "1" (len)
			: "r0", "r7", "t", "memory");
	}

	/* Pick up any remaining longwords. */
	for (; len != 0; len--)
		*data++ = ctrl_inl(addr);
}
EXPORT_SYMBOL(__raw_readsl);

void __raw_writesl(unsigned long addr, const void *data, int len)
{
	if (likely(len != 0)) {
		int tmp1;

		/*
		 * Write one longword per iteration; dt decrements the
		 * count and sets T once it reaches zero.
		 */
		__asm__ __volatile__ (
			"1:			\n\t"
			"mov.l	@%0+, %1	\n\t"
			"dt	%3		\n\t"
			"bf.s	1b		\n\t"
			" mov.l	%1, @%4		\n\t"
			: "=&r" (data), "=&r" (tmp1)
			: "0" (data), "r" (len), "r"(addr)
			: "t", "memory");
	}
}
EXPORT_SYMBOL(__raw_writesl);