/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc.
 *
 * Dave Liu <daveliu@freescale.com>
 * based on source code of Shlomi Gridish
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include "common.h"
#include "asm/errno.h"
#include "asm/io.h"
#include "asm/immap_qe.h"
#include "qe.h"

#if defined(CONFIG_QE)
qe_map_t *qe_immr = NULL;
static qe_snum_t snums[QE_NUM_OF_SNUM];

/* Issue a command through the QE Command Register (CECR) and busy-wait
 * until the QE clears the flag bit, which signals completion.
 */
void qe_issue_cmd(uint cmd, uint sbc, u8 mcn, u32 cmd_data)
{
	u32 cecr;

	if (cmd == QE_RESET) {
		out_be32(&qe_immr->cp.cecr, (u32)(cmd | QE_CR_FLG));
	} else {
		out_be32(&qe_immr->cp.cecdr, cmd_data);
		out_be32(&qe_immr->cp.cecr, (sbc | QE_CR_FLG |
			 ((u32)mcn << QE_CR_PROTOCOL_SHIFT) | cmd));
	}
	/* Wait for the QE_CR_FLG to clear */
	do {
		cecr = in_be32(&qe_immr->cp.cecr);
	} while (cecr & QE_CR_FLG);

	return;
}

/* Carve a block of the given size and alignment out of the QE multi-user
 * RAM (MURAM) and return its offset within MURAM.
 */
uint qe_muram_alloc(uint size, uint align)
{
	DECLARE_GLOBAL_DATA_PTR;

	uint retloc;
	uint align_mask, off;
	uint savebase;

	align_mask = align - 1;
	savebase = gd->mp_alloc_base;

	if ((off = (gd->mp_alloc_base & align_mask)) != 0)
		gd->mp_alloc_base += (align - off);

	if ((off = size & align_mask) != 0)
		size += (align - off);

	if ((gd->mp_alloc_base + size) >= gd->mp_alloc_top) {
		gd->mp_alloc_base = savebase;
		printf("%s: ran out of ram.\n", __FUNCTION__);
	}

	retloc = gd->mp_alloc_base;
	gd->mp_alloc_base += size;

	memset((void *)&qe_immr->muram[retloc], 0, size);

	__asm__ __volatile__("sync");

	return retloc;
}

/* Translate a MURAM offset into a CPU-visible pointer */
void *qe_muram_addr(uint offset)
{
	return (void *)&qe_immr->muram[offset];
}

static void qe_sdma_init(void)
{
	volatile sdma_t *p;
	uint sdma_buffer_base;

	p = (volatile sdma_t *)&qe_immr->sdma;

	/* All DMA transactions on bus 1 */
	out_be32(&p->sdaqr, 0);
	out_be32(&p->sdaqmr, 0);

	/* Allocate 2KB temporary buffer for sdma */
	sdma_buffer_base = qe_muram_alloc(2048, 64);
	out_be32(&p->sdwbcr, sdma_buffer_base & QE_SDEBCR_BA_MASK);

	/* Clear sdma status */
	out_be32(&p->sdsr, 0x03000000);

	/* Enable global mode on bus 1, and 2KB buffer size */
	out_be32(&p->sdmr, QE_SDMR_GLB_1_MSK | (0x3 << QE_SDMR_CEN_SHIFT));
}

/* Hardware thread serial numbers (SNUMs) supported by the QE */
static u8 thread_snum[QE_NUM_OF_SNUM] = {
	0x04, 0x05, 0x0c, 0x0d,
	0x14, 0x15, 0x1c, 0x1d,
	0x24, 0x25, 0x2c, 0x2d,
	0x34, 0x35, 0x88, 0x89,
	0x98, 0x99, 0xa8, 0xa9,
	0xb8, 0xb9, 0xc8, 0xc9,
	0xd8, 0xd9, 0xe8, 0xe9
};

/* Mark all SNUMs free and load the hardware SNUM values */
static void qe_snums_init(void)
{
	int i;

	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
		snums[i].state = QE_SNUM_STATE_FREE;
		snums[i].num = thread_snum[i];
	}
}

/* Claim a free SNUM; returns its value, or -EBUSY if none is free */
int qe_get_snum(void)
{
	int snum = -EBUSY;
	int i;

	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
		if (snums[i].state == QE_SNUM_STATE_FREE) {
			snums[i].state = QE_SNUM_STATE_USED;
			snum = snums[i].num;
			break;
		}
	}

	return snum;
}

/* Return a SNUM to the free pool */
void qe_put_snum(u8 snum)
{
	int i;

	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
		if (snums[i].num == snum) {
			snums[i].state = QE_SNUM_STATE_FREE;
			break;
		}
	}
}

/* Basic QE bring-up: record the IMMR base, set up the MURAM allocator,
 * the SDMA and the SNUM pool.
 */
void qe_init(uint qe_base)
{
	DECLARE_GLOBAL_DATA_PTR;

	/* Init the QE IMMR base */
	qe_immr = (qe_map_t *)qe_base;

	gd->mp_alloc_base = QE_DATAONLY_BASE;
	gd->mp_alloc_top = gd->mp_alloc_base + QE_DATAONLY_SIZE;

	qe_sdma_init();
	qe_snums_init();
}

void qe_reset(void)
{
	qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
}

/* Assign the parameter RAM page at para_ram_base to the thread
 * identified by snum.
 */
void qe_assign_page(uint snum, uint para_ram_base)
{
	u32 cecr;

	out_be32(&qe_immr->cp.cecdr, para_ram_base);
	out_be32(&qe_immr->cp.cecr, ((u32)snum << QE_CR_ASSIGN_PAGE_SNUM_SHIFT)
					| QE_CR_FLG | QE_ASSIGN_PAGE);

	/* Wait for the QE_CR_FLG to clear */
	do {
		cecr = in_be32(&qe_immr->cp.cecr);
	} while (cecr & QE_CR_FLG);

	return;
}

/*
 * brg: 0~15 as BRG1~BRG16
 * rate: baud rate
 * The BRG input clock comes from BRGCLK, an internal clock derived from
 * the QE clock (one-half of the QE clock). If the clock source needs to
 * come from a CLKn pin instead, this function has to be changed.
 */

#define BRG_CLK (gd->brg_clk)

int qe_set_brg(uint brg, uint rate)
{
	DECLARE_GLOBAL_DATA_PTR;
	volatile uint *bp;
	u32 divisor;
	int div16 = 0;

	if (brg >= QE_NUM_OF_BRGS)
		return -EINVAL;
	bp = (uint *)&qe_immr->brg.brgc1;
	bp += brg;

	divisor = (BRG_CLK / rate);
	if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
		div16 = 1;
		divisor /= 16;
	}

	*bp = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
	__asm__ __volatile__("sync");

	if (div16) {
		*bp |= QE_BRGC_DIV16;
		__asm__ __volatile__("sync");
	}

	return 0;
}

/* Set the Ethernet MII clock master */
int qe_set_mii_clk_src(int ucc_num)
{
	u32 cmxgcr;

	/* check if the UCC number is in range. */
	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) {
		printf("%s: ucc num not in range\n", __FUNCTION__);
		return -EINVAL;
	}

	cmxgcr = in_be32(&qe_immr->qmx.cmxgcr);
	cmxgcr &= ~QE_CMXGCR_MII_ENET_MNG_MASK;
	cmxgcr |= (ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
	out_be32(&qe_immr->qmx.cmxgcr, cmxgcr);

	return 0;
}

#endif /* CONFIG_QE */