/*
 * Freescale SPI controller driver cpm functions.
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (C) 2006 Polycom, Inc.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * CPM SPI and QE buffer descriptors mode support:
 * Copyright (c) 2009 MontaVista Software, Inc.
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <asm/cpm.h>
#include <asm/qe.h>

#include "spi-fsl-lib.h"
#include "spi-fsl-cpm.h"
#include "spi-fsl-spi.h"

/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

#define SPIE_TXB	0x00000200	/* Last char is written to tx fifo */
#define SPIE_RXB	0x00000100	/* Last char is written to rx buf */

/* SPCOM register values */
#define SPCOM_STR	(1 << 23)	/* Start transmit */

#define SPI_PRAM_SIZE	0x100
#define SPI_MRBLR	((unsigned int)PAGE_SIZE)

static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;

void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
{
	if (mspi->flags & SPI_QE) {
		qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	} else {
		cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
		if (mspi->flags & SPI_CPM1) {
			out_be16(&mspi->pram->rbptr,
				 in_be16(&mspi->pram->rbase));
			out_be16(&mspi->pram->tbptr,
				 in_be16(&mspi->pram->tbase));
		}
	}
}

static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
	struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
	struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
	unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
	unsigned int xfer_ofs;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

	if (mspi->rx_dma == mspi->dma_dummy_rx)
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
	else
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
	out_be16(&rx_bd->cbd_datlen, 0);
	out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

	if (mspi->tx_dma == mspi->dma_dummy_tx)
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
	else
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
	out_be16(&tx_bd->cbd_datlen, xfer_len);
	out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
				 BD_SC_LAST);

	/* start transfer */
	mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}

int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
		     struct spi_transfer *t, bool is_dma_mapped)
{
	struct device *dev = mspi->dev;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	if (is_dma_mapped) {
		mspi->map_tx_dma = 0;
		mspi->map_rx_dma = 0;
	} else {
		mspi->map_tx_dma = 1;
		mspi->map_rx_dma = 1;
	}

	if (!t->tx_buf) {
		mspi->tx_dma = mspi->dma_dummy_tx;
		mspi->map_tx_dma = 0;
	}

	if (!t->rx_buf) {
		mspi->rx_dma = mspi->dma_dummy_rx;
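		/*
		 * The dummy rx buffer is already DMA-mapped at init time,
		 * so skip the per-transfer mapping below.
		 */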
		mspi->map_rx_dma = 0;
	}

	if (mspi->map_tx_dma) {
		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mspi->tx_dma)) {
			dev_err(dev, "unable to map tx dma\n");
			return -ENOMEM;
		}
	} else if (t->tx_buf) {
		mspi->tx_dma = t->tx_dma;
	}

	if (mspi->map_rx_dma) {
		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mspi->rx_dma)) {
			dev_err(dev, "unable to map rx dma\n");
			goto err_rx_dma;
		}
	} else if (t->rx_buf) {
		mspi->rx_dma = t->rx_dma;
	}

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);

	mspi->xfer_in_progress = t;
	mspi->count = t->len;

	/* start CPM transfers */
	fsl_spi_cpm_bufs_start(mspi);

	return 0;

err_rx_dma:
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	return -ENOMEM;
}

void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct spi_transfer *t = mspi->xfer_in_progress;

	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	if (mspi->map_rx_dma)
		dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
	mspi->xfer_in_progress = NULL;
}

void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	u16 len;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
		in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);

	len = in_be16(&mspi->rx_bd->cbd_datlen);
	if (len > mspi->count) {
		WARN_ON(1);
		len = mspi->count;
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);

	mspi->count -= len;
	if (mspi->count)
		fsl_spi_cpm_bufs_start(mspi);
	else
		complete(&mspi->done);
}

static void *fsl_spi_alloc_dummy_rx(void)
{
	mutex_lock(&fsl_dummy_rx_lock);

	if (!fsl_dummy_rx)
		fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
	if (fsl_dummy_rx)
		fsl_dummy_rx_refcnt++;

	mutex_unlock(&fsl_dummy_rx_lock);

	return fsl_dummy_rx;
}

static void fsl_spi_free_dummy_rx(void)
{
	mutex_lock(&fsl_dummy_rx_lock);

	switch (fsl_dummy_rx_refcnt) {
	case 0:
		WARN_ON(1);
		break;
	case 1:
		kfree(fsl_dummy_rx);
		fsl_dummy_rx = NULL;
		/* fall through */
	default:
		fsl_dummy_rx_refcnt--;
		break;
	}

	mutex_unlock(&fsl_dummy_rx_lock);
}

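/*
 * Locate the SPI parameter RAM for this controller.  Depending on the SoC
 * this is either a fixed muram location described by the device tree (QE),
 * a relocated pram (CPM1 with the microcode relocation patch), or a freshly
 * allocated muram block whose offset is handed to the QE or written to the
 * SPI base register.
 */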
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	void __iomem *spi_base;
	unsigned long pram_ofs = -ENOMEM;

	/* Can't use of_address_to_resource(), QE muram isn't at 0. */
	iprop = of_get_property(np, "reg", &size);

	/* QE with a fixed pram location? */
	if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
		return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

	/* QE but with a dynamic pram location? */
	if (mspi->flags & SPI_QE) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
		return pram_ofs;
	}

	spi_base = of_iomap(np, 1);
	if (spi_base == NULL)
		return -EINVAL;

	if (mspi->flags & SPI_CPM2) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		out_be16(spi_base, pram_ofs);
	} else {
		struct spi_pram __iomem *pram = spi_base;
		u16 rpbase = in_be16(&pram->rpbase);

		/* Microcode relocation patch applied? */
		if (rpbase) {
			pram_ofs = rpbase;
		} else {
			pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
			out_be16(spi_base, pram_ofs);
		}
	}

	iounmap(spi_base);
	return pram_ofs;
}

int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	unsigned long pram_ofs;
	unsigned long bds_ofs;

	if (!(mspi->flags & SPI_CPM_MODE))
		return 0;

	if (!fsl_spi_alloc_dummy_rx())
		return -ENOMEM;

	if (mspi->flags & SPI_QE) {
		iprop = of_get_property(np, "cell-index", &size);
		if (iprop && size == sizeof(*iprop))
			mspi->subblock = *iprop;

		switch (mspi->subblock) {
		default:
			dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
			/* fall through */
		case 0:
			mspi->subblock = QE_CR_SUBBLOCK_SPI1;
			break;
		case 1:
			mspi->subblock = QE_CR_SUBBLOCK_SPI2;
			break;
		}
	}

	pram_ofs = fsl_spi_cpm_get_pram(mspi);
	if (IS_ERR_VALUE(pram_ofs)) {
		dev_err(dev, "can't allocate spi parameter ram\n");
		goto err_pram;
	}

	bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
				  sizeof(*mspi->rx_bd), 8);
	if (IS_ERR_VALUE(bds_ofs)) {
		dev_err(dev, "can't allocate bds\n");
		goto err_bds;
	}

	mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
		dev_err(dev, "unable to map dummy tx buffer\n");
		goto err_dummy_tx;
	}

	mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
		dev_err(dev, "unable to map dummy rx buffer\n");
		goto err_dummy_rx;
	}

	mspi->pram = cpm_muram_addr(pram_ofs);

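	/*
	 * The tx and rx buffer descriptors live back to back in the muram
	 * block allocated above.
	 */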
	mspi->tx_bd = cpm_muram_addr(bds_ofs);
	mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

	/* Initialize parameter ram. */
	out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
	out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
	out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
	out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
	out_be16(&mspi->pram->mrblr, SPI_MRBLR);
	out_be32(&mspi->pram->rstate, 0);
	out_be32(&mspi->pram->rdp, 0);
	out_be16(&mspi->pram->rbptr, 0);
	out_be16(&mspi->pram->rbc, 0);
	out_be32(&mspi->pram->rxtmp, 0);
	out_be32(&mspi->pram->tstate, 0);
	out_be32(&mspi->pram->tdp, 0);
	out_be16(&mspi->pram->tbptr, 0);
	out_be16(&mspi->pram->tbc, 0);
	out_be32(&mspi->pram->txtmp, 0);

	return 0;

err_dummy_rx:
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
	cpm_muram_free(bds_ofs);
err_bds:
	cpm_muram_free(pram_ofs);
err_pram:
	fsl_spi_free_dummy_rx();
	return -ENOMEM;
}

void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;

	if (!(mspi->flags & SPI_CPM_MODE))
		return;

	dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
	cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
	cpm_muram_free(cpm_muram_offset(mspi->pram));
	fsl_spi_free_dummy_rx();
}