/*
 * Freescale SPI controller driver.
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (C) 2006 Polycom, Inc.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * CPM SPI and QE buffer descriptors mode support:
 * Copyright (c) 2009 MontaVista Software, Inc.
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
#include <asm/qe.h>

#include "spi-fsl-lib.h"

/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

/* SPI Controller registers */
struct fsl_spi_reg {
	u8 res1[0x20];
	__be32 mode;
	__be32 event;
	__be32 mask;
	__be32 command;
	__be32 transmit;
	__be32 receive;
};

/* SPI Controller mode register definitions */
#define	SPMODE_LOOP		(1 << 30)
#define	SPMODE_CI_INACTIVEHIGH	(1 << 29)
#define	SPMODE_CP_BEGIN_EDGECLK	(1 << 28)
#define	SPMODE_DIV16		(1 << 27)
#define	SPMODE_REV		(1 << 26)
#define	SPMODE_MS		(1 << 25)
#define	SPMODE_ENABLE		(1 << 24)
#define	SPMODE_LEN(x)		((x) << 20)
#define	SPMODE_PM(x)		((x) << 16)
#define	SPMODE_OP		(1 << 14)
#define	SPMODE_CG(x)		((x) << 7)

/*
 * Default for SPI Mode:
 *	SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
 */
#define	SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
			 SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))

/* SPIE register values */
#define	SPIE_NE		0x00000200	/* Not empty */
#define	SPIE_NF		0x00000100	/* Not full */

/* SPIM register values */
#define	SPIM_NE		0x00000200	/* Not empty */
#define	SPIM_NF		0x00000100	/* Not full */

#define	SPIE_TXB	0x00000200	/* Last char is written to tx fifo */
#define	SPIE_RXB	0x00000100	/* Last char is written to rx buf */

/* SPCOM register values */
#define	SPCOM_STR	(1 << 23)	/* Start transmit */

#define	SPI_PRAM_SIZE	0x100
#define	SPI_MRBLR	((unsigned int)PAGE_SIZE)

static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;

static void fsl_spi_change_mode(struct spi_device *spi)
{
	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
	struct spi_mpc8xxx_cs *cs = spi->controller_state;
	struct fsl_spi_reg *reg_base = mspi->reg_base;
	__be32 __iomem *mode = &reg_base->mode;
	unsigned long flags;

	if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
		return;

	/* Turn off IRQs locally to minimize time that SPI is disabled. */
	local_irq_save(flags);

	/* Turn off SPI unit prior to changing mode */
	mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);

	/* When in CPM mode, we need to reinit tx and rx. */
	if (mspi->flags & SPI_CPM_MODE) {
		if (mspi->flags & SPI_QE) {
			qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
				     QE_CR_PROTOCOL_UNSPECIFIED, 0);
		} else {
			cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
			if (mspi->flags & SPI_CPM1) {
				out_be16(&mspi->pram->rbptr,
					 in_be16(&mspi->pram->rbase));
				out_be16(&mspi->pram->tbptr,
					 in_be16(&mspi->pram->tbase));
			}
		}
	}
	mpc8xxx_spi_write_reg(mode, cs->hw_mode);
	local_irq_restore(flags);
}

static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
	bool pol = spi->mode & SPI_CS_HIGH;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (value == BITBANG_CS_INACTIVE) {
		if (pdata->cs_control)
			pdata->cs_control(spi, !pol);
	}

	if (value == BITBANG_CS_ACTIVE) {
		mpc8xxx_spi->rx_shift = cs->rx_shift;
		mpc8xxx_spi->tx_shift = cs->tx_shift;
		mpc8xxx_spi->get_rx = cs->get_rx;
		mpc8xxx_spi->get_tx = cs->get_tx;

		fsl_spi_change_mode(spi);

		if (pdata->cs_control)
			pdata->cs_control(spi, pol);
	}
}

static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
				      struct spi_device *spi,
				      struct mpc8xxx_spi *mpc8xxx_spi,
				      int bits_per_word)
{
	cs->rx_shift = 0;
	cs->tx_shift = 0;
	if (bits_per_word <= 8) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u8;
		cs->get_tx = mpc8xxx_spi_tx_buf_u8;
		if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
			cs->rx_shift = 16;
			cs->tx_shift = 24;
		}
	} else if (bits_per_word <= 16) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u16;
		cs->get_tx = mpc8xxx_spi_tx_buf_u16;
		if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
			cs->rx_shift = 16;
			cs->tx_shift = 16;
		}
	} else if (bits_per_word <= 32) {
		cs->get_rx = mpc8xxx_spi_rx_buf_u32;
		cs->get_tx = mpc8xxx_spi_tx_buf_u32;
	} else
		return -EINVAL;

	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
	    spi->mode & SPI_LSB_FIRST) {
		cs->tx_shift = 0;
		if (bits_per_word <= 8)
			cs->rx_shift = 8;
		else
			cs->rx_shift = 0;
	}
	mpc8xxx_spi->rx_shift = cs->rx_shift;
	mpc8xxx_spi->tx_shift = cs->tx_shift;
	mpc8xxx_spi->get_rx = cs->get_rx;
	mpc8xxx_spi->get_tx = cs->get_tx;

	return bits_per_word;
}

static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
				     struct spi_device *spi,
				     int bits_per_word)
{
	/* QE uses Little Endian for words > 8
	 * so transform all words > 8 into 8 bits.
	 * Unfortunately that doesn't work for LSB so
	 * reject these for now */
	/* Note: 32 bits word, LSB works iff
	 * tfcr/rfcr is set to CPMFCR_GBL */
	if (spi->mode & SPI_LSB_FIRST &&
	    bits_per_word > 8)
		return -EINVAL;
	if (bits_per_word > 8)
		return 8; /* pretend it's 8 bits */
	return bits_per_word;
}

static int fsl_spi_setup_transfer(struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	int bits_per_word = 0;
	u8 pm;
	u32 hz = 0;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	mpc8xxx_spi = spi_master_get_devdata(spi->master);

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
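		/* Zero values here fall back to the spi_device defaults
		 * picked up just below. */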
	}

	/* spi_transfer level calls that work per-word */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/* Make sure it's a bit width we support [4..16, 32] */
	if ((bits_per_word < 4)
	    || ((bits_per_word > 16) && (bits_per_word != 32)))
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
		bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
							   mpc8xxx_spi,
							   bits_per_word);
	else if (mpc8xxx_spi->flags & SPI_QE)
		bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
							  bits_per_word);

	if (bits_per_word < 0)
		return bits_per_word;

	if (bits_per_word == 32)
		bits_per_word = 0;
	else
		bits_per_word = bits_per_word - 1;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
			 | SPMODE_PM(0xF));

	cs->hw_mode |= SPMODE_LEN(bits_per_word);

	if ((mpc8xxx_spi->spibrg / hz) > 64) {
		cs->hw_mode |= SPMODE_DIV16;
		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;

		WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
			  hz, mpc8xxx_spi->spibrg / 1024);
		if (pm > 16)
			pm = 16;
	} else {
		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
	}
	if (pm)
		pm--;

	cs->hw_mode |= SPMODE_PM(pm);

	fsl_spi_change_mode(spi);
	return 0;
}

static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
	struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
	struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
	unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
	unsigned int xfer_ofs;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

	if (mspi->rx_dma == mspi->dma_dummy_rx)
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
	else
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
	out_be16(&rx_bd->cbd_datlen, 0);
	out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

	if (mspi->tx_dma == mspi->dma_dummy_tx)
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
	else
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
	out_be16(&tx_bd->cbd_datlen, xfer_len);
	out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
				 BD_SC_LAST);

	/* start transfer */
	mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}

static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
			    struct spi_transfer *t, bool is_dma_mapped)
{
	struct device *dev = mspi->dev;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	if (is_dma_mapped) {
		mspi->map_tx_dma = 0;
		mspi->map_rx_dma = 0;
	} else {
		mspi->map_tx_dma = 1;
		mspi->map_rx_dma = 1;
	}

	if (!t->tx_buf) {
		mspi->tx_dma = mspi->dma_dummy_tx;
		mspi->map_tx_dma = 0;
	}

	if (!t->rx_buf) {
		mspi->rx_dma = mspi->dma_dummy_rx;
		mspi->map_rx_dma = 0;
	}

	if (mspi->map_tx_dma) {
		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mspi->tx_dma)) {
			dev_err(dev, "unable to map tx dma\n");
			return -ENOMEM;
		}
	} else if (t->tx_buf) {
		mspi->tx_dma = t->tx_dma;
	}

	if (mspi->map_rx_dma) {
		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mspi->rx_dma)) {
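			/* The tx mapping created above is undone at the
			 * err_rx_dma label below. */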
			dev_err(dev, "unable to map rx dma\n");
			goto err_rx_dma;
		}
	} else if (t->rx_buf) {
		mspi->rx_dma = t->rx_dma;
	}

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);

	mspi->xfer_in_progress = t;
	mspi->count = t->len;

	/* start CPM transfers */
	fsl_spi_cpm_bufs_start(mspi);

	return 0;

err_rx_dma:
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	return -ENOMEM;
}

static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct spi_transfer *t = mspi->xfer_in_progress;

	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	if (mspi->map_rx_dma)
		dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
	mspi->xfer_in_progress = NULL;
}

static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
			    struct spi_transfer *t, unsigned int len)
{
	u32 word;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	mspi->count = len;

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);

	/* transmit word */
	word = mspi->get_tx(mspi);
	mpc8xxx_spi_write_reg(&reg_base->transmit, word);

	return 0;
}

static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
			bool is_dma_mapped)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	struct fsl_spi_reg *reg_base;
	unsigned int len = t->len;
	u8 bits_per_word;
	int ret;

	reg_base = mpc8xxx_spi->reg_base;
	bits_per_word = spi->bits_per_word;
	if (t->bits_per_word)
		bits_per_word = t->bits_per_word;

	if (bits_per_word > 8) {
		/* invalid length? */
		if (len & 1)
			return -EINVAL;
		len /= 2;
	}
	if (bits_per_word > 16) {
		/* invalid length? */
		if (len & 1)
			return -EINVAL;
		len /= 2;
	}

	mpc8xxx_spi->tx = t->tx_buf;
	mpc8xxx_spi->rx = t->rx_buf;

	INIT_COMPLETION(mpc8xxx_spi->done);

	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
		ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
	else
		ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
	if (ret)
		return ret;

	wait_for_completion(&mpc8xxx_spi->done);

	/* disable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, 0);

	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
		fsl_spi_cpm_bufs_complete(mpc8xxx_spi);

	return mpc8xxx_spi->count;
}

static void fsl_spi_do_one_msg(struct spi_message *m)
{
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	unsigned int cs_change;
	const int nsecs = 50;
	int status;

	cs_change = 1;
	status = 0;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->bits_per_word || t->speed_hz) {
			/* Don't allow changes if CS is active */
			status = -EINVAL;

			if (cs_change)
				status = fsl_spi_setup_transfer(spi, t);
			if (status < 0)
				break;
		}

		if (cs_change) {
			fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
			ndelay(nsecs);
		}
		cs_change = t->cs_change;
		if (t->len)
			status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
		if (status) {
			status = -EMSGSIZE;
			break;
		}
		m->actual_length += t->len;

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		if (cs_change) {
			ndelay(nsecs);
			fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
			ndelay(nsecs);
		}
	}

	m->status = status;
	m->complete(m->context);

	if (status || !cs_change) {
		ndelay(nsecs);
		fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
	}

	fsl_spi_setup_transfer(spi, NULL);
}

static int fsl_spi_setup(struct spi_device *spi)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	struct fsl_spi_reg *reg_base;
	int retval;
	u32 hw_mode;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (!spi->max_speed_hz)
		return -EINVAL;

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi->controller_state = cs;
	}
	mpc8xxx_spi = spi_master_get_devdata(spi->master);

	reg_base = mpc8xxx_spi->reg_base;

	hw_mode = cs->hw_mode; /* Save original settings */
	cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
			 | SPMODE_REV | SPMODE_LOOP);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= SPMODE_REV;
	if (spi->mode & SPI_LOOP)
		cs->hw_mode |= SPMODE_LOOP;

	retval = fsl_spi_setup_transfer(spi, NULL);
	if (retval < 0) {
		cs->hw_mode = hw_mode; /* Restore settings */
		return retval;
	}
	return 0;
}

static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	u16 len;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
		in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);

	len = in_be16(&mspi->rx_bd->cbd_datlen);
	if (len > mspi->count) {
		WARN_ON(1);
		len = mspi->count;
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);

	mspi->count -= len;
	if (mspi->count)
		fsl_spi_cpm_bufs_start(mspi);
	else
		complete(&mspi->done);
}

static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	/* We need to handle RX first */
	if (events & SPIE_NE) {
		u32 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);

		if (mspi->rx)
			mspi->get_rx(rx_data, mspi);
	}

	if ((events & SPIE_NF) == 0)
		/* spin until TX is done */
		while (((events =
			mpc8xxx_spi_read_reg(&reg_base->event)) &
						SPIE_NF) == 0)
			cpu_relax();

	/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);

	mspi->count -= 1;
	if (mspi->count) {
		u32 word = mspi->get_tx(mspi);

		mpc8xxx_spi_write_reg(&reg_base->transmit, word);
	} else {
		complete(&mspi->done);
	}
}

static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
{
	struct mpc8xxx_spi *mspi = context_data;
	irqreturn_t ret = IRQ_NONE;
	u32 events;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	/* Get interrupt events(tx/rx) */
	events = mpc8xxx_spi_read_reg(&reg_base->event);
	if (events)
		ret = IRQ_HANDLED;

	dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);

	if (mspi->flags & SPI_CPM_MODE)
		fsl_spi_cpm_irq(mspi, events);
	else
		fsl_spi_cpu_irq(mspi, events);

	return ret;
}

static void *fsl_spi_alloc_dummy_rx(void)
{
	mutex_lock(&fsl_dummy_rx_lock);

	if (!fsl_dummy_rx)
		fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
	if (fsl_dummy_rx)
		fsl_dummy_rx_refcnt++;

	mutex_unlock(&fsl_dummy_rx_lock);

	return fsl_dummy_rx;
}

static void fsl_spi_free_dummy_rx(void)
{
	mutex_lock(&fsl_dummy_rx_lock);

	switch (fsl_dummy_rx_refcnt) {
	case 0:
		WARN_ON(1);
		break;
	case 1:
		kfree(fsl_dummy_rx);
		fsl_dummy_rx = NULL;
		/* fall through */
	default:
		fsl_dummy_rx_refcnt--;
		break;
	}

	mutex_unlock(&fsl_dummy_rx_lock);
}

static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	void __iomem *spi_base;
	unsigned long pram_ofs = -ENOMEM;

	/* Can't use of_address_to_resource(), QE muram isn't at 0. */
	iprop = of_get_property(np, "reg", &size);

	/* QE with a fixed pram location? */
	if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
		return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

	/* QE but with a dynamic pram location? */
	if (mspi->flags & SPI_QE) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
		return pram_ofs;
	}

	spi_base = of_iomap(np, 1);
	if (spi_base == NULL)
		return -EINVAL;

	if (mspi->flags & SPI_CPM2) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		out_be16(spi_base, pram_ofs);
	} else {
		struct spi_pram __iomem *pram = spi_base;
		u16 rpbase = in_be16(&pram->rpbase);

		/* Microcode relocation patch applied? */
		if (rpbase)
			pram_ofs = rpbase;
		else {
			pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
			out_be16(spi_base, pram_ofs);
		}
	}

	iounmap(spi_base);
	return pram_ofs;
}

static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	unsigned long pram_ofs;
	unsigned long bds_ofs;

	if (!(mspi->flags & SPI_CPM_MODE))
		return 0;

	if (!fsl_spi_alloc_dummy_rx())
		return -ENOMEM;

	if (mspi->flags & SPI_QE) {
		iprop = of_get_property(np, "cell-index", &size);
		if (iprop && size == sizeof(*iprop))
			mspi->subblock = *iprop;

		switch (mspi->subblock) {
		default:
			dev_warn(dev, "cell-index unspecified, assuming SPI1");
			/* fall through */
		case 0:
			mspi->subblock = QE_CR_SUBBLOCK_SPI1;
			break;
		case 1:
			mspi->subblock = QE_CR_SUBBLOCK_SPI2;
			break;
		}
	}

	pram_ofs = fsl_spi_cpm_get_pram(mspi);
	if (IS_ERR_VALUE(pram_ofs)) {
		dev_err(dev, "can't allocate spi parameter ram\n");
		goto err_pram;
	}

	bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
				  sizeof(*mspi->rx_bd), 8);
	if (IS_ERR_VALUE(bds_ofs)) {
		dev_err(dev, "can't allocate bds\n");
		goto err_bds;
	}

	mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
		dev_err(dev, "unable to map dummy tx buffer\n");
		goto err_dummy_tx;
	}

	mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
		dev_err(dev, "unable to map dummy rx buffer\n");
		goto err_dummy_rx;
	}

	mspi->pram = cpm_muram_addr(pram_ofs);

	mspi->tx_bd = cpm_muram_addr(bds_ofs);
	mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

	/* Initialize parameter ram. */
	out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
	out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
	out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
	out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
	out_be16(&mspi->pram->mrblr, SPI_MRBLR);
	out_be32(&mspi->pram->rstate, 0);
	out_be32(&mspi->pram->rdp, 0);
	out_be16(&mspi->pram->rbptr, 0);
	out_be16(&mspi->pram->rbc, 0);
	out_be32(&mspi->pram->rxtmp, 0);
	out_be32(&mspi->pram->tstate, 0);
	out_be32(&mspi->pram->tdp, 0);
	out_be16(&mspi->pram->tbptr, 0);
	out_be16(&mspi->pram->tbc, 0);
	out_be32(&mspi->pram->txtmp, 0);

	return 0;

err_dummy_rx:
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
	cpm_muram_free(bds_ofs);
err_bds:
	cpm_muram_free(pram_ofs);
err_pram:
	fsl_spi_free_dummy_rx();
	return -ENOMEM;
}

static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;

	dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
	cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
	cpm_muram_free(cpm_muram_offset(mspi->pram));
	fsl_spi_free_dummy_rx();
}

static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
{
	iounmap(mspi->reg_base);
	fsl_spi_cpm_free(mspi);
}

static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
		struct resource *mem, unsigned int irq)
{
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct spi_master *master;
	struct mpc8xxx_spi *mpc8xxx_spi;
	struct fsl_spi_reg *reg_base;
	u32 regval;
	int ret = 0;

	master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(dev, master);

	ret = mpc8xxx_spi_probe(dev, mem, irq);
	if (ret)
		goto err_probe;

	master->setup = fsl_spi_setup;

	mpc8xxx_spi = spi_master_get_devdata(master);
	mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
	mpc8xxx_spi->spi_remove = fsl_spi_remove;

	ret = fsl_spi_cpm_init(mpc8xxx_spi);
	if (ret)
		goto err_cpm_init;

	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
		mpc8xxx_spi->rx_shift = 16;
		mpc8xxx_spi->tx_shift = 24;
	}

	mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
	if (mpc8xxx_spi->reg_base == NULL) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	/* Register for SPI Interrupt */
	ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
			  0, "fsl_spi", mpc8xxx_spi);

	if (ret != 0)
		goto free_irq;

	reg_base = mpc8xxx_spi->reg_base;

	/* SPI controller initializations */
	mpc8xxx_spi_write_reg(&reg_base->mode, 0);
	mpc8xxx_spi_write_reg(&reg_base->mask, 0);
	mpc8xxx_spi_write_reg(&reg_base->command, 0);
	mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);

	/* Enable SPI interface */
	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
		regval |= SPMODE_OP;

	mpc8xxx_spi_write_reg(&reg_base->mode, regval);

	ret = spi_register_master(master);
	if (ret < 0)
		goto unreg_master;

	dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
		 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));

	return master;

unreg_master:
	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
free_irq:
	iounmap(mpc8xxx_spi->reg_base);
err_ioremap:
	fsl_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
err_probe:
	spi_master_put(master);
err:
	return ERR_PTR(ret);
}

static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
	struct device *dev = spi->dev.parent;
	struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
	u16 cs = spi->chip_select;
	int gpio = pinfo->gpios[cs];
	bool alow = pinfo->alow_flags[cs];

	gpio_set_value(gpio, on ^ alow);
}

static int of_fsl_spi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
	unsigned int ngpios;
	int i = 0;
	int ret;

	ngpios = of_gpio_count(np);
	if (!ngpios) {
		/*
		 * SPI w/o chip-select line. One SPI device is still permitted
		 * though.
		 */
		pdata->max_chipselect = 1;
		return 0;
	}

	pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
	if (!pinfo->gpios)
		return -ENOMEM;
	memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));

	pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
				    GFP_KERNEL);
	if (!pinfo->alow_flags) {
		ret = -ENOMEM;
		goto err_alloc_flags;
	}

	for (; i < ngpios; i++) {
		int gpio;
		enum of_gpio_flags flags;

		gpio = of_get_gpio_flags(np, i, &flags);
		if (!gpio_is_valid(gpio)) {
			dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
			ret = gpio;
			goto err_loop;
		}

		ret = gpio_request(gpio, dev_name(dev));
		if (ret) {
			dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
			goto err_loop;
		}

		pinfo->gpios[i] = gpio;
		pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;

		ret = gpio_direction_output(pinfo->gpios[i],
					    pinfo->alow_flags[i]);
		if (ret) {
			dev_err(dev, "can't set output direction for gpio "
				"#%d: %d\n", i, ret);
			goto err_loop;
		}
	}

	pdata->max_chipselect = ngpios;
	pdata->cs_control = fsl_spi_cs_control;

	return 0;

err_loop:
	while (i >= 0) {
		if (gpio_is_valid(pinfo->gpios[i]))
			gpio_free(pinfo->gpios[i]);
		i--;
	}

	kfree(pinfo->alow_flags);
	pinfo->alow_flags = NULL;
err_alloc_flags:
	kfree(pinfo->gpios);
	pinfo->gpios = NULL;
	return ret;
}

static int of_fsl_spi_free_chipselects(struct device *dev)
{
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
	int i;

	if (!pinfo->gpios)
		return 0;

	for (i = 0; i < pdata->max_chipselect; i++) {
		if (gpio_is_valid(pinfo->gpios[i]))
			gpio_free(pinfo->gpios[i]);
	}

	kfree(pinfo->gpios);
	kfree(pinfo->alow_flags);
	return 0;
}

static int __devinit of_fsl_spi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct spi_master *master;
	struct resource mem;
	struct resource irq;
	int ret = -ENOMEM;

	ret = of_mpc8xxx_spi_probe(ofdev);
	if (ret)
		return ret;

	ret = of_fsl_spi_get_chipselects(dev);
	if (ret)
		goto err;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		goto err;

	ret = of_irq_to_resource(np, 0, &irq);
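	/* A zero return from of_irq_to_resource() means the interrupt
	 * could not be mapped. */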
	if (!ret) {
		ret = -EINVAL;
		goto err;
	}

	master = fsl_spi_probe(dev, &mem, irq.start);
	if (IS_ERR(master)) {
		ret = PTR_ERR(master);
		goto err;
	}

	return 0;

err:
	of_fsl_spi_free_chipselects(dev);
	return ret;
}

static int __devexit of_fsl_spi_remove(struct platform_device *ofdev)
{
	int ret;

	ret = mpc8xxx_spi_remove(&ofdev->dev);
	if (ret)
		return ret;
	of_fsl_spi_free_chipselects(&ofdev->dev);
	return 0;
}

static const struct of_device_id of_fsl_spi_match[] = {
	{ .compatible = "fsl,spi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_spi_match);

static struct platform_driver of_fsl_spi_driver = {
	.driver = {
		.name = "fsl_spi",
		.owner = THIS_MODULE,
		.of_match_table = of_fsl_spi_match,
	},
	.probe = of_fsl_spi_probe,
	.remove = __devexit_p(of_fsl_spi_remove),
};

#ifdef CONFIG_MPC832x_RDB
/*
 * XXX XXX XXX
 * This is a "legacy" platform driver that was used only by the MPC8323E-RDB
 * boards. The driver should go away soon, since newer MPC8323E-RDB device
 * trees can work with the OpenFirmware driver. But for now we support the
 * old trees as well.
 */
static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int irq;
	struct spi_master *master;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -EINVAL;

	master = fsl_spi_probe(&pdev->dev, mem, irq);
	if (IS_ERR(master))
		return PTR_ERR(master);
	return 0;
}

static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
	return mpc8xxx_spi_remove(&pdev->dev);
}

MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
	.probe = plat_mpc8xxx_spi_probe,
	.remove = __devexit_p(plat_mpc8xxx_spi_remove),
	.driver = {
		.name = "mpc8xxx_spi",
		.owner = THIS_MODULE,
	},
};

static bool legacy_driver_failed;

static void __init legacy_driver_register(void)
{
	legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
}

static void __exit legacy_driver_unregister(void)
{
	if (legacy_driver_failed)
		return;
	platform_driver_unregister(&mpc8xxx_spi_driver);
}
#else
static void __init legacy_driver_register(void) {}
static void __exit legacy_driver_unregister(void) {}
#endif /* CONFIG_MPC832x_RDB */

static int __init fsl_spi_init(void)
{
	legacy_driver_register();
	return platform_driver_register(&of_fsl_spi_driver);
}
module_init(fsl_spi_init);

static void __exit fsl_spi_exit(void)
{
	platform_driver_unregister(&of_fsl_spi_driver);
	legacy_driver_unregister();
}
module_exit(fsl_spi_exit);

MODULE_AUTHOR("Kumar Gala");
MODULE_DESCRIPTION("Simple Freescale SPI Driver");
MODULE_LICENSE("GPL");