/*
 * SH RSPI driver
 *
 * Copyright (C) 2012 Renesas Solutions Corp.
 *
 * Based on spi-sh.c:
 * Copyright (C) 2011 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>

#define RSPI_SPCR	0x00	/* Control Register */
#define RSPI_SSLP	0x01	/* Slave Select Polarity Register */
#define RSPI_SPPCR	0x02	/* Pin Control Register */
#define RSPI_SPSR	0x03	/* Status Register */
#define RSPI_SPDR	0x04	/* Data Register */
#define RSPI_SPSCR	0x08	/* Sequence Control Register */
#define RSPI_SPSSR	0x09	/* Sequence Status Register */
#define RSPI_SPBR	0x0a	/* Bit Rate Register */
#define RSPI_SPDCR	0x0b	/* Data Control Register */
#define RSPI_SPCKD	0x0c	/* Clock Delay Register */
#define RSPI_SSLND	0x0d	/* Slave Select Negation Delay Register */
#define RSPI_SPND	0x0e	/* Next-Access Delay Register */
#define RSPI_SPCR2	0x0f	/* Control Register 2 */
#define RSPI_SPCMD0	0x10	/* Command Register 0 */
#define RSPI_SPCMD1	0x12	/* Command Register 1 */
#define RSPI_SPCMD2	0x14	/* Command Register 2 */
#define RSPI_SPCMD3	0x16	/* Command Register 3 */
#define RSPI_SPCMD4	0x18	/* Command Register 4 */
#define RSPI_SPCMD5	0x1a	/* Command Register 5 */
#define RSPI_SPCMD6	0x1c	/* Command Register 6 */
#define RSPI_SPCMD7	0x1e	/* Command Register 7 */
#define RSPI_SPBFCR	0x20	/* Buffer Control Register */
#define RSPI_SPBFDR	0x22	/* Buffer Data Count Setting Register */

/* qspi only */
#define QSPI_SPBFCR	0x18	/* Buffer Control Register */
#define QSPI_SPBDCR	0x1a	/* Buffer Data Count Register */
#define QSPI_SPBMUL0	0x1c	/* Transfer Data Length Multiplier Setting Register 0 */
#define QSPI_SPBMUL1	0x20	/* Transfer Data Length Multiplier Setting Register 1 */
#define QSPI_SPBMUL2	0x24	/* Transfer Data Length Multiplier Setting Register 2 */
#define QSPI_SPBMUL3	0x28	/* Transfer Data Length Multiplier Setting Register 3 */

/* SPCR - Control Register */
#define SPCR_SPRIE	0x80	/* Receive Interrupt Enable */
#define SPCR_SPE	0x40	/* Function Enable */
#define SPCR_SPTIE	0x20	/* Transmit Interrupt Enable */
#define SPCR_SPEIE	0x10	/* Error Interrupt Enable */
#define SPCR_MSTR	0x08	/* Master/Slave Mode Select */
#define SPCR_MODFEN	0x04	/* Mode Fault Error Detection Enable */
/* RSPI on SH only */
#define SPCR_TXMD	0x02	/* TX Only Mode (vs. Full Duplex) */
#define SPCR_SPMS	0x01	/* 3-wire Mode (vs. 4-wire) */
/* QSPI on R-Car M2 only */
#define SPCR_WSWAP	0x02	/* Word Swap of read-data for DMAC */
#define SPCR_BSWAP	0x01	/* Byte Swap of read-data for DMAC */

/* SSLP - Slave Select Polarity Register */
#define SSLP_SSL1P	0x02	/* SSL1 Signal Polarity Setting */
#define SSLP_SSL0P	0x01	/* SSL0 Signal Polarity Setting */

/* SPPCR - Pin Control Register */
#define SPPCR_MOIFE	0x20	/* MOSI Idle Value Fixing Enable */
#define SPPCR_MOIFV	0x10	/* MOSI Idle Fixed Value */
#define SPPCR_SPOM	0x04
#define SPPCR_SPLP2	0x02	/* Loopback Mode 2 (non-inverting) */
#define SPPCR_SPLP	0x01	/* Loopback Mode (inverting) */

#define SPPCR_IO3FV	0x04	/* Single-/Dual-SPI Mode IO3 Output Fixed Value */
#define SPPCR_IO2FV	0x04	/* Single-/Dual-SPI Mode IO2 Output Fixed Value */

/* SPSR - Status Register */
#define SPSR_SPRF	0x80	/* Receive Buffer Full Flag */
#define SPSR_TEND	0x40	/* Transmit End */
#define SPSR_SPTEF	0x20	/* Transmit Buffer Empty Flag */
#define SPSR_PERF	0x08	/* Parity Error Flag */
#define SPSR_MODF	0x04	/* Mode Fault Error Flag */
#define SPSR_IDLNF	0x02	/* RSPI Idle Flag */
#define SPSR_OVRF	0x01	/* Overrun Error Flag */

/* SPSCR - Sequence Control Register */
#define SPSCR_SPSLN_MASK	0x07	/* Sequence Length Specification */

/* SPSSR - Sequence Status Register */
#define SPSSR_SPECM_MASK	0x70	/* Command Error Mask */
#define SPSSR_SPCP_MASK		0x07	/* Command Pointer Mask */

/* SPDCR - Data Control Register */
#define SPDCR_TXDMY	0x80	/* Dummy Data Transmission Enable */
#define SPDCR_SPLW1	0x40	/* Access Width Specification (RZ) */
#define SPDCR_SPLW0	0x20	/* Access Width Specification (RZ) */
#define SPDCR_SPLLWORD	(SPDCR_SPLW1 | SPDCR_SPLW0)
#define SPDCR_SPLWORD	SPDCR_SPLW1
#define SPDCR_SPLBYTE	SPDCR_SPLW0
#define SPDCR_SPLW	0x20	/* Access Width Specification (SH) */
#define SPDCR_SPRDTD	0x10	/* Receive Transmit Data Select */
#define SPDCR_SLSEL1	0x08
#define SPDCR_SLSEL0	0x04
#define SPDCR_SLSEL_MASK	0x0c	/* SSL1 Output Select */
#define SPDCR_SPFC1	0x02
#define SPDCR_SPFC0	0x01
#define SPDCR_SPFC_MASK	0x03	/* Frame Count Setting (1-4) */

/* SPCKD - Clock Delay Register */
#define SPCKD_SCKDL_MASK	0x07	/* Clock Delay Setting (1-8) */

/* SSLND - Slave Select Negation Delay Register */
#define SSLND_SLNDL_MASK	0x07	/* SSL Negation Delay Setting (1-8) */

/* SPND - Next-Access Delay Register */
#define SPND_SPNDL_MASK		0x07	/* Next-Access Delay Setting (1-8) */

/* SPCR2 - Control Register 2 */
#define SPCR2_PTE	0x08	/* Parity Self-Test Enable */
#define SPCR2_SPIE	0x04	/* Idle Interrupt Enable */
#define SPCR2_SPOE	0x02	/* Odd Parity Enable (vs. Even) */
#define SPCR2_SPPE	0x01	/* Parity Enable */

/* SPCMDn - Command Registers */
#define SPCMD_SCKDEN	0x8000	/* Clock Delay Setting Enable */
#define SPCMD_SLNDEN	0x4000	/* SSL Negation Delay Setting Enable */
#define SPCMD_SPNDEN	0x2000	/* Next-Access Delay Enable */
#define SPCMD_LSBF	0x1000	/* LSB First */
#define SPCMD_SPB_MASK	0x0f00	/* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit)	(((bit - 1) << 8) & SPCMD_SPB_MASK)
#define SPCMD_SPB_8BIT	0x0000	/* qspi only */
#define SPCMD_SPB_16BIT	0x0100
#define SPCMD_SPB_20BIT	0x0000
#define SPCMD_SPB_24BIT	0x0100
#define SPCMD_SPB_32BIT	0x0200
#define SPCMD_SSLKP	0x0080	/* SSL Signal Level Keeping */
#define SPCMD_SPIMOD_MASK	0x0060	/* SPI Operating Mode (QSPI only) */
#define SPCMD_SPIMOD1	0x0040
#define SPCMD_SPIMOD0	0x0020
#define SPCMD_SPIMOD_SINGLE	0
#define SPCMD_SPIMOD_DUAL	SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD	SPCMD_SPIMOD1
#define SPCMD_SPRW	0x0010	/* SPI Read/Write Access (Dual/Quad) */
#define SPCMD_SSLA_MASK	0x0030	/* SSL Assert Signal Setting (RSPI) */
#define SPCMD_BRDV_MASK	0x000c	/* Bit Rate Division Setting */
#define SPCMD_CPOL	0x0002	/* Clock Polarity Setting */
#define SPCMD_CPHA	0x0001	/* Clock Phase Setting */

/* SPBFCR - Buffer Control Register */
#define SPBFCR_TXRST	0x80	/* Transmit Buffer Data Reset (qspi only) */
#define SPBFCR_RXRST	0x40	/* Receive Buffer Data Reset (qspi only) */
#define SPBFCR_TXTRG_MASK	0x30	/* Transmit Buffer Data Triggering Number */
#define SPBFCR_RXTRG_MASK	0x07	/* Receive Buffer Data Triggering Number */

#define DUMMY_DATA	0x00

struct rspi_data {
	void __iomem *addr;
	u32 max_speed_hz;
	struct spi_master *master;
	struct list_head queue;
	struct work_struct ws;
	wait_queue_head_t wait;
	spinlock_t lock;
	struct clk *clk;
	u8 spsr;
	u16 spcmd;
	const struct spi_ops *ops;

	/* for dmaengine */
	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;
	int irq;

	unsigned dma_width_16bit:1;
	unsigned dma_callbacked:1;
};

static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
	iowrite8(data, rspi->addr + offset);
}

static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
	iowrite16(data, rspi->addr + offset);
}

static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
	iowrite32(data, rspi->addr + offset);
}

static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
	return ioread8(rspi->addr + offset);
}

static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
	return ioread16(rspi->addr + offset);
}

/* optional functions */
struct spi_ops {
	int (*set_config_register)(const struct rspi_data *rspi,
				   int access_size);
	int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
			struct spi_transfer *t);
	int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
			   struct spi_transfer *t);
};

/*
 * functions for RSPI
 */
static int rspi_set_config_register(const struct rspi_data *rspi,
				    int access_size)
{
	int spbr;

	/* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
	rspi_write8(rspi, 0x00, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
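	/* SPBR is an 8-bit register, so clamp the computed divider setting */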
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Sets number of frames to be used: 1 frame */
	rspi_write8(rspi, 0x00, RSPI_SPDCR);

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets parity, interrupt mask */
	rspi_write8(rspi, 0x00, RSPI_SPCR2);

	/* Sets SPCMD */
	rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | rspi->spcmd,
		     RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}

/*
 * functions for QSPI
 */
static int qspi_set_config_register(const struct rspi_data *rspi,
				    int access_size)
{
	u16 spcmd;
	int spbr;

	/* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
	rspi_write8(rspi, 0x00, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Sets number of frames to be used: 1 frame */
	rspi_write8(rspi, 0x00, RSPI_SPDCR);

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Data Length Setting */
	if (access_size == 8)
		spcmd = SPCMD_SPB_8BIT;
	else if (access_size == 16)
		spcmd = SPCMD_SPB_16BIT;
	else
		spcmd = SPCMD_SPB_32BIT;

	spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | rspi->spcmd | SPCMD_SPNDEN;

	/* Resets transfer data length */
	rspi_write32(rspi, 0, QSPI_SPBMUL0);

	/* Resets transmit and receive buffer */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	/* Sets buffer to allow normal operation */
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);

	/* Sets SPCMD */
	rspi_write16(rspi, spcmd, RSPI_SPCMD0);

	/* Enables SPI function in master mode */
	rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);

	return 0;
}

#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)

static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}

static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}

static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
				   u8 enable_bit)
{
	int ret;

	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
	rspi_enable_irq(rspi, enable_bit);
	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
	if (ret == 0 && !(rspi->spsr & wait_mask))
		return -ETIMEDOUT;

	return 0;
}

static void rspi_assert_ssl(const struct rspi_data *rspi)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
}

static void rspi_negate_ssl(const struct rspi_data *rspi)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
}

static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
			 struct spi_transfer *t)
{
	int remain = t->len;
	const u8 *data = t->tx_buf;

	while (remain > 0) {
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
			    RSPI_SPCR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}

		rspi_write16(rspi, *data, RSPI_SPDR);
		data++;
		remain--;
	}

	/* Wait for the last transmission */
	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

	return 0;
}

static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
			 struct spi_transfer *t)
{
	int remain = t->len;
	const u8 *data = t->tx_buf;

	rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);

	while (remain > 0) {

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}
		rspi_write8(rspi, *data++, RSPI_SPDR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: receive timeout\n", __func__);
			return -ETIMEDOUT;
		}
		rspi_read8(rspi, RSPI_SPDR);

		remain--;
	}

	/* Wait for the last transmission */
	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

	return 0;
}

#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)

static void rspi_dma_complete(void *arg)
{
	struct rspi_data *rspi = arg;

	rspi->dma_callbacked = 1;
	wake_up_interruptible(&rspi->wait);
}

static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf,
			   unsigned len, struct dma_chan *chan,
			   enum dma_transfer_direction dir)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, len);
	sg_dma_len(sg) = len;
	return dma_map_sg(chan->device->dev, sg, 1, dir);
}

static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
			      enum dma_transfer_direction dir)
{
	dma_unmap_sg(chan->device->dev, sg, 1, dir);
}

static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
{
	u16 *dst = buf;
	const u8 *src = data;

	while (len) {
		*dst++ = (u16)(*src++);
		len--;
	}
}

static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
{
	u8 *dst = buf;
	const u16 *src = data;

	while (len) {
		*dst++ = (u8)*src++;
		len--;
	}
}

static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg;
	const void *buf = NULL;
	struct dma_async_tx_descriptor *desc;
	unsigned len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		void *tmp;
		/*
		 * If the DMAC bus width is 16-bit, the driver allocates a
		 * dummy buffer and converts the original data into the DMAC
		 * data in the following format:
		 *   original data: 1st byte, 2nd byte ...
		 *   DMAC data:     1st byte, dummy, 2nd byte, dummy ...
		 */
		len = t->len * 2;
		tmp = kmalloc(len, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
		buf = tmp;
	} else {
		len = t->len;
		buf = t->tx_buf;
	}

	if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	/*
	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
	 * called. So, this driver disables the IRQ during the DMA transfer.
	 */
	disable_irq(rspi->irq);

	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE);
	rspi->dma_callbacked = 0;

	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE);

	enable_irq(rspi->irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	if (rspi->dma_width_16bit)
		kfree(buf);

	return ret;
}

static void rspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read16(rspi, RSPI_SPDR);	/* dummy read */
	if (spsr & SPSR_OVRF)
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
			    RSPI_SPSR);
}

static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
			    struct spi_transfer *t)
{
	int remain = t->len;
	u8 *data;

	rspi_receive_init(rspi);

	data = t->rx_buf;
	while (remain > 0) {
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
			    RSPI_SPCR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* dummy write to generate the clock */
		rspi_write16(rspi, DUMMY_DATA, RSPI_SPDR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: receive timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* SPDR allows 16 or 32-bit access only */
		*data = (u8)rspi_read16(rspi, RSPI_SPDR);

		data++;
		remain--;
	}

	return 0;
}

static void qspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read8(rspi, RSPI_SPDR);	/* dummy read */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);
}

static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
			    struct spi_transfer *t)
{
	int remain = t->len;
	u8 *data;

	qspi_receive_init(rspi);

	data = t->rx_buf;
	while (remain > 0) {

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* dummy write to generate the clock */
		rspi_write8(rspi, DUMMY_DATA, RSPI_SPDR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: receive timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* SPDR allows 8, 16 or 32-bit access */
		*data++ = rspi_read8(rspi, RSPI_SPDR);
		remain--;
	}

	return 0;
}

#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)

static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg, sg_dummy;
	void *dummy = NULL, *rx_buf = NULL;
	struct dma_async_tx_descriptor *desc, *desc_dummy;
	unsigned len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		/*
		 * If the DMAC bus width is 16-bit, the driver allocates a dummy
		 * buffer and, at the end, converts the DMAC data back into
		 * the actual data in the following format:
		 *   DMAC data:   1st byte, dummy, 2nd byte, dummy ...
		 *   actual data: 1st byte, 2nd byte ...
		 */
		len = t->len * 2;
		rx_buf = kmalloc(len, GFP_KERNEL);
		if (!rx_buf)
			return -ENOMEM;
	} else {
		len = t->len;
		rx_buf = t->rx_buf;
	}

	/* prepare dummy transfer to generate SPI clocks */
	dummy = kzalloc(len, GFP_KERNEL);
	if (!dummy) {
		ret = -ENOMEM;
		goto end_nomap;
	}
	if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
			     DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_dummy) {
		ret = -EIO;
		goto end_dummy_mapped;
	}

	/* prepare receive transfer */
	if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
			     DMA_FROM_DEVICE)) {
		ret = -EFAULT;
		goto end_dummy_mapped;
	}
	desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	rspi_receive_init(rspi);

	/*
	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
	 * called. So, this driver disables the IRQ during the DMA transfer.
	 */
	disable_irq(rspi->irq);

	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
	rspi->dma_callbacked = 0;

	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_rx);

	desc_dummy->callback = NULL;	/* No callback */
	dmaengine_submit(desc_dummy);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);

	enable_irq(rspi->irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
end_dummy_mapped:
	rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	if (rspi->dma_width_16bit) {
		if (!ret)
			rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
		kfree(rx_buf);
	}
	kfree(dummy);

	return ret;
}

static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
{
	if (t->tx_buf && rspi->chan_tx)
		return 1;
	/* If the module receives data by DMAC, it also needs TX DMAC */
	if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
		return 1;

	return 0;
}

static void rspi_work(struct work_struct *work)
{
	struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
	struct spi_message *mesg;
	struct spi_transfer *t;
	unsigned long flags;
	int ret;

	while (1) {
		spin_lock_irqsave(&rspi->lock, flags);
		if (list_empty(&rspi->queue)) {
			spin_unlock_irqrestore(&rspi->lock, flags);
			break;
		}
		mesg = list_entry(rspi->queue.next, struct spi_message, queue);
		list_del_init(&mesg->queue);
		spin_unlock_irqrestore(&rspi->lock, flags);

		rspi_assert_ssl(rspi);

		list_for_each_entry(t, &mesg->transfers, transfer_list) {
			if (t->tx_buf) {
				if (rspi_is_dma(rspi, t))
					ret = rspi_send_dma(rspi, t);
				else
					ret = send_pio(rspi, mesg, t);
				if (ret < 0)
					goto error;
			}
			if (t->rx_buf) {
				if (rspi_is_dma(rspi, t))
					ret = rspi_receive_dma(rspi, t);
				else
					ret = receive_pio(rspi, mesg, t);
				if (ret < 0)
					goto error;
			}
			mesg->actual_length += t->len;
		}
		rspi_negate_ssl(rspi);

		mesg->status = 0;
		mesg->complete(mesg->context);
	}

	return;

error:
	mesg->status = ret;
	mesg->complete(mesg->context);
}

static int rspi_setup(struct spi_device *spi)
{
	struct rspi_data *rspi = spi_master_get_devdata(spi->master);

	rspi->max_speed_hz = spi->max_speed_hz;

	rspi->spcmd = SPCMD_SSLKP;
	if (spi->mode & SPI_CPOL)
		rspi->spcmd |= SPCMD_CPOL;
	if (spi->mode & SPI_CPHA)
		rspi->spcmd |= SPCMD_CPHA;

	set_config_register(rspi, 8);

	return 0;
}

static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
{
	struct rspi_data *rspi = spi_master_get_devdata(spi->master);
	unsigned long flags;

	mesg->actual_length = 0;
	mesg->status = -EINPROGRESS;

	spin_lock_irqsave(&rspi->lock, flags);
	list_add_tail(&mesg->queue, &rspi->queue);
	schedule_work(&rspi->ws);
	spin_unlock_irqrestore(&rspi->lock, flags);

	return 0;
}

static void rspi_cleanup(struct spi_device *spi)
{
}

static irqreturn_t rspi_irq(int irq, void *_sr)
{
	struct rspi_data *rspi = _sr;
	u8 spsr;
	irqreturn_t ret = IRQ_NONE;
	u8 disable_irq = 0;

	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		disable_irq |= SPCR_SPRIE;
	if (spsr & SPSR_SPTEF)
		disable_irq |= SPCR_SPTIE;

	if (disable_irq) {
		ret = IRQ_HANDLED;
		rspi_disable_irq(rspi, disable_irq);
		wake_up(&rspi->wait);
	}

	return ret;
}

static int rspi_request_dma(struct rspi_data *rspi,
			    struct platform_device *pdev)
{
	const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	int ret;

	if (!res || !rspi_pd)
		return 0;	/* The driver assumes no error. */

	rspi->dma_width_16bit = rspi_pd->dma_width_16bit;

	/* If the module receives data by DMAC, it also needs TX DMAC */
	if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
						    (void *)rspi_pd->dma_rx_id);
		if (rspi->chan_rx) {
			cfg.slave_id = rspi_pd->dma_rx_id;
			cfg.direction = DMA_DEV_TO_MEM;
			cfg.dst_addr = 0;
			cfg.src_addr = res->start + RSPI_SPDR;
			ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
			if (!ret)
				dev_info(&pdev->dev, "Use DMA when rx.\n");
			else
				return ret;
		}
	}
	if (rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
						    (void *)rspi_pd->dma_tx_id);
		if (rspi->chan_tx) {
			cfg.slave_id = rspi_pd->dma_tx_id;
			cfg.direction = DMA_MEM_TO_DEV;
			cfg.dst_addr = res->start + RSPI_SPDR;
			cfg.src_addr = 0;
			ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
			if (!ret)
				dev_info(&pdev->dev, "Use DMA when tx\n");
			else
				return ret;
		}
	}

	return 0;
}

static void rspi_release_dma(struct rspi_data *rspi)
{
	if (rspi->chan_tx)
		dma_release_channel(rspi->chan_tx);
	if (rspi->chan_rx)
		dma_release_channel(rspi->chan_rx);
}

static int rspi_remove(struct platform_device *pdev)
{
	struct rspi_data *rspi = platform_get_drvdata(pdev);

	rspi_release_dma(rspi);
	clk_disable(rspi->clk);

	return 0;
}

static int rspi_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spi_master *master;
	struct rspi_data *rspi;
	int ret, irq;
	char clk_name[16];
	const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
	const struct spi_ops *ops;
	const struct platform_device_id *id_entry = pdev->id_entry;

	ops = (struct spi_ops *)id_entry->driver_data;
	/* ops parameter check */
	if (!ops->set_config_register) {
		dev_err(&pdev->dev, "there is no set_config_register\n");
		return -ENODEV;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "platform_get_irq error\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "spi_alloc_master error.\n");
		return -ENOMEM;
	}

	rspi = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, rspi);
	rspi->ops = ops;
	rspi->master = master;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rspi->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(rspi->addr)) {
		ret = PTR_ERR(rspi->addr);
		goto error1;
	}

	snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
	rspi->clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(rspi->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(rspi->clk);
		goto error1;
	}
	clk_enable(rspi->clk);

	INIT_LIST_HEAD(&rspi->queue);
	spin_lock_init(&rspi->lock);
	INIT_WORK(&rspi->ws, rspi_work);
	init_waitqueue_head(&rspi->wait);

	if (rspi_pd && rspi_pd->num_chipselect)
		master->num_chipselect = rspi_pd->num_chipselect;
	else
		master->num_chipselect = 2;	/* default */

	master->bus_num = pdev->id;
	master->setup = rspi_setup;
	master->transfer = rspi_transfer;
	master->cleanup = rspi_cleanup;
	master->mode_bits = SPI_CPHA | SPI_CPOL;

	ret = devm_request_irq(&pdev->dev, irq, rspi_irq, 0,
			       dev_name(&pdev->dev), rspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error\n");
		goto error2;
	}

	rspi->irq = irq;
	ret = rspi_request_dma(rspi, pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "rspi_request_dma failed.\n");
		goto error3;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret < 0) {
		dev_err(&pdev->dev, "spi_register_master error.\n");
		goto error3;
	}

	dev_info(&pdev->dev, "probed\n");

	return 0;

error3:
	rspi_release_dma(rspi);
error2:
	clk_disable(rspi->clk);
error1:
	spi_master_put(master);

	return ret;
}

static struct spi_ops rspi_ops = {
	.set_config_register = rspi_set_config_register,
	.send_pio = rspi_send_pio,
	.receive_pio = rspi_receive_pio,
};

static struct spi_ops qspi_ops = {
	.set_config_register = qspi_set_config_register,
	.send_pio = qspi_send_pio,
	.receive_pio = qspi_receive_pio,
};

static struct platform_device_id spi_driver_ids[] = {
	{ "rspi", (kernel_ulong_t)&rspi_ops },
	{ "qspi", (kernel_ulong_t)&qspi_ops },
	{},
};

MODULE_DEVICE_TABLE(platform, spi_driver_ids);

static struct platform_driver rspi_driver = {
	.probe = rspi_probe,
	.remove = rspi_remove,
	.id_table = spi_driver_ids,
	.driver = {
		.name = "renesas_spi",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(rspi_driver);

MODULE_DESCRIPTION("Renesas RSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:rspi");
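
/*
 * Usage sketch (illustrative only, never built): a board file could describe
 * one RSPI channel roughly as below.  The register base, size and IRQ number
 * are made-up placeholders; only the "rspi" platform device name and the
 * rspi_plat_data fields consumed above (dma_tx_id, dma_rx_id, dma_width_16bit,
 * num_chipselect) come from this driver.  Leaving the DMA slave IDs at zero
 * keeps the driver in PIO mode.
 */
#if 0
static struct resource rspi0_resources[] = {
	DEFINE_RES_MEM(0xe800c800, 0x24),	/* placeholder register block */
	DEFINE_RES_IRQ(220),			/* placeholder IRQ */
};

static struct rspi_plat_data rspi0_pdata = {
	.dma_tx_id	= 0,	/* 0 = no shdma slave, PIO only */
	.dma_rx_id	= 0,
	.dma_width_16bit = 0,
	.num_chipselect	= 2,
};

static void __init board_add_rspi0(void)
{
	platform_device_register_resndata(NULL, "rspi", 0,
					  rspi0_resources,
					  ARRAY_SIZE(rspi0_resources),
					  &rspi0_pdata, sizeof(rspi0_pdata));
}
#endif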