/*
 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
 *
 * Copyright (C) 2008-2009 ST-Ericsson AB
 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
 *
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 * Initial adoption to PL022 by:
 *	Sachin Verma <sachin.verma@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>

/*
 * This macro is used to define some register default values.
 * reg is masked with mask, then OR:ed with an (again masked)
 * val shifted sb steps to the left.
 */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))

/*
 * This macro is also used to define some default values.
 * It will just shift val by sb steps to the left and mask
 * the result with mask.
 */
#define GEN_MASK_BITS(val, mask, sb) \
 (((val)<<(sb)) & (mask))
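/*
 * As an illustration of how these two macros combine, a CR0 value with
 * clock polarity and rate set could be built up like this (a sketch,
 * using definitions that appear further down in this file):
 *
 *	u32 cr0 = 0;
 *	SSP_WRITE_BITS(cr0, SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6);
 *	cr0 |= GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8);
 *
 * SSP_WRITE_BITS() read-modify-writes a field in place, while
 * GEN_MASK_BITS() just produces the masked, shifted value.
 */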
#define DRIVE_TX		0
#define DO_NOT_DRIVE_TX		1

#define DO_NOT_QUEUE_DMA	0
#define QUEUE_DMA		1

#define RX_TRANSFER		1
#define TX_TRANSFER		2

/*
 * Macros to access SSP Registers with their offsets
 */
#define SSP_CR0(r)	(r + 0x000)
#define SSP_CR1(r)	(r + 0x004)
#define SSP_DR(r)	(r + 0x008)
#define SSP_SR(r)	(r + 0x00C)
#define SSP_CPSR(r)	(r + 0x010)
#define SSP_IMSC(r)	(r + 0x014)
#define SSP_RIS(r)	(r + 0x018)
#define SSP_MIS(r)	(r + 0x01C)
#define SSP_ICR(r)	(r + 0x020)
#define SSP_DMACR(r)	(r + 0x024)
#define SSP_ITCR(r)	(r + 0x080)
#define SSP_ITIP(r)	(r + 0x084)
#define SSP_ITOP(r)	(r + 0x088)
#define SSP_TDR(r)	(r + 0x08C)

#define SSP_PID0(r)	(r + 0xFE0)
#define SSP_PID1(r)	(r + 0xFE4)
#define SSP_PID2(r)	(r + 0xFE8)
#define SSP_PID3(r)	(r + 0xFEC)

#define SSP_CID0(r)	(r + 0xFF0)
#define SSP_CID1(r)	(r + 0xFF4)
#define SSP_CID2(r)	(r + 0xFF8)
#define SSP_CID3(r)	(r + 0xFFC)

/*
 * SSP Control Register 0 - SSP_CR0
 */
#define SSP_CR0_MASK_DSS	(0x0FUL << 0)
#define SSP_CR0_MASK_FRF	(0x3UL << 4)
#define SSP_CR0_MASK_SPO	(0x1UL << 6)
#define SSP_CR0_MASK_SPH	(0x1UL << 7)
#define SSP_CR0_MASK_SCR	(0xFFUL << 8)

/*
 * The ST version of this block moves some bits
 * in SSP_CR0 and extends it to 32 bits
 */
#define SSP_CR0_MASK_DSS_ST	(0x1FUL << 0)
#define SSP_CR0_MASK_HALFDUP_ST	(0x1UL << 5)
#define SSP_CR0_MASK_CSS_ST	(0x1FUL << 16)
#define SSP_CR0_MASK_FRF_ST	(0x3UL << 21)

/*
 * SSP Control Register 1 - SSP_CR1
 */
#define SSP_CR1_MASK_LBM	(0x1UL << 0)
#define SSP_CR1_MASK_SSE	(0x1UL << 1)
#define SSP_CR1_MASK_MS		(0x1UL << 2)
#define SSP_CR1_MASK_SOD	(0x1UL << 3)

/*
 * The ST version of this block adds some bits
 * in SSP_CR1
 */
#define SSP_CR1_MASK_RENDN_ST	(0x1UL << 4)
#define SSP_CR1_MASK_TENDN_ST	(0x1UL << 5)
#define SSP_CR1_MASK_MWAIT_ST	(0x1UL << 6)
#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
/* This one is only in the PL023 variant */
#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)

/*
 * SSP Status Register - SSP_SR
 */
#define SSP_SR_MASK_TFE		(0x1UL << 0) /* Transmit FIFO empty */
#define SSP_SR_MASK_TNF		(0x1UL << 1) /* Transmit FIFO not full */
#define SSP_SR_MASK_RNE		(0x1UL << 2) /* Receive FIFO not empty */
#define SSP_SR_MASK_RFF		(0x1UL << 3) /* Receive FIFO full */
#define SSP_SR_MASK_BSY		(0x1UL << 4) /* Busy Flag */

/*
 * SSP Clock Prescale Register - SSP_CPSR
 */
#define SSP_CPSR_MASK_CPSDVSR	(0xFFUL << 0)

/*
 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
 */
#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
#define SSP_IMSC_MASK_RTIM  (0x1UL << 1) /* Receive timeout Interrupt mask */
#define SSP_IMSC_MASK_RXIM  (0x1UL << 2) /* Receive FIFO Interrupt mask */
#define SSP_IMSC_MASK_TXIM  (0x1UL << 3) /* Transmit FIFO Interrupt mask */

/*
 * SSP Raw Interrupt Status Register - SSP_RIS
 */
/* Receive Overrun Raw Interrupt status */
#define SSP_RIS_MASK_RORRIS		(0x1UL << 0)
/* Receive Timeout Raw Interrupt status */
#define SSP_RIS_MASK_RTRIS		(0x1UL << 1)
/* Receive FIFO Raw Interrupt status */
#define SSP_RIS_MASK_RXRIS		(0x1UL << 2)
/* Transmit FIFO Raw Interrupt status */
#define SSP_RIS_MASK_TXRIS		(0x1UL << 3)
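/*
 * The status bits above are what the polling paths in this driver spin
 * on. For instance, draining the RX FIFO and then waiting for the block
 * to go idle (as flush() below does, with a bailout limit) boils down to:
 *
 *	while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
 *		readw(SSP_DR(pl022->virtbase));
 *	while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY)
 *		cpu_relax();
 */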
/*
 * SSP Masked Interrupt Status Register - SSP_MIS
 */
/* Receive Overrun Masked Interrupt status */
#define SSP_MIS_MASK_RORMIS		(0x1UL << 0)
/* Receive Timeout Masked Interrupt status */
#define SSP_MIS_MASK_RTMIS		(0x1UL << 1)
/* Receive FIFO Masked Interrupt status */
#define SSP_MIS_MASK_RXMIS		(0x1UL << 2)
/* Transmit FIFO Masked Interrupt status */
#define SSP_MIS_MASK_TXMIS		(0x1UL << 3)

/*
 * SSP Interrupt Clear Register - SSP_ICR
 */
/* Receive Overrun Raw Clear Interrupt bit */
#define SSP_ICR_MASK_RORIC		(0x1UL << 0)
/* Receive Timeout Clear Interrupt bit */
#define SSP_ICR_MASK_RTIC		(0x1UL << 1)

/*
 * SSP DMA Control Register - SSP_DMACR
 */
/* Receive DMA Enable bit */
#define SSP_DMACR_MASK_RXDMAE		(0x1UL << 0)
/* Transmit DMA Enable bit */
#define SSP_DMACR_MASK_TXDMAE		(0x1UL << 1)

/*
 * SSP Integration Test control Register - SSP_ITCR
 */
#define SSP_ITCR_MASK_ITEN		(0x1UL << 0)
#define SSP_ITCR_MASK_TESTFIFO		(0x1UL << 1)

/*
 * SSP Integration Test Input Register - SSP_ITIP
 */
#define ITIP_MASK_SSPRXD		(0x1UL << 0)
#define ITIP_MASK_SSPFSSIN		(0x1UL << 1)
#define ITIP_MASK_SSPCLKIN		(0x1UL << 2)
#define ITIP_MASK_RXDMAC		(0x1UL << 3)
#define ITIP_MASK_TXDMAC		(0x1UL << 4)
#define ITIP_MASK_SSPTXDIN		(0x1UL << 5)

/*
 * SSP Integration Test output Register - SSP_ITOP
 */
#define ITOP_MASK_SSPTXD		(0x1UL << 0)
#define ITOP_MASK_SSPFSSOUT		(0x1UL << 1)
#define ITOP_MASK_SSPCLKOUT		(0x1UL << 2)
#define ITOP_MASK_SSPOEn		(0x1UL << 3)
#define ITOP_MASK_SSPCTLOEn		(0x1UL << 4)
#define ITOP_MASK_RORINTR		(0x1UL << 5)
#define ITOP_MASK_RTINTR		(0x1UL << 6)
#define ITOP_MASK_RXINTR		(0x1UL << 7)
#define ITOP_MASK_TXINTR		(0x1UL << 8)
#define ITOP_MASK_INTR			(0x1UL << 9)
#define ITOP_MASK_RXDMABREQ		(0x1UL << 10)
#define ITOP_MASK_RXDMASREQ		(0x1UL << 11)
#define ITOP_MASK_TXDMABREQ		(0x1UL << 12)
#define ITOP_MASK_TXDMASREQ		(0x1UL << 13)

/*
 * SSP Test Data Register - SSP_TDR
 */
#define TDR_MASK_TESTDATA		(0xFFFFFFFF)

/*
 * Message State
 * we use the spi_message.state (void *) pointer to
 * hold a single state value, that's why all this
 * (void *) casting is done here.
 */
#define STATE_START			((void *) 0)
#define STATE_RUNNING			((void *) 1)
#define STATE_DONE			((void *) 2)
#define STATE_ERROR			((void *) -1)
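/*
 * To make the state handling concrete: the interrupt handler further
 * down marks a failed message with msg->state = STATE_ERROR, and the
 * pump_transfers tasklet then translates that into msg->status = -EIO
 * before handing the message back through giveback().
 */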
/*
 * SSP State - Whether Enabled or Disabled
 */
#define SSP_DISABLED			(0)
#define SSP_ENABLED			(1)

/*
 * SSP DMA State - Whether DMA Enabled or Disabled
 */
#define SSP_DMA_DISABLED		(0)
#define SSP_DMA_ENABLED			(1)

/*
 * SSP Clock Defaults
 */
#define SSP_DEFAULT_CLKRATE 0x2
#define SSP_DEFAULT_PRESCALE 0x40

/*
 * SSP Clock Parameter ranges
 */
#define CPSDVR_MIN 0x02
#define CPSDVR_MAX 0xFE
#define SCR_MIN 0x00
#define SCR_MAX 0xFF

/*
 * SSP Interrupt related Macros
 */
#define DEFAULT_SSP_REG_IMSC  0x0UL
#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)

#define CLEAR_ALL_INTERRUPTS  0x3

#define SPI_POLLING_TIMEOUT 1000 /* in milliseconds */

/*
 * The type of reading going on on this chip
 */
enum ssp_reading {
	READING_NULL,
	READING_U8,
	READING_U16,
	READING_U32
};

/*
 * The type of writing going on on this chip
 */
enum ssp_writing {
	WRITING_NULL,
	WRITING_U8,
	WRITING_U16,
	WRITING_U32
};

/**
 * struct vendor_data - vendor-specific config parameters
 * for PL022 derivatives
 * @fifodepth: depth of FIFOs (both)
 * @max_bpw: maximum number of bits per word
 * @unidir: supports unidirectional transfers
 * @extended_cr: 32 bit wide control register 0 with extra
 * features and extra features in CR1 as found in the ST variants
 * @pl023: supports a subset of the ST extensions called "PL023"
 * @loopback: supports loopback mode
 */
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
	bool extended_cr;
	bool pl023;
	bool loopback;
};

/**
 * struct pl022 - This is the private SSP driver data structure
 * @adev: AMBA device model hookup
 * @vendor: vendor data for the IP block
 * @phybase: the physical memory where the SSP device resides
 * @virtbase: the virtual memory where the SSP is mapped
 * @clk: outgoing clock "SPICLK" for the SPI bus
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @pump_transfers: Tasklet used in Interrupt Transfer mode
 * @cur_msg: Pointer to current spi_message being processed
 * @cur_transfer: Pointer to current spi_transfer
 * @cur_chip: pointer to current client's chip (assigned from controller_state)
 * @next_msg_cs_active: the next message in the queue has been examined
 *  and it was found that it uses the same chip select as the previous
 *  message, so we left it active after the previous transfer, and it's
 *  active already.
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the type of read currently going on
 * @write: the type of write currently going on
 * @exp_fifo_level: expected FIFO level
 * @rx_lev_trig: receive FIFO watermark level
 * @tx_lev_trig: transmit FIFO watermark level
 * @dma_rx_channel: optional channel for RX DMA
 * @dma_tx_channel: optional channel for TX DMA
 * @sgt_rx: scattertable for the RX transfer
 * @sgt_tx: scattertable for the TX transfer
 * @dummypage: a dummy page used for driving data on the bus with DMA
 * @dma_running: whether DMA is currently running
 */
struct pl022 {
	struct amba_device		*adev;
	struct vendor_data		*vendor;
	resource_size_t			phybase;
	void __iomem			*virtbase;
	struct clk			*clk;
	struct spi_master		*master;
	struct pl022_ssp_controller	*master_info;
	/* Message per-transfer pump */
	struct tasklet_struct		pump_transfers;
	struct spi_message		*cur_msg;
	struct spi_transfer		*cur_transfer;
	struct chip_data		*cur_chip;
	bool				next_msg_cs_active;
	void				*tx;
	void				*tx_end;
	void				*rx;
	void				*rx_end;
	enum ssp_reading		read;
	enum ssp_writing		write;
	u32				exp_fifo_level;
	enum ssp_rx_level_trig		rx_lev_trig;
	enum ssp_tx_level_trig		tx_lev_trig;
	/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
	struct dma_chan			*dma_rx_channel;
	struct dma_chan			*dma_tx_channel;
	struct sg_table			sgt_rx;
	struct sg_table			sgt_tx;
	char				*dummypage;
	bool				dma_running;
#endif
};

/**
 * struct chip_data - To maintain runtime state of SSP for each client chip
 * @cr0: Value of control register CR0 of SSP - on later ST variants this
 *       register is 32 bits wide rather than just 16
 * @cr1: Value of control register CR1 of SSP
 * @dmacr: Value of DMA control Register of SSP
 * @cpsr: Value of Clock prescale register
 * @n_bytes: how many bytes (a power of 2) are required for a given data
 *	     width of the client
 * @enable_dma: Whether to enable DMA or not
 * @read: function ptr to be used to read when doing xfer for this chip
 * @write: function ptr to be used to write when doing xfer for this chip
 * @cs_control: chip select callback provided by chip
 * @xfer_type: polling/interrupt/DMA
 *
 * Runtime state of the SSP controller, maintained per chip,
 * this would be set according to the current message that would be served.
 */
struct chip_data {
	u32 cr0;
	u16 cr1;
	u16 dmacr;
	u16 cpsr;
	u8 n_bytes;
	bool enable_dma;
	enum ssp_reading read;
	enum ssp_writing write;
	void (*cs_control) (u32 command);
	int xfer_type;
};

/**
 * null_cs_control - Dummy chip select function
 * @command: select/deselect the chip
 *
 * If no chip select function is provided by the client this is used as a
 * dummy chip select.
 */
static void null_cs_control(u32 command)
{
	pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
}
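/*
 * A board that drives its chip select from a GPIO would supply its own
 * callback instead of null_cs_control() above, along these lines (a
 * sketch; BOARD_CS_GPIO is a made-up name for the board's CS line):
 *
 *	static void board_cs_control(u32 command)
 *	{
 *		gpio_set_value(BOARD_CS_GPIO,
 *			       command == SSP_CHIP_SELECT ? 0 : 1);
 *	}
 *
 * and pass it in struct pl022_config_chip .cs_control (see
 * pl022_default_chip_info further down).
 */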
/**
 * giveback - current spi_message is over, schedule next message and call
 * callback of this message. Assumes that caller already
 * set message->status; dma and pio irqs are blocked
 * @pl022: SSP driver private data structure
 */
static void giveback(struct pl022 *pl022)
{
	struct spi_transfer *last_transfer;
	pl022->next_msg_cs_active = false;

	last_transfer = list_entry(pl022->cur_msg->transfers.prev,
				   struct spi_transfer,
				   transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		/*
		 * FIXME: This runs in interrupt context.
		 * Is this really smart?
		 */
		udelay(last_transfer->delay_usecs);

	if (!last_transfer->cs_change) {
		struct spi_message *next_msg;

		/*
		 * cs_change was not set. We can keep the chip select
		 * enabled if there is a message in the queue and it is
		 * for the same spi device.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */
		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(pl022->master);

		/*
		 * see if the next and current messages point
		 * to the same spi device.
		 */
		if (next_msg && next_msg->spi != pl022->cur_msg->spi)
			next_msg = NULL;
		if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
		else
			pl022->next_msg_cs_active = true;

	}

	pl022->cur_msg = NULL;
	pl022->cur_transfer = NULL;
	pl022->cur_chip = NULL;
	spi_finalize_current_message(pl022->master);

	/* disable the SPI/SSP operation */
	writew((readw(SSP_CR1(pl022->virtbase)) &
		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));

}

/**
 * flush - flush the FIFO to reach a clean state
 * @pl022: SSP driver private data structure
 */
static int flush(struct pl022 *pl022)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&pl022->adev->dev, "flush\n");
	do {
		while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
			readw(SSP_DR(pl022->virtbase));
	} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);

	pl022->exp_fifo_level = 0;

	return limit;
}

/**
 * restore_state - Load configuration of current chip
 * @pl022: SSP driver private data structure
 */
static void restore_state(struct pl022 *pl022)
{
	struct chip_data *chip = pl022->cur_chip;

	if (pl022->vendor->extended_cr)
		writel(chip->cr0, SSP_CR0(pl022->virtbase));
	else
		writew(chip->cr0, SSP_CR0(pl022->virtbase));
	writew(chip->cr1, SSP_CR1(pl022->virtbase));
	writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
	writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}

/*
 * Default SSP Register Values
 */
#define DEFAULT_SSP_REG_CR0 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)
/* ST versions have slightly different bit layout */
#define DEFAULT_SSP_REG_CR0_ST ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
	GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
)

/* The PL023 version is slightly different again */
#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

#define DEFAULT_SSP_REG_CR1 ( \
	GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
)

/* ST versions extend this register to use all 16 bits */
#define DEFAULT_SSP_REG_CR1_ST ( \
	DEFAULT_SSP_REG_CR1 | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
)

/*
 * The PL023 variant has further differences: no loopback mode, no microwire
 * support, and a new clock feedback delay setting.
 */
#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
	GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
)

#define DEFAULT_SSP_REG_CPSR ( \
	GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)

#define DEFAULT_SSP_REG_DMACR (\
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)
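/*
 * To put a number on the plain PL022 default above: assuming the
 * encodings from <linux/amba/pl022.h> (SSP_DATA_BITS_12 = 0x0B,
 * SSP_CLK_SECOND_EDGE = 1, and zero values for the Motorola frame
 * format and idle-low polarity), DEFAULT_SSP_REG_CR0 works out to
 *
 *	0x0B | (1 << 7) | (SSP_DEFAULT_CLKRATE << 8) = 0x028B
 *
 * i.e. 12-bit words, Motorola SPI, SPO = 0, SPH = 1, SCR = 2.
 */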
/**
 * load_ssp_default_config - Load default configuration for SSP
 * @pl022: SSP driver private data structure
 */
static void load_ssp_default_config(struct pl022 *pl022)
{
	if (pl022->vendor->pl023) {
		writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
	} else if (pl022->vendor->extended_cr) {
		writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
	} else {
		writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
	}
	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}

/*
 * readwriter - This will write to TX and read from RX according to the
 * parameters set in pl022.
 */
static void readwriter(struct pl022 *pl022)
{

	/*
	 * The FIFO depth is different between primecell variants.
	 * I believe filling in too much in the FIFO might cause
	 * errors in 8bit wide transfers on ARM variants (just 8 words
	 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
	 *
	 * To prevent this issue, the TX FIFO is only filled to the
	 * unused RX FIFO fill length, regardless of what the TX
	 * FIFO status flag indicates.
	 */
	dev_dbg(&pl022->adev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);

	/* Read as much as you can */
	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
	       && (pl022->rx < pl022->rx_end)) {
		switch (pl022->read) {
		case READING_NULL:
			readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U8:
			*(u8 *) (pl022->rx) =
				readw(SSP_DR(pl022->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (pl022->rx) =
				(u16) readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U32:
			*(u32 *) (pl022->rx) =
				readl(SSP_DR(pl022->virtbase));
			break;
		}
		pl022->rx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level--;
	}
	/*
	 * Write as much as possible up to the RX FIFO size
	 */
	while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
	       && (pl022->tx < pl022->tx_end)) {
		switch (pl022->write) {
		case WRITING_NULL:
			writew(0x0, SSP_DR(pl022->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		}
		pl022->tx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level++;
		/*
		 * This inner reader takes care of things appearing in the RX
		 * FIFO as we're transmitting. This will happen a lot since the
		 * clock starts running when you put things into the TX FIFO,
		 * and then things are continuously clocked into the RX FIFO.
		 */
		while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
		       && (pl022->rx < pl022->rx_end)) {
			switch (pl022->read) {
			case READING_NULL:
				readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U8:
				*(u8 *) (pl022->rx) =
					readw(SSP_DR(pl022->virtbase)) & 0xFFU;
				break;
			case READING_U16:
				*(u16 *) (pl022->rx) =
					(u16) readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U32:
				*(u32 *) (pl022->rx) =
					readl(SSP_DR(pl022->virtbase));
				break;
			}
			pl022->rx += (pl022->cur_chip->n_bytes);
			pl022->exp_fifo_level--;
		}
	}
	/*
	 * When we exit here the TX FIFO should be full and the RX FIFO
	 * should be empty
	 */
}
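/*
 * A worked example of the invariant readwriter() maintains: on a variant
 * with, say, a 32-entry FIFO, after one call with more than 32 words
 * pending in each direction, exp_fifo_level is 32 and pl022->tx has
 * advanced 32 * n_bytes; each word subsequently drained from the RX FIFO
 * then makes room to push exactly one more TX word, so the number of
 * in-flight words never exceeds the hardware FIFO depth.
 */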
/**
 * next_transfer - Move to the Next transfer in the current spi message
 * @pl022: SSP driver private data structure
 *
 * This function moves through the linked list of spi transfers in the
 * current spi message and returns with the state of the current spi
 * message, i.e. whether its last transfer is done (STATE_DONE) or the
 * next transfer is ready (STATE_RUNNING).
 */
static void *next_transfer(struct pl022 *pl022)
{
	struct spi_message *msg = pl022->cur_msg;
	struct spi_transfer *trans = pl022->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		pl022->cur_transfer =
		    list_entry(trans->transfer_list.next,
			       struct spi_transfer, transfer_list);
		return STATE_RUNNING;
	}
	return STATE_DONE;
}

/*
 * This DMA functionality is only compiled in if we have
 * access to the generic DMA devices/DMA engine.
 */
#ifdef CONFIG_DMA_ENGINE
static void unmap_free_dma_scatter(struct pl022 *pl022)
{
	/* Unmap and free the SG tables */
	dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
	dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	sg_free_table(&pl022->sgt_rx);
	sg_free_table(&pl022->sgt_tx);
}

static void dma_callback(void *data)
{
	struct pl022 *pl022 = data;
	struct spi_message *msg = pl022->cur_msg;

	BUG_ON(!pl022->sgt_rx.sgl);

#ifdef VERBOSE_DEBUG
	/*
	 * Optionally dump out buffers to inspect contents, this is
	 * good if you want to convince yourself that the loopback
	 * read/write contents are the same, when adapting to a new
	 * DMA engine.
	 */
	{
		struct scatterlist *sg;
		unsigned int i;

		dma_sync_sg_for_cpu(&pl022->adev->dev,
				    pl022->sgt_rx.sgl,
				    pl022->sgt_rx.nents,
				    DMA_FROM_DEVICE);

		for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI RX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
		for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI TX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
	}
#endif

	unmap_free_dma_scatter(pl022);

	/* Update total bytes transferred */
	msg->actual_length += pl022->cur_transfer->len;
	if (pl022->cur_transfer->cs_change)
		pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);

	/* Move to next transfer */
	msg->state = next_transfer(pl022);
	tasklet_schedule(&pl022->pump_transfers);
}

static void setup_dma_scatter(struct pl022 *pl022,
			      void *buffer,
			      unsigned int length,
			      struct sg_table *sgtab)
{
	struct scatterlist *sg;
	int bytesleft = length;
	void *bufp = buffer;
	int mapbytes;
	int i;

	if (buffer) {
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			/*
			 * If there are less bytes left than what fits
			 * in the current page (plus page alignment offset)
			 * we just feed in this, else we stuff in as much
			 * as we can.
			 */
			if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE - offset_in_page(bufp);
			sg_set_page(sg, virt_to_page(bufp),
				    mapbytes, offset_in_page(bufp));
			bufp += mapbytes;
			bytesleft -= mapbytes;
			dev_dbg(&pl022->adev->dev,
				"set RX/TX target page @ %p, %d bytes, %d left\n",
				bufp, mapbytes, bytesleft);
		}
	} else {
		/* Map the dummy buffer on every page */
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			if (bytesleft < PAGE_SIZE)
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE;
			sg_set_page(sg, virt_to_page(pl022->dummypage),
				    mapbytes, 0);
			bytesleft -= mapbytes;
			dev_dbg(&pl022->adev->dev,
				"set RX/TX to dummy page %d bytes, %d left\n",
				mapbytes, bytesleft);

		}
	}
	BUG_ON(bytesleft);
}
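/*
 * To illustrate the chunking above with concrete numbers (assuming a
 * PAGE_SIZE of 4096): a 10240 byte buffer that starts 256 bytes into a
 * page is split into scatterlist entries of 3840, 4096 and 2304 bytes,
 * so no entry ever crosses a page boundary.
 */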
/**
 * configure_dma - configures the channels for the next transfer
 * @pl022: SSP driver's private data structure
 */
static int configure_dma(struct pl022 *pl022)
{
	struct dma_slave_config rx_conf = {
		.src_addr = SSP_DR(pl022->phybase),
		.direction = DMA_DEV_TO_MEM,
		.device_fc = false,
	};
	struct dma_slave_config tx_conf = {
		.dst_addr = SSP_DR(pl022->phybase),
		.direction = DMA_MEM_TO_DEV,
		.device_fc = false,
	};
	unsigned int pages;
	int ret;
	int rx_sglen, tx_sglen;
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/*
	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
	 * Notice that the DMA engine uses one-to-one mapping. Since we can
	 * not trigger on 2 elements this needs explicit mapping rather than
	 * calculation.
	 */
	switch (pl022->rx_lev_trig) {
	case SSP_RX_1_OR_MORE_ELEM:
		rx_conf.src_maxburst = 1;
		break;
	case SSP_RX_4_OR_MORE_ELEM:
		rx_conf.src_maxburst = 4;
		break;
	case SSP_RX_8_OR_MORE_ELEM:
		rx_conf.src_maxburst = 8;
		break;
	case SSP_RX_16_OR_MORE_ELEM:
		rx_conf.src_maxburst = 16;
		break;
	case SSP_RX_32_OR_MORE_ELEM:
		rx_conf.src_maxburst = 32;
		break;
	default:
		rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	switch (pl022->tx_lev_trig) {
	case SSP_TX_1_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 1;
		break;
	case SSP_TX_4_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 4;
		break;
	case SSP_TX_8_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 8;
		break;
	case SSP_TX_16_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 16;
		break;
	case SSP_TX_32_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 32;
		break;
	default:
		tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	switch (pl022->read) {
	case READING_NULL:
		/* Use the same as for writing */
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case READING_U8:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case READING_U16:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case READING_U32:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	switch (pl022->write) {
	case WRITING_NULL:
		/* Use the same as for reading */
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case WRITING_U8:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case WRITING_U16:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case WRITING_U32:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	/* SPI peculiarity: we need to read and write the same width */
	if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		rx_conf.src_addr_width = tx_conf.dst_addr_width;
	if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		tx_conf.dst_addr_width = rx_conf.src_addr_width;
	BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);

	dmaengine_slave_config(rxchan, &rx_conf);
	dmaengine_slave_config(txchan, &tx_conf);

	/* Create sglists for the transfers */
	pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
	dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);

	ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_rx_sg;

	ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_tx_sg;

	/* Fill in the scatterlists for the RX+TX buffers */
	setup_dma_scatter(pl022, pl022->rx,
			  pl022->cur_transfer->len, &pl022->sgt_rx);
	setup_dma_scatter(pl022, pl022->tx,
			  pl022->cur_transfer->len, &pl022->sgt_tx);

	/* Map DMA buffers */
	rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
			      pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	if (!rx_sglen)
		goto err_rx_sgmap;

	tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
			      pl022->sgt_tx.nents, DMA_TO_DEVICE);
	if (!tx_sglen)
		goto err_tx_sgmap;

	/* Send both scatterlists */
	rxdesc = dmaengine_prep_slave_sg(rxchan,
					 pl022->sgt_rx.sgl,
					 rx_sglen,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		goto err_rxdesc;

	txdesc = dmaengine_prep_slave_sg(txchan,
					 pl022->sgt_tx.sgl,
					 tx_sglen,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		goto err_txdesc;

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = pl022;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	dmaengine_submit(rxdesc);
	dmaengine_submit(txdesc);
	dma_async_issue_pending(rxchan);
	dma_async_issue_pending(txchan);
	pl022->dma_running = true;

	return 0;

err_txdesc:
	dmaengine_terminate_all(txchan);
err_rxdesc:
	dmaengine_terminate_all(rxchan);
	dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
	dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
err_rx_sgmap:
	sg_free_table(&pl022->sgt_tx);
err_alloc_tx_sg:
	sg_free_table(&pl022->sgt_rx);
err_alloc_rx_sg:
	return -ENOMEM;
}
static int __devinit pl022_dma_probe(struct pl022 *pl022)
{
	dma_cap_mask_t mask;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/*
	 * We need both RX and TX channels to do DMA, else do none
	 * of them.
	 */
	pl022->dma_rx_channel = dma_request_channel(mask,
					    pl022->master_info->dma_filter,
					    pl022->master_info->dma_rx_param);
	if (!pl022->dma_rx_channel) {
		dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
		goto err_no_rxchan;
	}

	pl022->dma_tx_channel = dma_request_channel(mask,
					    pl022->master_info->dma_filter,
					    pl022->master_info->dma_tx_param);
	if (!pl022->dma_tx_channel) {
		dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
		goto err_no_txchan;
	}

	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pl022->dummypage) {
		dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n");
		goto err_no_dummypage;
	}

	dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
		 dma_chan_name(pl022->dma_rx_channel),
		 dma_chan_name(pl022->dma_tx_channel));

	return 0;

err_no_dummypage:
	dma_release_channel(pl022->dma_tx_channel);
err_no_txchan:
	dma_release_channel(pl022->dma_rx_channel);
	pl022->dma_rx_channel = NULL;
err_no_rxchan:
	dev_err(&pl022->adev->dev,
		"failed to set up DMA, working without DMA!\n");
	return -ENODEV;
}
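/*
 * For the channel request above to succeed, the machine's platform data
 * must supply a filter function and its parameters. A board using the
 * PL08x DMA controller might wire this up roughly as follows (a sketch;
 * the channel name strings are board-specific, made-up values):
 *
 *	static struct pl022_ssp_controller ssp0_plat_data = {
 *		.bus_id = 0,
 *		.enable_dma = 1,
 *		.dma_filter = pl08x_filter_id,
 *		.dma_rx_param = "ssp0_rx",
 *		.dma_tx_param = "ssp0_tx",
 *		.num_chipselect = 1,
 *	};
 */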
static void terminate_dma(struct pl022 *pl022)
{
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;

	dmaengine_terminate_all(rxchan);
	dmaengine_terminate_all(txchan);
	unmap_free_dma_scatter(pl022);
	pl022->dma_running = false;
}

static void pl022_dma_remove(struct pl022 *pl022)
{
	if (pl022->dma_running)
		terminate_dma(pl022);
	if (pl022->dma_tx_channel)
		dma_release_channel(pl022->dma_tx_channel);
	if (pl022->dma_rx_channel)
		dma_release_channel(pl022->dma_rx_channel);
	kfree(pl022->dummypage);
}

#else
static inline int configure_dma(struct pl022 *pl022)
{
	return -ENODEV;
}

static inline int pl022_dma_probe(struct pl022 *pl022)
{
	return 0;
}

static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
#endif
/**
 * pl022_interrupt_handler - Interrupt handler for SSP controller
 * @irq: IRQ number
 * @dev_id: a pointer to the SSP driver private data structure
 *
 * This function handles interrupts generated for an interrupt based transfer.
 * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
 * current message's state as STATE_ERROR and schedule the tasklet
 * pump_transfers which will do the postprocessing of the current message by
 * calling giveback(). Otherwise it reads data from RX FIFO till there is no
 * more data, and writes data in TX FIFO till it is not full. If we complete
 * the transfer we move to the next transfer and schedule the tasklet.
 */
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
	struct pl022 *pl022 = dev_id;
	struct spi_message *msg = pl022->cur_msg;
	u16 irq_status = 0;
	u16 flag = 0;

	if (unlikely(!msg)) {
		dev_err(&pl022->adev->dev,
			"bad message state in interrupt handler");
		/* Never fail */
		return IRQ_HANDLED;
	}

	/* Read the Interrupt Status Register */
	irq_status = readw(SSP_MIS(pl022->virtbase));

	if (unlikely(!irq_status))
		return IRQ_NONE;

	/*
	 * This handles the FIFO interrupts, the timeout
	 * interrupts are flatly ignored, they cannot be
	 * trusted.
	 */
	if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
		/*
		 * Overrun interrupt - bail out since our Data has been
		 * corrupted
		 */
		dev_err(&pl022->adev->dev, "FIFO overrun\n");
		if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
			dev_err(&pl022->adev->dev,
				"RXFIFO is full\n");
		if (!(readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF))
			dev_err(&pl022->adev->dev,
				"TXFIFO is full\n");

		/*
		 * Disable and clear interrupts, disable SSP,
		 * mark message with bad status so it can be
		 * retried.
		 */
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		writew((readw(SSP_CR1(pl022->virtbase)) &
			(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
		msg->state = STATE_ERROR;

		/* Schedule message queue handler */
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	readwriter(pl022);

	if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
		flag = 1;
		/* Disable Transmit interrupt, enable receive interrupt */
		writew((readw(SSP_IMSC(pl022->virtbase)) &
		       ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
		       SSP_IMSC(pl022->virtbase));
	}

	/*
	 * Since all transactions must write as much as shall be read,
	 * we can conclude the entire transaction once RX is complete.
	 * At this point, all TX will always be finished.
	 */
	if (pl022->rx >= pl022->rx_end) {
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		if (unlikely(pl022->rx > pl022->rx_end)) {
			dev_warn(&pl022->adev->dev, "read %u surplus "
				 "bytes (did you request an odd "
				 "number of bytes on a 16bit bus?)\n",
				 (u32) (pl022->rx - pl022->rx_end));
		}
		/* Update total bytes transferred */
		msg->actual_length += pl022->cur_transfer->len;
		if (pl022->cur_transfer->cs_change)
			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
		/* Move to next transfer */
		msg->state = next_transfer(pl022);
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}
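/*
 * A note on the interrupt choreography above: a transfer is started with
 * the RX interrupt masked and only the TX (and error) interrupts armed
 * (see do_interrupt_dma_transfer() below); once the TX buffer has been
 * fully fed, the handler swaps TXIM for RXIM, and the transfer concludes
 * when the expected amount of RX data has arrived, since every word
 * written out always clocks a word back in.
 */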
/*
 * set_up_next_transfer - This sets up the pointers to memory for the next
 * message to send out on the SPI bus.
 */
static int set_up_next_transfer(struct pl022 *pl022,
				struct spi_transfer *transfer)
{
	int residue;

	/* Sanity check the message for this bus width */
	residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
	if (unlikely(residue != 0)) {
		dev_err(&pl022->adev->dev,
			"message of %u bytes to transmit but the current "
			"chip bus has a data width of %u bytes!\n",
			pl022->cur_transfer->len,
			pl022->cur_chip->n_bytes);
		dev_err(&pl022->adev->dev, "skipping this message\n");
		return -EIO;
	}
	pl022->tx = (void *)transfer->tx_buf;
	pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
	pl022->rx = (void *)transfer->rx_buf;
	pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
	pl022->write =
	    pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
	pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
	return 0;
}

/**
 * pump_transfers - Tasklet function which schedules next transfer
 * when running in interrupt or DMA transfer mode.
 * @data: SSP driver private data structure
 */
static void pump_transfers(unsigned long data)
{
	struct pl022 *pl022 = (struct pl022 *) data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;

	/* Get current state information */
	message = pl022->cur_msg;
	transfer = pl022->cur_transfer;

	/* Handle for abort */
	if (message->state == STATE_ERROR) {
		message->status = -EIO;
		giveback(pl022);
		return;
	}

	/* Handle end of message */
	if (message->state == STATE_DONE) {
		message->status = 0;
		giveback(pl022);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == STATE_RUNNING) {
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer,
				      transfer_list);
		if (previous->delay_usecs)
			/*
			 * FIXME: This runs in interrupt context.
			 * Is this really smart?
			 */
			udelay(previous->delay_usecs);

		/* Reselect chip select only if cs_change was requested */
		if (previous->cs_change)
			pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
	} else {
		/* STATE_START */
		message->state = STATE_RUNNING;
	}

	if (set_up_next_transfer(pl022, transfer)) {
		message->state = STATE_ERROR;
		message->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Flush the FIFOs and let's go! */
	flush(pl022);

	if (pl022->cur_chip->enable_dma) {
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		return;
	}

err_config_dma:
	/* enable all interrupts except RX */
	writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM,
	       SSP_IMSC(pl022->virtbase));
}
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
	/*
	 * Default is to enable all interrupts except RX -
	 * this will be enabled once TX is complete
	 */
	u32 irqflags = ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM;

	/* Enable target chip, if not already active */
	if (!pl022->next_msg_cs_active)
		pl022->cur_chip->cs_control(SSP_CHIP_SELECT);

	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
		/* Error path */
		pl022->cur_msg->state = STATE_ERROR;
		pl022->cur_msg->status = -EIO;
		giveback(pl022);
		return;
	}
	/* If we're using DMA, set up DMA here */
	if (pl022->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* Disable interrupts in DMA mode, IRQ from DMA controller */
		irqflags = DISABLE_ALL_INTERRUPTS;
	}
err_config_dma:
	/* Enable SSP, turn on interrupts */
	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
	       SSP_CR1(pl022->virtbase));
	writew(irqflags, SSP_IMSC(pl022->virtbase));
}
static void do_polling_transfer(struct pl022 *pl022)
{
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip;
	unsigned long time, timeout;

	chip = pl022->cur_chip;
	message = pl022->cur_msg;

	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = pl022->cur_transfer;

		/* Delay if requested at end of transfer */
		if (message->state == STATE_RUNNING) {
			previous =
			    list_entry(transfer->transfer_list.prev,
				       struct spi_transfer, transfer_list);
			if (previous->delay_usecs)
				udelay(previous->delay_usecs);
			if (previous->cs_change)
				pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
		} else {
			/* STATE_START */
			message->state = STATE_RUNNING;
			if (!pl022->next_msg_cs_active)
				pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(pl022, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(pl022);
		writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
		       SSP_CR1(pl022->virtbase));

		dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");

		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
		while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
			time = jiffies;
			readwriter(pl022);
			if (time_after(time, timeout)) {
				dev_warn(&pl022->adev->dev,
					 "%s: timeout!\n", __func__);
				message->state = STATE_ERROR;
				goto out;
			}
			cpu_relax();
		}

		/* Update total bytes transferred */
		message->actual_length += pl022->cur_transfer->len;
		if (pl022->cur_transfer->cs_change)
			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
		/* Move to next transfer */
		message->state = next_transfer(pl022);
	}
out:
	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else
		message->status = -EIO;

	giveback(pl022);
	return;
}

static int pl022_transfer_one_message(struct spi_master *master,
				      struct spi_message *msg)
{
	struct pl022 *pl022 = spi_master_get_devdata(master);

	/* Initial message state */
	pl022->cur_msg = msg;
	msg->state = STATE_START;

	pl022->cur_transfer = list_entry(msg->transfers.next,
					 struct spi_transfer, transfer_list);

	/* Setup the SPI using the per chip configuration */
	pl022->cur_chip = spi_get_ctldata(msg->spi);

	restore_state(pl022);
	flush(pl022);

	if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
		do_polling_transfer(pl022);
	else
		do_interrupt_dma_transfer(pl022);

	return 0;
}
static int pl022_prepare_transfer_hardware(struct spi_master *master)
{
	struct pl022 *pl022 = spi_master_get_devdata(master);

	/*
	 * Just make sure we have all we need to run the transfer by syncing
	 * with the runtime PM framework.
	 */
	pm_runtime_get_sync(&pl022->adev->dev);
	return 0;
}

static int pl022_unprepare_transfer_hardware(struct spi_master *master)
{
	struct pl022 *pl022 = spi_master_get_devdata(master);

	/* nothing more to do - disable spi/ssp and power off */
	writew((readw(SSP_CR1(pl022->virtbase)) &
		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));

	if (pl022->master_info->autosuspend_delay > 0) {
		pm_runtime_mark_last_busy(&pl022->adev->dev);
		pm_runtime_put_autosuspend(&pl022->adev->dev);
	} else {
		pm_runtime_put(&pl022->adev->dev);
	}

	return 0;
}

static int verify_controller_parameters(struct pl022 *pl022,
				struct pl022_config_chip const *chip_info)
{
	if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
	    || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
		dev_err(&pl022->adev->dev,
			"interface is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
	    (!pl022->vendor->unidir)) {
		dev_err(&pl022->adev->dev,
			"unidirectional mode not supported in this "
			"hardware version\n");
		return -EINVAL;
	}
	if ((chip_info->hierarchy != SSP_MASTER)
	    && (chip_info->hierarchy != SSP_SLAVE)) {
		dev_err(&pl022->adev->dev,
			"hierarchy is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->com_mode != INTERRUPT_TRANSFER)
	    && (chip_info->com_mode != DMA_TRANSFER)
	    && (chip_info->com_mode != POLLING_TRANSFER)) {
		dev_err(&pl022->adev->dev,
			"Communication mode is configured incorrectly\n");
		return -EINVAL;
	}
	switch (chip_info->rx_lev_trig) {
	case SSP_RX_1_OR_MORE_ELEM:
	case SSP_RX_4_OR_MORE_ELEM:
	case SSP_RX_8_OR_MORE_ELEM:
		/* These are always OK, all variants can handle this */
		break;
	case SSP_RX_16_OR_MORE_ELEM:
		if (pl022->vendor->fifodepth < 16) {
			dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SSP_RX_32_OR_MORE_ELEM:
		if (pl022->vendor->fifodepth < 32) {
			dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	switch (chip_info->tx_lev_trig) {
	case SSP_TX_1_OR_MORE_EMPTY_LOC:
	case SSP_TX_4_OR_MORE_EMPTY_LOC:
	case SSP_TX_8_OR_MORE_EMPTY_LOC:
		/* These are always OK, all variants can handle this */
		break;
	case SSP_TX_16_OR_MORE_EMPTY_LOC:
		if (pl022->vendor->fifodepth < 16) {
			dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SSP_TX_32_OR_MORE_EMPTY_LOC:
		if (pl022->vendor->fifodepth < 32) {
			dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
		if ((chip_info->ctrl_len < SSP_BITS_4)
		    || (chip_info->ctrl_len > SSP_BITS_32)) {
			dev_err(&pl022->adev->dev,
				"CTRL LEN is configured incorrectly\n");
			return -EINVAL;
		}
		if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
		    && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
			dev_err(&pl022->adev->dev,
				"Wait State is configured incorrectly\n");
			return -EINVAL;
		}
		/* Half duplex is only available in the ST Micro version */
		if (pl022->vendor->extended_cr) {
			if ((chip_info->duplex !=
			     SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
			    && (chip_info->duplex !=
				SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
				dev_err(&pl022->adev->dev,
					"Microwire duplex mode is configured incorrectly\n");
				return -EINVAL;
			}
		} else {
			if (chip_info->duplex !=
			    SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
				dev_err(&pl022->adev->dev,
					"Microwire half duplex mode requested,"
					" but this is only available in the"
					" ST version of PL022\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}

static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
{
	return rate / (cpsdvsr * (1 + scr));
}
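/*
 * The effective bit rate is thus SSPCLK / (CPSDVSR * (1 + SCR)). For
 * example, assuming a 48 MHz SSPCLK: the fastest setting, cpsdvsr = 2
 * and scr = 0, yields 24 MHz, while the slowest, cpsdvsr = 254 and
 * scr = 255, yields 48000000 / (254 * 256), roughly 738 Hz.
 */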
static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
				    ssp_clock_params *clk_freq)
{
	/* Let's calculate the frequency parameters */
	u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
	u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
		best_scr = 0, tmp, found = 0;

	rate = clk_get_rate(pl022->clk);
	/* cpsdvsr = 2 & scr = 0 */
	max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
	/* cpsdvsr = 254 & scr = 255 */
	min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);

	if (freq > max_tclk)
		dev_warn(&pl022->adev->dev,
			"Max speed that can be programmed is %d Hz, you requested %d\n",
			max_tclk, freq);

	if (freq < min_tclk) {
		dev_err(&pl022->adev->dev,
			"Requested frequency: %d Hz is less than minimum possible %d Hz\n",
			freq, min_tclk);
		return -EINVAL;
	}

	/*
	 * best_freq will give closest possible available rate (<= requested
	 * freq) for all values of scr & cpsdvsr.
	 */
	while ((cpsdvsr <= CPSDVR_MAX) && !found) {
		while (scr <= SCR_MAX) {
			tmp = spi_rate(rate, cpsdvsr, scr);

			if (tmp > freq) {
				/* we need lower freq */
				scr++;
				continue;
			}

			/*
			 * If we find an exact match, mark it as found;
			 * if we find a closer value, remember it.
			 */
			if (tmp > best_freq) {
				best_freq = tmp;
				best_cpsdvsr = cpsdvsr;
				best_scr = scr;

				if (tmp == freq)
					found = 1;
			}
			/*
			 * increased scr will give lower rates, which are not
			 * required
			 */
			break;
		}
		cpsdvsr += 2;
		scr = SCR_MIN;
	}

	WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate\n",
	     freq);

	clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
	clk_freq->scr = (u8) (best_scr & 0xFF);
	dev_dbg(&pl022->adev->dev,
		"SSP Target Frequency is: %u, Effective Frequency is %u\n",
		freq, best_freq);
	dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
		clk_freq->cpsdvsr, clk_freq->scr);

	return 0;
}

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct pl022_config_chip pl022_default_chip_info = {
	.com_mode = POLLING_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_SLAVE,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	.cs_control = null_cs_control,
};
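/*
 * A board that wants something other than these defaults passes its own
 * struct through spi_board_info.controller_data. A sketch, with made-up
 * values, for an interrupt-driven master-mode device whose chip select
 * is handled by a board callback (like the board_cs_control() sketch
 * shown earlier):
 *
 *	static struct pl022_config_chip board_chip_info = {
 *		.com_mode = INTERRUPT_TRANSFER,
 *		.iface = SSP_INTERFACE_MOTOROLA_SPI,
 *		.hierarchy = SSP_MASTER,
 *		.slave_tx_disable = DO_NOT_DRIVE_TX,
 *		.rx_lev_trig = SSP_RX_4_OR_MORE_ELEM,
 *		.tx_lev_trig = SSP_TX_4_OR_MORE_EMPTY_LOC,
 *		.cs_control = board_cs_control,
 *	};
 */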
/**
 * pl022_setup - setup function registered to SPI master framework
 * @spi: spi device which is requesting setup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. If it is the first time when setup is called by this device,
 * this function will initialize the runtime state for this chip and save
 * the same in the device structure. Else it will update the runtime info
 * with the updated chip info. Nothing is really being written to the
 * controller hardware here, that is not done until the actual transfer
 * commences.
 */
static int pl022_setup(struct spi_device *spi)
{
	struct pl022_config_chip const *chip_info;
	struct chip_data *chip;
	struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
	int status = 0;
	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
	unsigned int bits = spi->bits_per_word;
	u32 tmp;

	if (!spi->max_speed_hz)
		return -EINVAL;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"cannot allocate controller state\n");
			return -ENOMEM;
		}
		dev_dbg(&spi->dev,
			"allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		chip_info = &pl022_default_chip_info;
		/* spi_board_info.controller_data is not supplied */
		dev_dbg(&spi->dev,
			"using default controller_data settings\n");
	} else
		dev_dbg(&spi->dev,
			"using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
		memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
		if ((clk_freq.cpsdvsr % 2) != 0)
			clk_freq.cpsdvsr =
				clk_freq.cpsdvsr - 1;
	}
	if ((clk_freq.cpsdvsr < CPSDVR_MIN)
	    || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"cpsdvsr is configured incorrectly\n");
		goto err_config_params;
	}

	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	pl022->rx_lev_trig = chip_info->rx_lev_trig;
	pl022->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	if (!chip_info->cs_control) {
		chip->cs_control = null_cs_control;
		dev_warn(&spi->dev,
			 "chip select function is NULL for this chip\n");
	} else
		chip->cs_control = chip_info->cs_control;

	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
			pl022->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now Initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	pl022->rx_lev_trig = chip_info->rx_lev_trig;
	pl022->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	if (!chip_info->cs_control) {
		chip->cs_control = null_cs_control;
		dev_warn(&spi->dev,
			 "chip select function is NULL for this chip\n");
	} else
		chip->cs_control = chip_info->cs_control;

	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
			pl022->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = clk_freq.cpsdvsr;

	/* Special setup for the ST micro extended control registers */
	if (pl022->vendor->extended_cr) {
		u32 etx;

		if (pl022->vendor->pl023) {
			/* These bits are only in the PL023 */
			SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
				       SSP_CR1_MASK_FBCLKDEL_ST, 13);
		} else {
			/* These bits are in the PL022 but not PL023 */
			SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
				       SSP_CR0_MASK_HALFDUP_ST, 5);
			SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
				       SSP_CR0_MASK_CSS_ST, 16);
			SSP_WRITE_BITS(chip->cr0, chip_info->iface,
				       SSP_CR0_MASK_FRF_ST, 21);
			SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
				       SSP_CR1_MASK_MWAIT_ST, 6);
		}
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS_ST, 0);

		if (spi->mode & SPI_LSB_FIRST) {
			tmp = SSP_RX_LSB;
			etx = SSP_TX_LSB;
		} else {
			tmp = SSP_RX_MSB;
			etx = SSP_TX_MSB;
		}
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
		SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
		SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
			       SSP_CR1_MASK_RXIFLSEL_ST, 7);
		SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
			       SSP_CR1_MASK_TXIFLSEL_ST, 10);
	} else {
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS, 0);
		SSP_WRITE_BITS(chip->cr0, chip_info->iface,
			       SSP_CR0_MASK_FRF, 4);
	}

	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SSP_CLK_POL_IDLE_HIGH;
	else
		tmp = SSP_CLK_POL_IDLE_LOW;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);

	if (spi->mode & SPI_CPHA)
		tmp = SSP_CLK_SECOND_EDGE;
	else
		tmp = SSP_CLK_FIRST_EDGE;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);

	SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	/* Loopback is available on all versions except PL023 */
	if (pl022->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
	}
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
		       3);

	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
 err_config_params:
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
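
/*
 * As a rough illustration of what pl022_setup() consumes: a board file
 * can hand this driver per-chip settings through
 * spi_board_info.controller_data. This is only a sketch with made-up
 * values; the field names come from <linux/amba/pl022.h>, and
 * my_cs_control/"some-spi-chip" are hypothetical board-specific names.
 *
 *	static struct pl022_config_chip my_chip_info = {
 *		.com_mode = INTERRUPT_TRANSFER,
 *		.iface = SSP_INTERFACE_MOTOROLA_SPI,
 *		.hierarchy = SSP_MASTER,
 *		.slave_tx_disable = 0,
 *		.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
 *		.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
 *		.cs_control = my_cs_control,	// board-specific GPIO hook
 *	};
 *
 *	static struct spi_board_info my_board_info __initdata = {
 *		.modalias = "some-spi-chip",
 *		.controller_data = &my_chip_info,
 *		.max_speed_hz = 1000000,
 *		.bus_num = 0,
 *		.chip_select = 0,
 *		.mode = SPI_MODE_0,
 *	};
 */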

/**
 * pl022_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will free the runtime state of the chip.
 */
static void pl022_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	spi_set_ctldata(spi, NULL);
	kfree(chip);
}
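
/**
 * pl022_probe - probe function registered to the AMBA bus
 * @adev: the AMBA device representing this PL022 instance
 * @id: the matching entry in pl022_ids; its data field carries the
 *	vendor_data for this variant
 *
 * Allocates an SPI master and the driver state, maps the registers,
 * claims clock and IRQ resources, optionally sets up DMA, registers
 * the master with the SPI framework and finally lets runtime PM put
 * the block to sleep until it is needed.
 */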
static int __devinit
pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
	struct spi_master *master;
	struct pl022 *pl022 = NULL;	/* Data for this driver */
	int status = 0;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	if (platform_info == NULL) {
		dev_err(&adev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;

	/*
	 * Bus number which has been assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
	master->transfer_one_message = pl022_transfer_one_message;
	master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
	master->rt = platform_info->rt;

	/*
	 * Supports mode 0-3, loopback, and active low CS. Transfers are
	 * always MS bit first on the original pl022.
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	if (pl022->vendor->extended_cr)
		master->mode_bits |= SPI_LSB_FIRST;

	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->phybase = adev->res.start;
	pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
	       adev->res.start, pl022->virtbase);

	pm_runtime_resume(dev);

	pl022->clk = clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}

	status = clk_prepare(pl022->clk);
	if (status) {
		dev_err(&adev->dev, "could not prepare SSP/SPI bus clock\n");
		goto err_clk_prep;
	}

	status = clk_enable(pl022->clk);
	if (status) {
		dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
		goto err_no_clk_en;
	}

	/* Initialize transfer pump */
	tasklet_init(&pl022->pump_transfers, pump_transfers,
		     (unsigned long)pl022);

	/* Disable SSP */
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);

	status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
			     pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Get DMA channels */
	if (platform_info->enable_dma) {
		status = pl022_dma_probe(pl022);
		if (status != 0)
			platform_info->enable_dma = 0;
	}

	/* Register with the SPI framework */
	amba_set_drvdata(adev, pl022);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");

	/* Let runtime PM put the device to sleep */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&adev->dev,
			 "will use autosuspend for runtime pm, delay %dms\n",
			 platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev,
						 platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		pm_runtime_put(dev);
	}
	return 0;

 err_spi_register:
	if (platform_info->enable_dma)
		pl022_dma_remove(pl022);

	free_irq(adev->irq[0], pl022);
 err_no_irq:
	clk_disable(pl022->clk);
 err_no_clk_en:
	clk_unprepare(pl022->clk);
 err_clk_prep:
	clk_put(pl022->clk);
 err_no_clk:
	iounmap(pl022->virtbase);
 err_no_ioremap:
	amba_release_regions(adev);
 err_no_ioregion:
	spi_master_put(master);
 err_no_master:
 err_no_pdata:
	return status;
}
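
/**
 * pl022_remove - remove function matching pl022_probe
 * @adev: the AMBA device being removed
 *
 * Unwinds everything set up in probe: takes the block out of runtime
 * suspend, releases the DMA, IRQ, clock and memory resources, and
 * finally unregisters the SPI master.
 */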
static int __devexit
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);

	if (!pl022)
		return 0;

	/*
	 * Undo the pm_runtime_put() done in probe. We assume that the
	 * PrimeCell is not being accessed at this point.
	 */
	pm_runtime_get_noresume(&adev->dev);

	load_ssp_default_config(pl022);
	if (pl022->master_info->enable_dma)
		pl022_dma_remove(pl022);

	free_irq(adev->irq[0], pl022);
	clk_disable(pl022->clk);
	clk_unprepare(pl022->clk);
	clk_put(pl022->clk);
	pm_runtime_disable(&adev->dev);
	iounmap(pl022->virtbase);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
	spi_unregister_master(pl022->master);
	spi_master_put(pl022->master);
	amba_set_drvdata(adev, NULL);
	return 0;
}

#ifdef CONFIG_SUSPEND
/* System sleep: quiesce the message queue before the system goes down */
static int pl022_suspend(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(pl022->master);
	if (ret) {
		dev_warn(dev, "cannot suspend master\n");
		return ret;
	}

	dev_dbg(dev, "suspended\n");
	return 0;
}

static int pl022_resume(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);
	int ret;

	/* Start the queue running */
	ret = spi_master_resume(pl022->master);
	if (ret)
		dev_err(dev, "problem starting queue (%d)\n", ret);
	else
		dev_dbg(dev, "resumed\n");

	return ret;
}
#endif	/* CONFIG_SUSPEND */

#ifdef CONFIG_PM_RUNTIME
/* Runtime PM: gate the SSP bus clock while the block is idle */
static int pl022_runtime_suspend(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	clk_disable(pl022->clk);

	return 0;
}

static int pl022_runtime_resume(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	clk_enable(pl022->clk);

	return 0;
}
#endif	/* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl022_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
	SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};

static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
};

static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = false,
	.loopback = true,
};

static struct vendor_data vendor_st_pl023 = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = true,
	.loopback = false,
};

static struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant: a 16-bit-wide,
		 * 8-entry-deep TX/RX FIFO
		 */
		.id = 0x00041022,
		.mask = 0x000fffff,
		.data = &vendor_arm,
	},
	{
		/*
		 * ST Micro derivative: a 32-bit-wide,
		 * 32-entry-deep TX/RX FIFO
		 */
		.id = 0x01080022,
		.mask = 0xffffffff,
		.data = &vendor_st,
	},
	{
		/*
		 * ST-Ericsson derivative "PL023" (this is not
		 * an official ARM number): a PL022 SSP block
		 * stripped to SPI mode only, with a 32-bit-wide,
		 * 32-entry-deep TX/RX FIFO but no extended
		 * CR0/CR1 registers
		 */
		.id = 0x00080023,
		.mask = 0xffffffff,
		.data = &vendor_st_pl023,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl022_ids);
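
/*
 * The AMBA bus binds a driver when (periphid & mask) == id. The first
 * entry above, for instance, masks off the top twelve bits of the
 * peripheral ID (the revision and configuration fields), so any revision
 * of ARM part number 0x022 from designer 0x41 will match; the two ST
 * entries match the full 32-bit ID instead.
 */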

static struct amba_driver pl022_driver = {
	.drv = {
		.name = "ssp-pl022",
		.pm = &pl022_dev_pm_ops,
	},
	.id_table = pl022_ids,
	.probe = pl022_probe,
	.remove = __devexit_p(pl022_remove),
};

/*
 * Register early (subsys_initcall) so the bus master is available
 * before the drivers of devices sitting on the bus probe
 */
static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}
subsys_initcall(pl022_init);

static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}
module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");