1 /* 2 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. 3 * 4 * Copyright (C) 2008-2009 ST-Ericsson AB 5 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. 6 * 7 * Author: Linus Walleij <linus.walleij@stericsson.com> 8 * 9 * Initial version inspired by: 10 * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c 11 * Initial adoption to PL022 by: 12 * Sachin Verma <sachin.verma@st.com> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or 17 * (at your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 */ 24 25 #include <linux/init.h> 26 #include <linux/module.h> 27 #include <linux/device.h> 28 #include <linux/ioport.h> 29 #include <linux/errno.h> 30 #include <linux/interrupt.h> 31 #include <linux/spi/spi.h> 32 #include <linux/workqueue.h> 33 #include <linux/delay.h> 34 #include <linux/clk.h> 35 #include <linux/err.h> 36 #include <linux/amba/bus.h> 37 #include <linux/amba/pl022.h> 38 #include <linux/io.h> 39 #include <linux/slab.h> 40 #include <linux/dmaengine.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/scatterlist.h> 43 #include <linux/pm_runtime.h> 44 45 /* 46 * This macro is used to define some register default values. 47 * reg is masked with mask, the OR:ed with an (again masked) 48 * val shifted sb steps to the left. 49 */ 50 #define SSP_WRITE_BITS(reg, val, mask, sb) \ 51 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) 52 53 /* 54 * This macro is also used to define some default values. 55 * It will just shift val by sb steps to the left and mask 56 * the result with mask. 57 */ 58 #define GEN_MASK_BITS(val, mask, sb) \ 59 (((val)<<(sb)) & (mask)) 60 61 #define DRIVE_TX 0 62 #define DO_NOT_DRIVE_TX 1 63 64 #define DO_NOT_QUEUE_DMA 0 65 #define QUEUE_DMA 1 66 67 #define RX_TRANSFER 1 68 #define TX_TRANSFER 2 69 70 /* 71 * Macros to access SSP Registers with their offsets 72 */ 73 #define SSP_CR0(r) (r + 0x000) 74 #define SSP_CR1(r) (r + 0x004) 75 #define SSP_DR(r) (r + 0x008) 76 #define SSP_SR(r) (r + 0x00C) 77 #define SSP_CPSR(r) (r + 0x010) 78 #define SSP_IMSC(r) (r + 0x014) 79 #define SSP_RIS(r) (r + 0x018) 80 #define SSP_MIS(r) (r + 0x01C) 81 #define SSP_ICR(r) (r + 0x020) 82 #define SSP_DMACR(r) (r + 0x024) 83 #define SSP_ITCR(r) (r + 0x080) 84 #define SSP_ITIP(r) (r + 0x084) 85 #define SSP_ITOP(r) (r + 0x088) 86 #define SSP_TDR(r) (r + 0x08C) 87 88 #define SSP_PID0(r) (r + 0xFE0) 89 #define SSP_PID1(r) (r + 0xFE4) 90 #define SSP_PID2(r) (r + 0xFE8) 91 #define SSP_PID3(r) (r + 0xFEC) 92 93 #define SSP_CID0(r) (r + 0xFF0) 94 #define SSP_CID1(r) (r + 0xFF4) 95 #define SSP_CID2(r) (r + 0xFF8) 96 #define SSP_CID3(r) (r + 0xFFC) 97 98 /* 99 * SSP Control Register 0 - SSP_CR0 100 */ 101 #define SSP_CR0_MASK_DSS (0x0FUL << 0) 102 #define SSP_CR0_MASK_FRF (0x3UL << 4) 103 #define SSP_CR0_MASK_SPO (0x1UL << 6) 104 #define SSP_CR0_MASK_SPH (0x1UL << 7) 105 #define SSP_CR0_MASK_SCR (0xFFUL << 8) 106 107 /* 108 * The ST version of this block moves som bits 109 * in SSP_CR0 and extends it to 32 bits 110 */ 111 #define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) 112 #define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) 113 #define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) 114 #define SSP_CR0_MASK_FRF_ST (0x3UL << 21) 115 116 /* 117 * SSP Control Register 0 - SSP_CR1 118 */ 119 #define SSP_CR1_MASK_LBM (0x1UL << 0) 120 #define SSP_CR1_MASK_SSE (0x1UL << 1) 121 #define SSP_CR1_MASK_MS (0x1UL << 2) 122 #define SSP_CR1_MASK_SOD (0x1UL << 3) 123 124 /* 125 * The ST version of this block adds some bits 126 * in SSP_CR1 127 */ 128 #define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) 129 #define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) 130 #define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) 131 #define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) 132 #define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) 133 /* This one is only in the PL023 variant */ 134 #define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) 135 136 /* 137 * SSP Status Register - SSP_SR 138 */ 139 #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ 140 #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ 141 #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ 142 #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ 143 #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ 144 145 /* 146 * SSP Clock Prescale Register - SSP_CPSR 147 */ 148 #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) 149 150 /* 151 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC 152 */ 153 #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */ 154 #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */ 155 #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */ 156 #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */ 157 158 /* 159 * SSP Raw Interrupt Status Register - SSP_RIS 160 */ 161 /* Receive Overrun Raw Interrupt status */ 162 #define SSP_RIS_MASK_RORRIS (0x1UL << 0) 163 /* Receive Timeout Raw Interrupt status */ 164 #define SSP_RIS_MASK_RTRIS (0x1UL << 1) 165 /* Receive FIFO Raw Interrupt status */ 166 #define SSP_RIS_MASK_RXRIS (0x1UL << 2) 167 /* Transmit FIFO Raw Interrupt status */ 168 #define SSP_RIS_MASK_TXRIS (0x1UL << 3) 169 170 /* 171 * SSP Masked Interrupt Status Register - SSP_MIS 172 */ 173 /* Receive Overrun Masked Interrupt status */ 174 #define SSP_MIS_MASK_RORMIS (0x1UL << 0) 175 /* Receive Timeout Masked Interrupt status */ 176 #define SSP_MIS_MASK_RTMIS (0x1UL << 1) 177 /* Receive FIFO Masked Interrupt status */ 178 #define SSP_MIS_MASK_RXMIS (0x1UL << 2) 179 /* Transmit FIFO Masked Interrupt status */ 180 #define SSP_MIS_MASK_TXMIS (0x1UL << 3) 181 182 /* 183 * SSP Interrupt Clear Register - SSP_ICR 184 */ 185 /* Receive Overrun Raw Clear Interrupt bit */ 186 #define SSP_ICR_MASK_RORIC (0x1UL << 0) 187 /* Receive Timeout Clear Interrupt bit */ 188 #define SSP_ICR_MASK_RTIC (0x1UL << 1) 189 190 /* 191 * SSP DMA Control Register - SSP_DMACR 192 */ 193 /* Receive DMA Enable bit */ 194 #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0) 195 /* Transmit DMA Enable bit */ 196 #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) 197 198 /* 199 * SSP Integration Test control Register - SSP_ITCR 200 */ 201 #define SSP_ITCR_MASK_ITEN (0x1UL << 0) 202 #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1) 203 204 /* 205 * SSP Integration Test Input Register - SSP_ITIP 206 */ 207 #define ITIP_MASK_SSPRXD (0x1UL << 0) 208 #define ITIP_MASK_SSPFSSIN (0x1UL << 1) 209 #define ITIP_MASK_SSPCLKIN (0x1UL << 2) 210 #define ITIP_MASK_RXDMAC (0x1UL << 3) 211 #define ITIP_MASK_TXDMAC (0x1UL << 4) 212 #define ITIP_MASK_SSPTXDIN (0x1UL << 5) 213 214 /* 215 * SSP Integration Test output Register - SSP_ITOP 216 */ 217 #define ITOP_MASK_SSPTXD (0x1UL << 0) 218 #define ITOP_MASK_SSPFSSOUT (0x1UL << 1) 219 #define ITOP_MASK_SSPCLKOUT (0x1UL << 2) 220 #define ITOP_MASK_SSPOEn (0x1UL << 3) 221 #define ITOP_MASK_SSPCTLOEn (0x1UL << 4) 222 #define ITOP_MASK_RORINTR (0x1UL << 5) 223 #define ITOP_MASK_RTINTR (0x1UL << 6) 224 #define ITOP_MASK_RXINTR (0x1UL << 7) 225 #define ITOP_MASK_TXINTR (0x1UL << 8) 226 #define ITOP_MASK_INTR (0x1UL << 9) 227 #define ITOP_MASK_RXDMABREQ (0x1UL << 10) 228 #define ITOP_MASK_RXDMASREQ (0x1UL << 11) 229 #define ITOP_MASK_TXDMABREQ (0x1UL << 12) 230 #define ITOP_MASK_TXDMASREQ (0x1UL << 13) 231 232 /* 233 * SSP Test Data Register - SSP_TDR 234 */ 235 #define TDR_MASK_TESTDATA (0xFFFFFFFF) 236 237 /* 238 * Message State 239 * we use the spi_message.state (void *) pointer to 240 * hold a single state value, that's why all this 241 * (void *) casting is done here. 242 */ 243 #define STATE_START ((void *) 0) 244 #define STATE_RUNNING ((void *) 1) 245 #define STATE_DONE ((void *) 2) 246 #define STATE_ERROR ((void *) -1) 247 248 /* 249 * SSP State - Whether Enabled or Disabled 250 */ 251 #define SSP_DISABLED (0) 252 #define SSP_ENABLED (1) 253 254 /* 255 * SSP DMA State - Whether DMA Enabled or Disabled 256 */ 257 #define SSP_DMA_DISABLED (0) 258 #define SSP_DMA_ENABLED (1) 259 260 /* 261 * SSP Clock Defaults 262 */ 263 #define SSP_DEFAULT_CLKRATE 0x2 264 #define SSP_DEFAULT_PRESCALE 0x40 265 266 /* 267 * SSP Clock Parameter ranges 268 */ 269 #define CPSDVR_MIN 0x02 270 #define CPSDVR_MAX 0xFE 271 #define SCR_MIN 0x00 272 #define SCR_MAX 0xFF 273 274 /* 275 * SSP Interrupt related Macros 276 */ 277 #define DEFAULT_SSP_REG_IMSC 0x0UL 278 #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC 279 #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) 280 281 #define CLEAR_ALL_INTERRUPTS 0x3 282 283 #define SPI_POLLING_TIMEOUT 1000 284 285 /* 286 * The type of reading going on on this chip 287 */ 288 enum ssp_reading { 289 READING_NULL, 290 READING_U8, 291 READING_U16, 292 READING_U32 293 }; 294 295 /** 296 * The type of writing going on on this chip 297 */ 298 enum ssp_writing { 299 WRITING_NULL, 300 WRITING_U8, 301 WRITING_U16, 302 WRITING_U32 303 }; 304 305 /** 306 * struct vendor_data - vendor-specific config parameters 307 * for PL022 derivates 308 * @fifodepth: depth of FIFOs (both) 309 * @max_bpw: maximum number of bits per word 310 * @unidir: supports unidirection transfers 311 * @extended_cr: 32 bit wide control register 0 with extra 312 * features and extra features in CR1 as found in the ST variants 313 * @pl023: supports a subset of the ST extensions called "PL023" 314 */ 315 struct vendor_data { 316 int fifodepth; 317 int max_bpw; 318 bool unidir; 319 bool extended_cr; 320 bool pl023; 321 bool loopback; 322 }; 323 324 /** 325 * struct pl022 - This is the private SSP driver data structure 326 * @adev: AMBA device model hookup 327 * @vendor: vendor data for the IP block 328 * @phybase: the physical memory where the SSP device resides 329 * @virtbase: the virtual memory where the SSP is mapped 330 * @clk: outgoing clock "SPICLK" for the SPI bus 331 * @master: SPI framework hookup 332 * @master_info: controller-specific data from machine setup 333 * @workqueue: a workqueue on which any spi_message request is queued 334 * @pump_messages: work struct for scheduling work to the workqueue 335 * @queue_lock: spinlock to syncronise access to message queue 336 * @queue: message queue 337 * @busy: workqueue is busy 338 * @running: workqueue is running 339 * @pump_transfers: Tasklet used in Interrupt Transfer mode 340 * @cur_msg: Pointer to current spi_message being processed 341 * @cur_transfer: Pointer to current spi_transfer 342 * @cur_chip: pointer to current clients chip(assigned from controller_state) 343 * @tx: current position in TX buffer to be read 344 * @tx_end: end position in TX buffer to be read 345 * @rx: current position in RX buffer to be written 346 * @rx_end: end position in RX buffer to be written 347 * @read: the type of read currently going on 348 * @write: the type of write currently going on 349 * @exp_fifo_level: expected FIFO level 350 * @dma_rx_channel: optional channel for RX DMA 351 * @dma_tx_channel: optional channel for TX DMA 352 * @sgt_rx: scattertable for the RX transfer 353 * @sgt_tx: scattertable for the TX transfer 354 * @dummypage: a dummy page used for driving data on the bus with DMA 355 */ 356 struct pl022 { 357 struct amba_device *adev; 358 struct vendor_data *vendor; 359 resource_size_t phybase; 360 void __iomem *virtbase; 361 struct clk *clk; 362 struct spi_master *master; 363 struct pl022_ssp_controller *master_info; 364 /* Driver message queue */ 365 struct workqueue_struct *workqueue; 366 struct work_struct pump_messages; 367 spinlock_t queue_lock; 368 struct list_head queue; 369 bool busy; 370 bool running; 371 /* Message transfer pump */ 372 struct tasklet_struct pump_transfers; 373 struct spi_message *cur_msg; 374 struct spi_transfer *cur_transfer; 375 struct chip_data *cur_chip; 376 void *tx; 377 void *tx_end; 378 void *rx; 379 void *rx_end; 380 enum ssp_reading read; 381 enum ssp_writing write; 382 u32 exp_fifo_level; 383 enum ssp_rx_level_trig rx_lev_trig; 384 enum ssp_tx_level_trig tx_lev_trig; 385 /* DMA settings */ 386 #ifdef CONFIG_DMA_ENGINE 387 struct dma_chan *dma_rx_channel; 388 struct dma_chan *dma_tx_channel; 389 struct sg_table sgt_rx; 390 struct sg_table sgt_tx; 391 char *dummypage; 392 #endif 393 }; 394 395 /** 396 * struct chip_data - To maintain runtime state of SSP for each client chip 397 * @cr0: Value of control register CR0 of SSP - on later ST variants this 398 * register is 32 bits wide rather than just 16 399 * @cr1: Value of control register CR1 of SSP 400 * @dmacr: Value of DMA control Register of SSP 401 * @cpsr: Value of Clock prescale register 402 * @n_bytes: how many bytes(power of 2) reqd for a given data width of client 403 * @enable_dma: Whether to enable DMA or not 404 * @read: function ptr to be used to read when doing xfer for this chip 405 * @write: function ptr to be used to write when doing xfer for this chip 406 * @cs_control: chip select callback provided by chip 407 * @xfer_type: polling/interrupt/DMA 408 * 409 * Runtime state of the SSP controller, maintained per chip, 410 * This would be set according to the current message that would be served 411 */ 412 struct chip_data { 413 u32 cr0; 414 u16 cr1; 415 u16 dmacr; 416 u16 cpsr; 417 u8 n_bytes; 418 bool enable_dma; 419 enum ssp_reading read; 420 enum ssp_writing write; 421 void (*cs_control) (u32 command); 422 int xfer_type; 423 }; 424 425 /** 426 * null_cs_control - Dummy chip select function 427 * @command: select/delect the chip 428 * 429 * If no chip select function is provided by client this is used as dummy 430 * chip select 431 */ 432 static void null_cs_control(u32 command) 433 { 434 pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); 435 } 436 437 /** 438 * giveback - current spi_message is over, schedule next message and call 439 * callback of this message. Assumes that caller already 440 * set message->status; dma and pio irqs are blocked 441 * @pl022: SSP driver private data structure 442 */ 443 static void giveback(struct pl022 *pl022) 444 { 445 struct spi_transfer *last_transfer; 446 unsigned long flags; 447 struct spi_message *msg; 448 void (*curr_cs_control) (u32 command); 449 450 /* 451 * This local reference to the chip select function 452 * is needed because we set curr_chip to NULL 453 * as a step toward termininating the message. 454 */ 455 curr_cs_control = pl022->cur_chip->cs_control; 456 spin_lock_irqsave(&pl022->queue_lock, flags); 457 msg = pl022->cur_msg; 458 pl022->cur_msg = NULL; 459 pl022->cur_transfer = NULL; 460 pl022->cur_chip = NULL; 461 queue_work(pl022->workqueue, &pl022->pump_messages); 462 spin_unlock_irqrestore(&pl022->queue_lock, flags); 463 464 last_transfer = list_entry(msg->transfers.prev, 465 struct spi_transfer, 466 transfer_list); 467 468 /* Delay if requested before any change in chip select */ 469 if (last_transfer->delay_usecs) 470 /* 471 * FIXME: This runs in interrupt context. 472 * Is this really smart? 473 */ 474 udelay(last_transfer->delay_usecs); 475 476 /* 477 * Drop chip select UNLESS cs_change is true or we are returning 478 * a message with an error, or next message is for another chip 479 */ 480 if (!last_transfer->cs_change) 481 curr_cs_control(SSP_CHIP_DESELECT); 482 else { 483 struct spi_message *next_msg; 484 485 /* Holding of cs was hinted, but we need to make sure 486 * the next message is for the same chip. Don't waste 487 * time with the following tests unless this was hinted. 488 * 489 * We cannot postpone this until pump_messages, because 490 * after calling msg->complete (below) the driver that 491 * sent the current message could be unloaded, which 492 * could invalidate the cs_control() callback... 493 */ 494 495 /* get a pointer to the next message, if any */ 496 spin_lock_irqsave(&pl022->queue_lock, flags); 497 if (list_empty(&pl022->queue)) 498 next_msg = NULL; 499 else 500 next_msg = list_entry(pl022->queue.next, 501 struct spi_message, queue); 502 spin_unlock_irqrestore(&pl022->queue_lock, flags); 503 504 /* see if the next and current messages point 505 * to the same chip 506 */ 507 if (next_msg && next_msg->spi != msg->spi) 508 next_msg = NULL; 509 if (!next_msg || msg->state == STATE_ERROR) 510 curr_cs_control(SSP_CHIP_DESELECT); 511 } 512 msg->state = NULL; 513 if (msg->complete) 514 msg->complete(msg->context); 515 /* This message is completed, so let's turn off the clocks & power */ 516 clk_disable(pl022->clk); 517 amba_pclk_disable(pl022->adev); 518 amba_vcore_disable(pl022->adev); 519 pm_runtime_put(&pl022->adev->dev); 520 } 521 522 /** 523 * flush - flush the FIFO to reach a clean state 524 * @pl022: SSP driver private data structure 525 */ 526 static int flush(struct pl022 *pl022) 527 { 528 unsigned long limit = loops_per_jiffy << 1; 529 530 dev_dbg(&pl022->adev->dev, "flush\n"); 531 do { 532 while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) 533 readw(SSP_DR(pl022->virtbase)); 534 } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); 535 536 pl022->exp_fifo_level = 0; 537 538 return limit; 539 } 540 541 /** 542 * restore_state - Load configuration of current chip 543 * @pl022: SSP driver private data structure 544 */ 545 static void restore_state(struct pl022 *pl022) 546 { 547 struct chip_data *chip = pl022->cur_chip; 548 549 if (pl022->vendor->extended_cr) 550 writel(chip->cr0, SSP_CR0(pl022->virtbase)); 551 else 552 writew(chip->cr0, SSP_CR0(pl022->virtbase)); 553 writew(chip->cr1, SSP_CR1(pl022->virtbase)); 554 writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); 555 writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); 556 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 557 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 558 } 559 560 /* 561 * Default SSP Register Values 562 */ 563 #define DEFAULT_SSP_REG_CR0 ( \ 564 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ 565 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ 566 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ 567 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ 568 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ 569 ) 570 571 /* ST versions have slightly different bit layout */ 572 #define DEFAULT_SSP_REG_CR0_ST ( \ 573 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ 574 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ 575 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ 576 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ 577 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ 578 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ 579 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ 580 ) 581 582 /* The PL023 version is slightly different again */ 583 #define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ 584 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ 585 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ 586 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ 587 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ 588 ) 589 590 #define DEFAULT_SSP_REG_CR1 ( \ 591 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ 592 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ 593 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ 594 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ 595 ) 596 597 /* ST versions extend this register to use all 16 bits */ 598 #define DEFAULT_SSP_REG_CR1_ST ( \ 599 DEFAULT_SSP_REG_CR1 | \ 600 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ 601 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ 602 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ 603 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ 604 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ 605 ) 606 607 /* 608 * The PL023 variant has further differences: no loopback mode, no microwire 609 * support, and a new clock feedback delay setting. 610 */ 611 #define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ 612 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ 613 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ 614 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ 615 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ 616 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ 617 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ 618 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ 619 GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ 620 ) 621 622 #define DEFAULT_SSP_REG_CPSR ( \ 623 GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ 624 ) 625 626 #define DEFAULT_SSP_REG_DMACR (\ 627 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ 628 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ 629 ) 630 631 /** 632 * load_ssp_default_config - Load default configuration for SSP 633 * @pl022: SSP driver private data structure 634 */ 635 static void load_ssp_default_config(struct pl022 *pl022) 636 { 637 if (pl022->vendor->pl023) { 638 writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); 639 writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); 640 } else if (pl022->vendor->extended_cr) { 641 writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); 642 writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); 643 } else { 644 writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); 645 writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); 646 } 647 writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); 648 writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); 649 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 650 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 651 } 652 653 /** 654 * This will write to TX and read from RX according to the parameters 655 * set in pl022. 656 */ 657 static void readwriter(struct pl022 *pl022) 658 { 659 660 /* 661 * The FIFO depth is different between primecell variants. 662 * I believe filling in too much in the FIFO might cause 663 * errons in 8bit wide transfers on ARM variants (just 8 words 664 * FIFO, means only 8x8 = 64 bits in FIFO) at least. 665 * 666 * To prevent this issue, the TX FIFO is only filled to the 667 * unused RX FIFO fill length, regardless of what the TX 668 * FIFO status flag indicates. 669 */ 670 dev_dbg(&pl022->adev->dev, 671 "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", 672 __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end); 673 674 /* Read as much as you can */ 675 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) 676 && (pl022->rx < pl022->rx_end)) { 677 switch (pl022->read) { 678 case READING_NULL: 679 readw(SSP_DR(pl022->virtbase)); 680 break; 681 case READING_U8: 682 *(u8 *) (pl022->rx) = 683 readw(SSP_DR(pl022->virtbase)) & 0xFFU; 684 break; 685 case READING_U16: 686 *(u16 *) (pl022->rx) = 687 (u16) readw(SSP_DR(pl022->virtbase)); 688 break; 689 case READING_U32: 690 *(u32 *) (pl022->rx) = 691 readl(SSP_DR(pl022->virtbase)); 692 break; 693 } 694 pl022->rx += (pl022->cur_chip->n_bytes); 695 pl022->exp_fifo_level--; 696 } 697 /* 698 * Write as much as possible up to the RX FIFO size 699 */ 700 while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) 701 && (pl022->tx < pl022->tx_end)) { 702 switch (pl022->write) { 703 case WRITING_NULL: 704 writew(0x0, SSP_DR(pl022->virtbase)); 705 break; 706 case WRITING_U8: 707 writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase)); 708 break; 709 case WRITING_U16: 710 writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase)); 711 break; 712 case WRITING_U32: 713 writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase)); 714 break; 715 } 716 pl022->tx += (pl022->cur_chip->n_bytes); 717 pl022->exp_fifo_level++; 718 /* 719 * This inner reader takes care of things appearing in the RX 720 * FIFO as we're transmitting. This will happen a lot since the 721 * clock starts running when you put things into the TX FIFO, 722 * and then things are continuously clocked into the RX FIFO. 723 */ 724 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) 725 && (pl022->rx < pl022->rx_end)) { 726 switch (pl022->read) { 727 case READING_NULL: 728 readw(SSP_DR(pl022->virtbase)); 729 break; 730 case READING_U8: 731 *(u8 *) (pl022->rx) = 732 readw(SSP_DR(pl022->virtbase)) & 0xFFU; 733 break; 734 case READING_U16: 735 *(u16 *) (pl022->rx) = 736 (u16) readw(SSP_DR(pl022->virtbase)); 737 break; 738 case READING_U32: 739 *(u32 *) (pl022->rx) = 740 readl(SSP_DR(pl022->virtbase)); 741 break; 742 } 743 pl022->rx += (pl022->cur_chip->n_bytes); 744 pl022->exp_fifo_level--; 745 } 746 } 747 /* 748 * When we exit here the TX FIFO should be full and the RX FIFO 749 * should be empty 750 */ 751 } 752 753 /** 754 * next_transfer - Move to the Next transfer in the current spi message 755 * @pl022: SSP driver private data structure 756 * 757 * This function moves though the linked list of spi transfers in the 758 * current spi message and returns with the state of current spi 759 * message i.e whether its last transfer is done(STATE_DONE) or 760 * Next transfer is ready(STATE_RUNNING) 761 */ 762 static void *next_transfer(struct pl022 *pl022) 763 { 764 struct spi_message *msg = pl022->cur_msg; 765 struct spi_transfer *trans = pl022->cur_transfer; 766 767 /* Move to next transfer */ 768 if (trans->transfer_list.next != &msg->transfers) { 769 pl022->cur_transfer = 770 list_entry(trans->transfer_list.next, 771 struct spi_transfer, transfer_list); 772 return STATE_RUNNING; 773 } 774 return STATE_DONE; 775 } 776 777 /* 778 * This DMA functionality is only compiled in if we have 779 * access to the generic DMA devices/DMA engine. 780 */ 781 #ifdef CONFIG_DMA_ENGINE 782 static void unmap_free_dma_scatter(struct pl022 *pl022) 783 { 784 /* Unmap and free the SG tables */ 785 dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl, 786 pl022->sgt_tx.nents, DMA_TO_DEVICE); 787 dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl, 788 pl022->sgt_rx.nents, DMA_FROM_DEVICE); 789 sg_free_table(&pl022->sgt_rx); 790 sg_free_table(&pl022->sgt_tx); 791 } 792 793 static void dma_callback(void *data) 794 { 795 struct pl022 *pl022 = data; 796 struct spi_message *msg = pl022->cur_msg; 797 798 BUG_ON(!pl022->sgt_rx.sgl); 799 800 #ifdef VERBOSE_DEBUG 801 /* 802 * Optionally dump out buffers to inspect contents, this is 803 * good if you want to convince yourself that the loopback 804 * read/write contents are the same, when adopting to a new 805 * DMA engine. 806 */ 807 { 808 struct scatterlist *sg; 809 unsigned int i; 810 811 dma_sync_sg_for_cpu(&pl022->adev->dev, 812 pl022->sgt_rx.sgl, 813 pl022->sgt_rx.nents, 814 DMA_FROM_DEVICE); 815 816 for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { 817 dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); 818 print_hex_dump(KERN_ERR, "SPI RX: ", 819 DUMP_PREFIX_OFFSET, 820 16, 821 1, 822 sg_virt(sg), 823 sg_dma_len(sg), 824 1); 825 } 826 for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { 827 dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); 828 print_hex_dump(KERN_ERR, "SPI TX: ", 829 DUMP_PREFIX_OFFSET, 830 16, 831 1, 832 sg_virt(sg), 833 sg_dma_len(sg), 834 1); 835 } 836 } 837 #endif 838 839 unmap_free_dma_scatter(pl022); 840 841 /* Update total bytes transferred */ 842 msg->actual_length += pl022->cur_transfer->len; 843 if (pl022->cur_transfer->cs_change) 844 pl022->cur_chip-> 845 cs_control(SSP_CHIP_DESELECT); 846 847 /* Move to next transfer */ 848 msg->state = next_transfer(pl022); 849 tasklet_schedule(&pl022->pump_transfers); 850 } 851 852 static void setup_dma_scatter(struct pl022 *pl022, 853 void *buffer, 854 unsigned int length, 855 struct sg_table *sgtab) 856 { 857 struct scatterlist *sg; 858 int bytesleft = length; 859 void *bufp = buffer; 860 int mapbytes; 861 int i; 862 863 if (buffer) { 864 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { 865 /* 866 * If there are less bytes left than what fits 867 * in the current page (plus page alignment offset) 868 * we just feed in this, else we stuff in as much 869 * as we can. 870 */ 871 if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) 872 mapbytes = bytesleft; 873 else 874 mapbytes = PAGE_SIZE - offset_in_page(bufp); 875 sg_set_page(sg, virt_to_page(bufp), 876 mapbytes, offset_in_page(bufp)); 877 bufp += mapbytes; 878 bytesleft -= mapbytes; 879 dev_dbg(&pl022->adev->dev, 880 "set RX/TX target page @ %p, %d bytes, %d left\n", 881 bufp, mapbytes, bytesleft); 882 } 883 } else { 884 /* Map the dummy buffer on every page */ 885 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { 886 if (bytesleft < PAGE_SIZE) 887 mapbytes = bytesleft; 888 else 889 mapbytes = PAGE_SIZE; 890 sg_set_page(sg, virt_to_page(pl022->dummypage), 891 mapbytes, 0); 892 bytesleft -= mapbytes; 893 dev_dbg(&pl022->adev->dev, 894 "set RX/TX to dummy page %d bytes, %d left\n", 895 mapbytes, bytesleft); 896 897 } 898 } 899 BUG_ON(bytesleft); 900 } 901 902 /** 903 * configure_dma - configures the channels for the next transfer 904 * @pl022: SSP driver's private data structure 905 */ 906 static int configure_dma(struct pl022 *pl022) 907 { 908 struct dma_slave_config rx_conf = { 909 .src_addr = SSP_DR(pl022->phybase), 910 .direction = DMA_FROM_DEVICE, 911 }; 912 struct dma_slave_config tx_conf = { 913 .dst_addr = SSP_DR(pl022->phybase), 914 .direction = DMA_TO_DEVICE, 915 }; 916 unsigned int pages; 917 int ret; 918 int rx_sglen, tx_sglen; 919 struct dma_chan *rxchan = pl022->dma_rx_channel; 920 struct dma_chan *txchan = pl022->dma_tx_channel; 921 struct dma_async_tx_descriptor *rxdesc; 922 struct dma_async_tx_descriptor *txdesc; 923 924 /* Check that the channels are available */ 925 if (!rxchan || !txchan) 926 return -ENODEV; 927 928 /* 929 * If supplied, the DMA burstsize should equal the FIFO trigger level. 930 * Notice that the DMA engine uses one-to-one mapping. Since we can 931 * not trigger on 2 elements this needs explicit mapping rather than 932 * calculation. 933 */ 934 switch (pl022->rx_lev_trig) { 935 case SSP_RX_1_OR_MORE_ELEM: 936 rx_conf.src_maxburst = 1; 937 break; 938 case SSP_RX_4_OR_MORE_ELEM: 939 rx_conf.src_maxburst = 4; 940 break; 941 case SSP_RX_8_OR_MORE_ELEM: 942 rx_conf.src_maxburst = 8; 943 break; 944 case SSP_RX_16_OR_MORE_ELEM: 945 rx_conf.src_maxburst = 16; 946 break; 947 case SSP_RX_32_OR_MORE_ELEM: 948 rx_conf.src_maxburst = 32; 949 break; 950 default: 951 rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1; 952 break; 953 } 954 955 switch (pl022->tx_lev_trig) { 956 case SSP_TX_1_OR_MORE_EMPTY_LOC: 957 tx_conf.dst_maxburst = 1; 958 break; 959 case SSP_TX_4_OR_MORE_EMPTY_LOC: 960 tx_conf.dst_maxburst = 4; 961 break; 962 case SSP_TX_8_OR_MORE_EMPTY_LOC: 963 tx_conf.dst_maxburst = 8; 964 break; 965 case SSP_TX_16_OR_MORE_EMPTY_LOC: 966 tx_conf.dst_maxburst = 16; 967 break; 968 case SSP_TX_32_OR_MORE_EMPTY_LOC: 969 tx_conf.dst_maxburst = 32; 970 break; 971 default: 972 tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1; 973 break; 974 } 975 976 switch (pl022->read) { 977 case READING_NULL: 978 /* Use the same as for writing */ 979 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 980 break; 981 case READING_U8: 982 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 983 break; 984 case READING_U16: 985 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 986 break; 987 case READING_U32: 988 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 989 break; 990 } 991 992 switch (pl022->write) { 993 case WRITING_NULL: 994 /* Use the same as for reading */ 995 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 996 break; 997 case WRITING_U8: 998 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 999 break; 1000 case WRITING_U16: 1001 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 1002 break; 1003 case WRITING_U32: 1004 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1005 break; 1006 } 1007 1008 /* SPI pecularity: we need to read and write the same width */ 1009 if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 1010 rx_conf.src_addr_width = tx_conf.dst_addr_width; 1011 if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 1012 tx_conf.dst_addr_width = rx_conf.src_addr_width; 1013 BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); 1014 1015 dmaengine_slave_config(rxchan, &rx_conf); 1016 dmaengine_slave_config(txchan, &tx_conf); 1017 1018 /* Create sglists for the transfers */ 1019 pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE); 1020 dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); 1021 1022 ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC); 1023 if (ret) 1024 goto err_alloc_rx_sg; 1025 1026 ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC); 1027 if (ret) 1028 goto err_alloc_tx_sg; 1029 1030 /* Fill in the scatterlists for the RX+TX buffers */ 1031 setup_dma_scatter(pl022, pl022->rx, 1032 pl022->cur_transfer->len, &pl022->sgt_rx); 1033 setup_dma_scatter(pl022, pl022->tx, 1034 pl022->cur_transfer->len, &pl022->sgt_tx); 1035 1036 /* Map DMA buffers */ 1037 rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, 1038 pl022->sgt_rx.nents, DMA_FROM_DEVICE); 1039 if (!rx_sglen) 1040 goto err_rx_sgmap; 1041 1042 tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, 1043 pl022->sgt_tx.nents, DMA_TO_DEVICE); 1044 if (!tx_sglen) 1045 goto err_tx_sgmap; 1046 1047 /* Send both scatterlists */ 1048 rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 1049 pl022->sgt_rx.sgl, 1050 rx_sglen, 1051 DMA_FROM_DEVICE, 1052 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1053 if (!rxdesc) 1054 goto err_rxdesc; 1055 1056 txdesc = txchan->device->device_prep_slave_sg(txchan, 1057 pl022->sgt_tx.sgl, 1058 tx_sglen, 1059 DMA_TO_DEVICE, 1060 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1061 if (!txdesc) 1062 goto err_txdesc; 1063 1064 /* Put the callback on the RX transfer only, that should finish last */ 1065 rxdesc->callback = dma_callback; 1066 rxdesc->callback_param = pl022; 1067 1068 /* Submit and fire RX and TX with TX last so we're ready to read! */ 1069 dmaengine_submit(rxdesc); 1070 dmaengine_submit(txdesc); 1071 dma_async_issue_pending(rxchan); 1072 dma_async_issue_pending(txchan); 1073 1074 return 0; 1075 1076 err_txdesc: 1077 dmaengine_terminate_all(txchan); 1078 err_rxdesc: 1079 dmaengine_terminate_all(rxchan); 1080 dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, 1081 pl022->sgt_tx.nents, DMA_TO_DEVICE); 1082 err_tx_sgmap: 1083 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, 1084 pl022->sgt_tx.nents, DMA_FROM_DEVICE); 1085 err_rx_sgmap: 1086 sg_free_table(&pl022->sgt_tx); 1087 err_alloc_tx_sg: 1088 sg_free_table(&pl022->sgt_rx); 1089 err_alloc_rx_sg: 1090 return -ENOMEM; 1091 } 1092 1093 static int __init pl022_dma_probe(struct pl022 *pl022) 1094 { 1095 dma_cap_mask_t mask; 1096 1097 /* Try to acquire a generic DMA engine slave channel */ 1098 dma_cap_zero(mask); 1099 dma_cap_set(DMA_SLAVE, mask); 1100 /* 1101 * We need both RX and TX channels to do DMA, else do none 1102 * of them. 1103 */ 1104 pl022->dma_rx_channel = dma_request_channel(mask, 1105 pl022->master_info->dma_filter, 1106 pl022->master_info->dma_rx_param); 1107 if (!pl022->dma_rx_channel) { 1108 dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); 1109 goto err_no_rxchan; 1110 } 1111 1112 pl022->dma_tx_channel = dma_request_channel(mask, 1113 pl022->master_info->dma_filter, 1114 pl022->master_info->dma_tx_param); 1115 if (!pl022->dma_tx_channel) { 1116 dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); 1117 goto err_no_txchan; 1118 } 1119 1120 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1121 if (!pl022->dummypage) { 1122 dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); 1123 goto err_no_dummypage; 1124 } 1125 1126 dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", 1127 dma_chan_name(pl022->dma_rx_channel), 1128 dma_chan_name(pl022->dma_tx_channel)); 1129 1130 return 0; 1131 1132 err_no_dummypage: 1133 dma_release_channel(pl022->dma_tx_channel); 1134 err_no_txchan: 1135 dma_release_channel(pl022->dma_rx_channel); 1136 pl022->dma_rx_channel = NULL; 1137 err_no_rxchan: 1138 dev_err(&pl022->adev->dev, 1139 "Failed to work in dma mode, work without dma!\n"); 1140 return -ENODEV; 1141 } 1142 1143 static void terminate_dma(struct pl022 *pl022) 1144 { 1145 struct dma_chan *rxchan = pl022->dma_rx_channel; 1146 struct dma_chan *txchan = pl022->dma_tx_channel; 1147 1148 dmaengine_terminate_all(rxchan); 1149 dmaengine_terminate_all(txchan); 1150 unmap_free_dma_scatter(pl022); 1151 } 1152 1153 static void pl022_dma_remove(struct pl022 *pl022) 1154 { 1155 if (pl022->busy) 1156 terminate_dma(pl022); 1157 if (pl022->dma_tx_channel) 1158 dma_release_channel(pl022->dma_tx_channel); 1159 if (pl022->dma_rx_channel) 1160 dma_release_channel(pl022->dma_rx_channel); 1161 kfree(pl022->dummypage); 1162 } 1163 1164 #else 1165 static inline int configure_dma(struct pl022 *pl022) 1166 { 1167 return -ENODEV; 1168 } 1169 1170 static inline int pl022_dma_probe(struct pl022 *pl022) 1171 { 1172 return 0; 1173 } 1174 1175 static inline void pl022_dma_remove(struct pl022 *pl022) 1176 { 1177 } 1178 #endif 1179 1180 /** 1181 * pl022_interrupt_handler - Interrupt handler for SSP controller 1182 * 1183 * This function handles interrupts generated for an interrupt based transfer. 1184 * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the 1185 * current message's state as STATE_ERROR and schedule the tasklet 1186 * pump_transfers which will do the postprocessing of the current message by 1187 * calling giveback(). Otherwise it reads data from RX FIFO till there is no 1188 * more data, and writes data in TX FIFO till it is not full. If we complete 1189 * the transfer we move to the next transfer and schedule the tasklet. 1190 */ 1191 static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) 1192 { 1193 struct pl022 *pl022 = dev_id; 1194 struct spi_message *msg = pl022->cur_msg; 1195 u16 irq_status = 0; 1196 u16 flag = 0; 1197 1198 if (unlikely(!msg)) { 1199 dev_err(&pl022->adev->dev, 1200 "bad message state in interrupt handler"); 1201 /* Never fail */ 1202 return IRQ_HANDLED; 1203 } 1204 1205 /* Read the Interrupt Status Register */ 1206 irq_status = readw(SSP_MIS(pl022->virtbase)); 1207 1208 if (unlikely(!irq_status)) 1209 return IRQ_NONE; 1210 1211 /* 1212 * This handles the FIFO interrupts, the timeout 1213 * interrupts are flatly ignored, they cannot be 1214 * trusted. 1215 */ 1216 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { 1217 /* 1218 * Overrun interrupt - bail out since our Data has been 1219 * corrupted 1220 */ 1221 dev_err(&pl022->adev->dev, "FIFO overrun\n"); 1222 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) 1223 dev_err(&pl022->adev->dev, 1224 "RXFIFO is full\n"); 1225 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) 1226 dev_err(&pl022->adev->dev, 1227 "TXFIFO is full\n"); 1228 1229 /* 1230 * Disable and clear interrupts, disable SSP, 1231 * mark message with bad status so it can be 1232 * retried. 1233 */ 1234 writew(DISABLE_ALL_INTERRUPTS, 1235 SSP_IMSC(pl022->virtbase)); 1236 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 1237 writew((readw(SSP_CR1(pl022->virtbase)) & 1238 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); 1239 msg->state = STATE_ERROR; 1240 1241 /* Schedule message queue handler */ 1242 tasklet_schedule(&pl022->pump_transfers); 1243 return IRQ_HANDLED; 1244 } 1245 1246 readwriter(pl022); 1247 1248 if ((pl022->tx == pl022->tx_end) && (flag == 0)) { 1249 flag = 1; 1250 /* Disable Transmit interrupt */ 1251 writew(readw(SSP_IMSC(pl022->virtbase)) & 1252 (~SSP_IMSC_MASK_TXIM), 1253 SSP_IMSC(pl022->virtbase)); 1254 } 1255 1256 /* 1257 * Since all transactions must write as much as shall be read, 1258 * we can conclude the entire transaction once RX is complete. 1259 * At this point, all TX will always be finished. 1260 */ 1261 if (pl022->rx >= pl022->rx_end) { 1262 writew(DISABLE_ALL_INTERRUPTS, 1263 SSP_IMSC(pl022->virtbase)); 1264 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 1265 if (unlikely(pl022->rx > pl022->rx_end)) { 1266 dev_warn(&pl022->adev->dev, "read %u surplus " 1267 "bytes (did you request an odd " 1268 "number of bytes on a 16bit bus?)\n", 1269 (u32) (pl022->rx - pl022->rx_end)); 1270 } 1271 /* Update total bytes transferred */ 1272 msg->actual_length += pl022->cur_transfer->len; 1273 if (pl022->cur_transfer->cs_change) 1274 pl022->cur_chip-> 1275 cs_control(SSP_CHIP_DESELECT); 1276 /* Move to next transfer */ 1277 msg->state = next_transfer(pl022); 1278 tasklet_schedule(&pl022->pump_transfers); 1279 return IRQ_HANDLED; 1280 } 1281 1282 return IRQ_HANDLED; 1283 } 1284 1285 /** 1286 * This sets up the pointers to memory for the next message to 1287 * send out on the SPI bus. 1288 */ 1289 static int set_up_next_transfer(struct pl022 *pl022, 1290 struct spi_transfer *transfer) 1291 { 1292 int residue; 1293 1294 /* Sanity check the message for this bus width */ 1295 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; 1296 if (unlikely(residue != 0)) { 1297 dev_err(&pl022->adev->dev, 1298 "message of %u bytes to transmit but the current " 1299 "chip bus has a data width of %u bytes!\n", 1300 pl022->cur_transfer->len, 1301 pl022->cur_chip->n_bytes); 1302 dev_err(&pl022->adev->dev, "skipping this message\n"); 1303 return -EIO; 1304 } 1305 pl022->tx = (void *)transfer->tx_buf; 1306 pl022->tx_end = pl022->tx + pl022->cur_transfer->len; 1307 pl022->rx = (void *)transfer->rx_buf; 1308 pl022->rx_end = pl022->rx + pl022->cur_transfer->len; 1309 pl022->write = 1310 pl022->tx ? pl022->cur_chip->write : WRITING_NULL; 1311 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; 1312 return 0; 1313 } 1314 1315 /** 1316 * pump_transfers - Tasklet function which schedules next transfer 1317 * when running in interrupt or DMA transfer mode. 1318 * @data: SSP driver private data structure 1319 * 1320 */ 1321 static void pump_transfers(unsigned long data) 1322 { 1323 struct pl022 *pl022 = (struct pl022 *) data; 1324 struct spi_message *message = NULL; 1325 struct spi_transfer *transfer = NULL; 1326 struct spi_transfer *previous = NULL; 1327 1328 /* Get current state information */ 1329 message = pl022->cur_msg; 1330 transfer = pl022->cur_transfer; 1331 1332 /* Handle for abort */ 1333 if (message->state == STATE_ERROR) { 1334 message->status = -EIO; 1335 giveback(pl022); 1336 return; 1337 } 1338 1339 /* Handle end of message */ 1340 if (message->state == STATE_DONE) { 1341 message->status = 0; 1342 giveback(pl022); 1343 return; 1344 } 1345 1346 /* Delay if requested at end of transfer before CS change */ 1347 if (message->state == STATE_RUNNING) { 1348 previous = list_entry(transfer->transfer_list.prev, 1349 struct spi_transfer, 1350 transfer_list); 1351 if (previous->delay_usecs) 1352 /* 1353 * FIXME: This runs in interrupt context. 1354 * Is this really smart? 1355 */ 1356 udelay(previous->delay_usecs); 1357 1358 /* Drop chip select only if cs_change is requested */ 1359 if (previous->cs_change) 1360 pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1361 } else { 1362 /* STATE_START */ 1363 message->state = STATE_RUNNING; 1364 } 1365 1366 if (set_up_next_transfer(pl022, transfer)) { 1367 message->state = STATE_ERROR; 1368 message->status = -EIO; 1369 giveback(pl022); 1370 return; 1371 } 1372 /* Flush the FIFOs and let's go! */ 1373 flush(pl022); 1374 1375 if (pl022->cur_chip->enable_dma) { 1376 if (configure_dma(pl022)) { 1377 dev_dbg(&pl022->adev->dev, 1378 "configuration of DMA failed, fall back to interrupt mode\n"); 1379 goto err_config_dma; 1380 } 1381 return; 1382 } 1383 1384 err_config_dma: 1385 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 1386 } 1387 1388 static void do_interrupt_dma_transfer(struct pl022 *pl022) 1389 { 1390 u32 irqflags = ENABLE_ALL_INTERRUPTS; 1391 1392 /* Enable target chip */ 1393 pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1394 if (set_up_next_transfer(pl022, pl022->cur_transfer)) { 1395 /* Error path */ 1396 pl022->cur_msg->state = STATE_ERROR; 1397 pl022->cur_msg->status = -EIO; 1398 giveback(pl022); 1399 return; 1400 } 1401 /* If we're using DMA, set up DMA here */ 1402 if (pl022->cur_chip->enable_dma) { 1403 /* Configure DMA transfer */ 1404 if (configure_dma(pl022)) { 1405 dev_dbg(&pl022->adev->dev, 1406 "configuration of DMA failed, fall back to interrupt mode\n"); 1407 goto err_config_dma; 1408 } 1409 /* Disable interrupts in DMA mode, IRQ from DMA controller */ 1410 irqflags = DISABLE_ALL_INTERRUPTS; 1411 } 1412 err_config_dma: 1413 /* Enable SSP, turn on interrupts */ 1414 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1415 SSP_CR1(pl022->virtbase)); 1416 writew(irqflags, SSP_IMSC(pl022->virtbase)); 1417 } 1418 1419 static void do_polling_transfer(struct pl022 *pl022) 1420 { 1421 struct spi_message *message = NULL; 1422 struct spi_transfer *transfer = NULL; 1423 struct spi_transfer *previous = NULL; 1424 struct chip_data *chip; 1425 unsigned long time, timeout; 1426 1427 chip = pl022->cur_chip; 1428 message = pl022->cur_msg; 1429 1430 while (message->state != STATE_DONE) { 1431 /* Handle for abort */ 1432 if (message->state == STATE_ERROR) 1433 break; 1434 transfer = pl022->cur_transfer; 1435 1436 /* Delay if requested at end of transfer */ 1437 if (message->state == STATE_RUNNING) { 1438 previous = 1439 list_entry(transfer->transfer_list.prev, 1440 struct spi_transfer, transfer_list); 1441 if (previous->delay_usecs) 1442 udelay(previous->delay_usecs); 1443 if (previous->cs_change) 1444 pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1445 } else { 1446 /* STATE_START */ 1447 message->state = STATE_RUNNING; 1448 pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1449 } 1450 1451 /* Configuration Changing Per Transfer */ 1452 if (set_up_next_transfer(pl022, transfer)) { 1453 /* Error path */ 1454 message->state = STATE_ERROR; 1455 break; 1456 } 1457 /* Flush FIFOs and enable SSP */ 1458 flush(pl022); 1459 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1460 SSP_CR1(pl022->virtbase)); 1461 1462 dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); 1463 1464 timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); 1465 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { 1466 time = jiffies; 1467 readwriter(pl022); 1468 if (time_after(time, timeout)) { 1469 dev_warn(&pl022->adev->dev, 1470 "%s: timeout!\n", __func__); 1471 message->state = STATE_ERROR; 1472 goto out; 1473 } 1474 cpu_relax(); 1475 } 1476 1477 /* Update total byte transferred */ 1478 message->actual_length += pl022->cur_transfer->len; 1479 if (pl022->cur_transfer->cs_change) 1480 pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); 1481 /* Move to next transfer */ 1482 message->state = next_transfer(pl022); 1483 } 1484 out: 1485 /* Handle end of message */ 1486 if (message->state == STATE_DONE) 1487 message->status = 0; 1488 else 1489 message->status = -EIO; 1490 1491 giveback(pl022); 1492 return; 1493 } 1494 1495 /** 1496 * pump_messages - Workqueue function which processes spi message queue 1497 * @data: pointer to private data of SSP driver 1498 * 1499 * This function checks if there is any spi message in the queue that 1500 * needs processing and delegate control to appropriate function 1501 * do_polling_transfer()/do_interrupt_dma_transfer() 1502 * based on the kind of the transfer 1503 * 1504 */ 1505 static void pump_messages(struct work_struct *work) 1506 { 1507 struct pl022 *pl022 = 1508 container_of(work, struct pl022, pump_messages); 1509 unsigned long flags; 1510 1511 /* Lock queue and check for queue work */ 1512 spin_lock_irqsave(&pl022->queue_lock, flags); 1513 if (list_empty(&pl022->queue) || !pl022->running) { 1514 pl022->busy = false; 1515 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1516 return; 1517 } 1518 /* Make sure we are not already running a message */ 1519 if (pl022->cur_msg) { 1520 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1521 return; 1522 } 1523 /* Extract head of queue */ 1524 pl022->cur_msg = 1525 list_entry(pl022->queue.next, struct spi_message, queue); 1526 1527 list_del_init(&pl022->cur_msg->queue); 1528 pl022->busy = true; 1529 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1530 1531 /* Initial message state */ 1532 pl022->cur_msg->state = STATE_START; 1533 pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, 1534 struct spi_transfer, transfer_list); 1535 1536 /* Setup the SPI using the per chip configuration */ 1537 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); 1538 /* 1539 * We enable the core voltage and clocks here, then the clocks 1540 * and core will be disabled when giveback() is called in each method 1541 * (poll/interrupt/DMA) 1542 */ 1543 pm_runtime_get_sync(&pl022->adev->dev); 1544 amba_vcore_enable(pl022->adev); 1545 amba_pclk_enable(pl022->adev); 1546 clk_enable(pl022->clk); 1547 restore_state(pl022); 1548 flush(pl022); 1549 1550 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) 1551 do_polling_transfer(pl022); 1552 else 1553 do_interrupt_dma_transfer(pl022); 1554 } 1555 1556 static int __init init_queue(struct pl022 *pl022) 1557 { 1558 INIT_LIST_HEAD(&pl022->queue); 1559 spin_lock_init(&pl022->queue_lock); 1560 1561 pl022->running = false; 1562 pl022->busy = false; 1563 1564 tasklet_init(&pl022->pump_transfers, pump_transfers, 1565 (unsigned long)pl022); 1566 1567 INIT_WORK(&pl022->pump_messages, pump_messages); 1568 pl022->workqueue = create_singlethread_workqueue( 1569 dev_name(pl022->master->dev.parent)); 1570 if (pl022->workqueue == NULL) 1571 return -EBUSY; 1572 1573 return 0; 1574 } 1575 1576 static int start_queue(struct pl022 *pl022) 1577 { 1578 unsigned long flags; 1579 1580 spin_lock_irqsave(&pl022->queue_lock, flags); 1581 1582 if (pl022->running || pl022->busy) { 1583 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1584 return -EBUSY; 1585 } 1586 1587 pl022->running = true; 1588 pl022->cur_msg = NULL; 1589 pl022->cur_transfer = NULL; 1590 pl022->cur_chip = NULL; 1591 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1592 1593 queue_work(pl022->workqueue, &pl022->pump_messages); 1594 1595 return 0; 1596 } 1597 1598 static int stop_queue(struct pl022 *pl022) 1599 { 1600 unsigned long flags; 1601 unsigned limit = 500; 1602 int status = 0; 1603 1604 spin_lock_irqsave(&pl022->queue_lock, flags); 1605 1606 /* This is a bit lame, but is optimized for the common execution path. 1607 * A wait_queue on the pl022->busy could be used, but then the common 1608 * execution path (pump_messages) would be required to call wake_up or 1609 * friends on every SPI message. Do this instead */ 1610 while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { 1611 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1612 msleep(10); 1613 spin_lock_irqsave(&pl022->queue_lock, flags); 1614 } 1615 1616 if (!list_empty(&pl022->queue) || pl022->busy) 1617 status = -EBUSY; 1618 else 1619 pl022->running = false; 1620 1621 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1622 1623 return status; 1624 } 1625 1626 static int destroy_queue(struct pl022 *pl022) 1627 { 1628 int status; 1629 1630 status = stop_queue(pl022); 1631 /* we are unloading the module or failing to load (only two calls 1632 * to this routine), and neither call can handle a return value. 1633 * However, destroy_workqueue calls flush_workqueue, and that will 1634 * block until all work is done. If the reason that stop_queue 1635 * timed out is that the work will never finish, then it does no 1636 * good to call destroy_workqueue, so return anyway. */ 1637 if (status != 0) 1638 return status; 1639 1640 destroy_workqueue(pl022->workqueue); 1641 1642 return 0; 1643 } 1644 1645 static int verify_controller_parameters(struct pl022 *pl022, 1646 struct pl022_config_chip const *chip_info) 1647 { 1648 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) 1649 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { 1650 dev_err(&pl022->adev->dev, 1651 "interface is configured incorrectly\n"); 1652 return -EINVAL; 1653 } 1654 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && 1655 (!pl022->vendor->unidir)) { 1656 dev_err(&pl022->adev->dev, 1657 "unidirectional mode not supported in this " 1658 "hardware version\n"); 1659 return -EINVAL; 1660 } 1661 if ((chip_info->hierarchy != SSP_MASTER) 1662 && (chip_info->hierarchy != SSP_SLAVE)) { 1663 dev_err(&pl022->adev->dev, 1664 "hierarchy is configured incorrectly\n"); 1665 return -EINVAL; 1666 } 1667 if ((chip_info->com_mode != INTERRUPT_TRANSFER) 1668 && (chip_info->com_mode != DMA_TRANSFER) 1669 && (chip_info->com_mode != POLLING_TRANSFER)) { 1670 dev_err(&pl022->adev->dev, 1671 "Communication mode is configured incorrectly\n"); 1672 return -EINVAL; 1673 } 1674 switch (chip_info->rx_lev_trig) { 1675 case SSP_RX_1_OR_MORE_ELEM: 1676 case SSP_RX_4_OR_MORE_ELEM: 1677 case SSP_RX_8_OR_MORE_ELEM: 1678 /* These are always OK, all variants can handle this */ 1679 break; 1680 case SSP_RX_16_OR_MORE_ELEM: 1681 if (pl022->vendor->fifodepth < 16) { 1682 dev_err(&pl022->adev->dev, 1683 "RX FIFO Trigger Level is configured incorrectly\n"); 1684 return -EINVAL; 1685 } 1686 break; 1687 case SSP_RX_32_OR_MORE_ELEM: 1688 if (pl022->vendor->fifodepth < 32) { 1689 dev_err(&pl022->adev->dev, 1690 "RX FIFO Trigger Level is configured incorrectly\n"); 1691 return -EINVAL; 1692 } 1693 break; 1694 default: 1695 dev_err(&pl022->adev->dev, 1696 "RX FIFO Trigger Level is configured incorrectly\n"); 1697 return -EINVAL; 1698 break; 1699 } 1700 switch (chip_info->tx_lev_trig) { 1701 case SSP_TX_1_OR_MORE_EMPTY_LOC: 1702 case SSP_TX_4_OR_MORE_EMPTY_LOC: 1703 case SSP_TX_8_OR_MORE_EMPTY_LOC: 1704 /* These are always OK, all variants can handle this */ 1705 break; 1706 case SSP_TX_16_OR_MORE_EMPTY_LOC: 1707 if (pl022->vendor->fifodepth < 16) { 1708 dev_err(&pl022->adev->dev, 1709 "TX FIFO Trigger Level is configured incorrectly\n"); 1710 return -EINVAL; 1711 } 1712 break; 1713 case SSP_TX_32_OR_MORE_EMPTY_LOC: 1714 if (pl022->vendor->fifodepth < 32) { 1715 dev_err(&pl022->adev->dev, 1716 "TX FIFO Trigger Level is configured incorrectly\n"); 1717 return -EINVAL; 1718 } 1719 break; 1720 default: 1721 dev_err(&pl022->adev->dev, 1722 "TX FIFO Trigger Level is configured incorrectly\n"); 1723 return -EINVAL; 1724 break; 1725 } 1726 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { 1727 if ((chip_info->ctrl_len < SSP_BITS_4) 1728 || (chip_info->ctrl_len > SSP_BITS_32)) { 1729 dev_err(&pl022->adev->dev, 1730 "CTRL LEN is configured incorrectly\n"); 1731 return -EINVAL; 1732 } 1733 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) 1734 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { 1735 dev_err(&pl022->adev->dev, 1736 "Wait State is configured incorrectly\n"); 1737 return -EINVAL; 1738 } 1739 /* Half duplex is only available in the ST Micro version */ 1740 if (pl022->vendor->extended_cr) { 1741 if ((chip_info->duplex != 1742 SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1743 && (chip_info->duplex != 1744 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { 1745 dev_err(&pl022->adev->dev, 1746 "Microwire duplex mode is configured incorrectly\n"); 1747 return -EINVAL; 1748 } 1749 } else { 1750 if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1751 dev_err(&pl022->adev->dev, 1752 "Microwire half duplex mode requested," 1753 " but this is only available in the" 1754 " ST version of PL022\n"); 1755 return -EINVAL; 1756 } 1757 } 1758 return 0; 1759 } 1760 1761 /** 1762 * pl022_transfer - transfer function registered to SPI master framework 1763 * @spi: spi device which is requesting transfer 1764 * @msg: spi message which is to handled is queued to driver queue 1765 * 1766 * This function is registered to the SPI framework for this SPI master 1767 * controller. It will queue the spi_message in the queue of driver if 1768 * the queue is not stopped and return. 1769 */ 1770 static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) 1771 { 1772 struct pl022 *pl022 = spi_master_get_devdata(spi->master); 1773 unsigned long flags; 1774 1775 spin_lock_irqsave(&pl022->queue_lock, flags); 1776 1777 if (!pl022->running) { 1778 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1779 return -ESHUTDOWN; 1780 } 1781 msg->actual_length = 0; 1782 msg->status = -EINPROGRESS; 1783 msg->state = STATE_START; 1784 1785 list_add_tail(&msg->queue, &pl022->queue); 1786 if (pl022->running && !pl022->busy) 1787 queue_work(pl022->workqueue, &pl022->pump_messages); 1788 1789 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1790 return 0; 1791 } 1792 1793 static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr) 1794 { 1795 return rate / (cpsdvsr * (1 + scr)); 1796 } 1797 1798 static int calculate_effective_freq(struct pl022 *pl022, int freq, struct 1799 ssp_clock_params * clk_freq) 1800 { 1801 /* Lets calculate the frequency parameters */ 1802 u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN; 1803 u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0, 1804 best_scr = 0, tmp, found = 0; 1805 1806 rate = clk_get_rate(pl022->clk); 1807 /* cpsdvscr = 2 & scr 0 */ 1808 max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN); 1809 /* cpsdvsr = 254 & scr = 255 */ 1810 min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX); 1811 1812 if (!((freq <= max_tclk) && (freq >= min_tclk))) { 1813 dev_err(&pl022->adev->dev, 1814 "controller data is incorrect: out of range frequency"); 1815 return -EINVAL; 1816 } 1817 1818 /* 1819 * best_freq will give closest possible available rate (<= requested 1820 * freq) for all values of scr & cpsdvsr. 1821 */ 1822 while ((cpsdvsr <= CPSDVR_MAX) && !found) { 1823 while (scr <= SCR_MAX) { 1824 tmp = spi_rate(rate, cpsdvsr, scr); 1825 1826 if (tmp > freq) 1827 scr++; 1828 /* 1829 * If found exact value, update and break. 1830 * If found more closer value, update and continue. 1831 */ 1832 else if ((tmp == freq) || (tmp > best_freq)) { 1833 best_freq = tmp; 1834 best_cpsdvsr = cpsdvsr; 1835 best_scr = scr; 1836 1837 if (tmp == freq) 1838 break; 1839 } 1840 scr++; 1841 } 1842 cpsdvsr += 2; 1843 scr = SCR_MIN; 1844 } 1845 1846 clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF); 1847 clk_freq->scr = (u8) (best_scr & 0xFF); 1848 dev_dbg(&pl022->adev->dev, 1849 "SSP Target Frequency is: %u, Effective Frequency is %u\n", 1850 freq, best_freq); 1851 dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n", 1852 clk_freq->cpsdvsr, clk_freq->scr); 1853 1854 return 0; 1855 } 1856 1857 /* 1858 * A piece of default chip info unless the platform 1859 * supplies it. 1860 */ 1861 static const struct pl022_config_chip pl022_default_chip_info = { 1862 .com_mode = POLLING_TRANSFER, 1863 .iface = SSP_INTERFACE_MOTOROLA_SPI, 1864 .hierarchy = SSP_SLAVE, 1865 .slave_tx_disable = DO_NOT_DRIVE_TX, 1866 .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, 1867 .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, 1868 .ctrl_len = SSP_BITS_8, 1869 .wait_state = SSP_MWIRE_WAIT_ZERO, 1870 .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, 1871 .cs_control = null_cs_control, 1872 }; 1873 1874 /** 1875 * pl022_setup - setup function registered to SPI master framework 1876 * @spi: spi device which is requesting setup 1877 * 1878 * This function is registered to the SPI framework for this SPI master 1879 * controller. If it is the first time when setup is called by this device, 1880 * this function will initialize the runtime state for this chip and save 1881 * the same in the device structure. Else it will update the runtime info 1882 * with the updated chip info. Nothing is really being written to the 1883 * controller hardware here, that is not done until the actual transfer 1884 * commence. 1885 */ 1886 static int pl022_setup(struct spi_device *spi) 1887 { 1888 struct pl022_config_chip const *chip_info; 1889 struct chip_data *chip; 1890 struct ssp_clock_params clk_freq = {0, }; 1891 int status = 0; 1892 struct pl022 *pl022 = spi_master_get_devdata(spi->master); 1893 unsigned int bits = spi->bits_per_word; 1894 u32 tmp; 1895 1896 if (!spi->max_speed_hz) 1897 return -EINVAL; 1898 1899 /* Get controller_state if one is supplied */ 1900 chip = spi_get_ctldata(spi); 1901 1902 if (chip == NULL) { 1903 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 1904 if (!chip) { 1905 dev_err(&spi->dev, 1906 "cannot allocate controller state\n"); 1907 return -ENOMEM; 1908 } 1909 dev_dbg(&spi->dev, 1910 "allocated memory for controller's runtime state\n"); 1911 } 1912 1913 /* Get controller data if one is supplied */ 1914 chip_info = spi->controller_data; 1915 1916 if (chip_info == NULL) { 1917 chip_info = &pl022_default_chip_info; 1918 /* spi_board_info.controller_data not is supplied */ 1919 dev_dbg(&spi->dev, 1920 "using default controller_data settings\n"); 1921 } else 1922 dev_dbg(&spi->dev, 1923 "using user supplied controller_data settings\n"); 1924 1925 /* 1926 * We can override with custom divisors, else we use the board 1927 * frequency setting 1928 */ 1929 if ((0 == chip_info->clk_freq.cpsdvsr) 1930 && (0 == chip_info->clk_freq.scr)) { 1931 status = calculate_effective_freq(pl022, 1932 spi->max_speed_hz, 1933 &clk_freq); 1934 if (status < 0) 1935 goto err_config_params; 1936 } else { 1937 memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); 1938 if ((clk_freq.cpsdvsr % 2) != 0) 1939 clk_freq.cpsdvsr = 1940 clk_freq.cpsdvsr - 1; 1941 } 1942 if ((clk_freq.cpsdvsr < CPSDVR_MIN) 1943 || (clk_freq.cpsdvsr > CPSDVR_MAX)) { 1944 status = -EINVAL; 1945 dev_err(&spi->dev, 1946 "cpsdvsr is configured incorrectly\n"); 1947 goto err_config_params; 1948 } 1949 1950 status = verify_controller_parameters(pl022, chip_info); 1951 if (status) { 1952 dev_err(&spi->dev, "controller data is incorrect"); 1953 goto err_config_params; 1954 } 1955 1956 pl022->rx_lev_trig = chip_info->rx_lev_trig; 1957 pl022->tx_lev_trig = chip_info->tx_lev_trig; 1958 1959 /* Now set controller state based on controller data */ 1960 chip->xfer_type = chip_info->com_mode; 1961 if (!chip_info->cs_control) { 1962 chip->cs_control = null_cs_control; 1963 dev_warn(&spi->dev, 1964 "chip select function is NULL for this chip\n"); 1965 } else 1966 chip->cs_control = chip_info->cs_control; 1967 1968 if (bits <= 3) { 1969 /* PL022 doesn't support less than 4-bits */ 1970 status = -ENOTSUPP; 1971 goto err_config_params; 1972 } else if (bits <= 8) { 1973 dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); 1974 chip->n_bytes = 1; 1975 chip->read = READING_U8; 1976 chip->write = WRITING_U8; 1977 } else if (bits <= 16) { 1978 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); 1979 chip->n_bytes = 2; 1980 chip->read = READING_U16; 1981 chip->write = WRITING_U16; 1982 } else { 1983 if (pl022->vendor->max_bpw >= 32) { 1984 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); 1985 chip->n_bytes = 4; 1986 chip->read = READING_U32; 1987 chip->write = WRITING_U32; 1988 } else { 1989 dev_err(&spi->dev, 1990 "illegal data size for this controller!\n"); 1991 dev_err(&spi->dev, 1992 "a standard pl022 can only handle " 1993 "1 <= n <= 16 bit words\n"); 1994 status = -ENOTSUPP; 1995 goto err_config_params; 1996 } 1997 } 1998 1999 /* Now Initialize all register settings required for this chip */ 2000 chip->cr0 = 0; 2001 chip->cr1 = 0; 2002 chip->dmacr = 0; 2003 chip->cpsr = 0; 2004 if ((chip_info->com_mode == DMA_TRANSFER) 2005 && ((pl022->master_info)->enable_dma)) { 2006 chip->enable_dma = true; 2007 dev_dbg(&spi->dev, "DMA mode set in controller state\n"); 2008 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, 2009 SSP_DMACR_MASK_RXDMAE, 0); 2010 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, 2011 SSP_DMACR_MASK_TXDMAE, 1); 2012 } else { 2013 chip->enable_dma = false; 2014 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); 2015 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, 2016 SSP_DMACR_MASK_RXDMAE, 0); 2017 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, 2018 SSP_DMACR_MASK_TXDMAE, 1); 2019 } 2020 2021 chip->cpsr = clk_freq.cpsdvsr; 2022 2023 /* Special setup for the ST micro extended control registers */ 2024 if (pl022->vendor->extended_cr) { 2025 u32 etx; 2026 2027 if (pl022->vendor->pl023) { 2028 /* These bits are only in the PL023 */ 2029 SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, 2030 SSP_CR1_MASK_FBCLKDEL_ST, 13); 2031 } else { 2032 /* These bits are in the PL022 but not PL023 */ 2033 SSP_WRITE_BITS(chip->cr0, chip_info->duplex, 2034 SSP_CR0_MASK_HALFDUP_ST, 5); 2035 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, 2036 SSP_CR0_MASK_CSS_ST, 16); 2037 SSP_WRITE_BITS(chip->cr0, chip_info->iface, 2038 SSP_CR0_MASK_FRF_ST, 21); 2039 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, 2040 SSP_CR1_MASK_MWAIT_ST, 6); 2041 } 2042 SSP_WRITE_BITS(chip->cr0, bits - 1, 2043 SSP_CR0_MASK_DSS_ST, 0); 2044 2045 if (spi->mode & SPI_LSB_FIRST) { 2046 tmp = SSP_RX_LSB; 2047 etx = SSP_TX_LSB; 2048 } else { 2049 tmp = SSP_RX_MSB; 2050 etx = SSP_TX_MSB; 2051 } 2052 SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); 2053 SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); 2054 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, 2055 SSP_CR1_MASK_RXIFLSEL_ST, 7); 2056 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, 2057 SSP_CR1_MASK_TXIFLSEL_ST, 10); 2058 } else { 2059 SSP_WRITE_BITS(chip->cr0, bits - 1, 2060 SSP_CR0_MASK_DSS, 0); 2061 SSP_WRITE_BITS(chip->cr0, chip_info->iface, 2062 SSP_CR0_MASK_FRF, 4); 2063 } 2064 2065 /* Stuff that is common for all versions */ 2066 if (spi->mode & SPI_CPOL) 2067 tmp = SSP_CLK_POL_IDLE_HIGH; 2068 else 2069 tmp = SSP_CLK_POL_IDLE_LOW; 2070 SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); 2071 2072 if (spi->mode & SPI_CPHA) 2073 tmp = SSP_CLK_SECOND_EDGE; 2074 else 2075 tmp = SSP_CLK_FIRST_EDGE; 2076 SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); 2077 2078 SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); 2079 /* Loopback is available on all versions except PL023 */ 2080 if (pl022->vendor->loopback) { 2081 if (spi->mode & SPI_LOOP) 2082 tmp = LOOPBACK_ENABLED; 2083 else 2084 tmp = LOOPBACK_DISABLED; 2085 SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); 2086 } 2087 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); 2088 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); 2089 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 2090 3); 2091 2092 /* Save controller_state */ 2093 spi_set_ctldata(spi, chip); 2094 return status; 2095 err_config_params: 2096 spi_set_ctldata(spi, NULL); 2097 kfree(chip); 2098 return status; 2099 } 2100 2101 /** 2102 * pl022_cleanup - cleanup function registered to SPI master framework 2103 * @spi: spi device which is requesting cleanup 2104 * 2105 * This function is registered to the SPI framework for this SPI master 2106 * controller. It will free the runtime state of chip. 2107 */ 2108 static void pl022_cleanup(struct spi_device *spi) 2109 { 2110 struct chip_data *chip = spi_get_ctldata(spi); 2111 2112 spi_set_ctldata(spi, NULL); 2113 kfree(chip); 2114 } 2115 2116 static int __devinit 2117 pl022_probe(struct amba_device *adev, const struct amba_id *id) 2118 { 2119 struct device *dev = &adev->dev; 2120 struct pl022_ssp_controller *platform_info = adev->dev.platform_data; 2121 struct spi_master *master; 2122 struct pl022 *pl022 = NULL; /*Data for this driver */ 2123 int status = 0; 2124 2125 dev_info(&adev->dev, 2126 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); 2127 if (platform_info == NULL) { 2128 dev_err(&adev->dev, "probe - no platform data supplied\n"); 2129 status = -ENODEV; 2130 goto err_no_pdata; 2131 } 2132 2133 /* Allocate master with space for data */ 2134 master = spi_alloc_master(dev, sizeof(struct pl022)); 2135 if (master == NULL) { 2136 dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); 2137 status = -ENOMEM; 2138 goto err_no_master; 2139 } 2140 2141 pl022 = spi_master_get_devdata(master); 2142 pl022->master = master; 2143 pl022->master_info = platform_info; 2144 pl022->adev = adev; 2145 pl022->vendor = id->data; 2146 2147 /* 2148 * Bus Number Which has been Assigned to this SSP controller 2149 * on this board 2150 */ 2151 master->bus_num = platform_info->bus_id; 2152 master->num_chipselect = platform_info->num_chipselect; 2153 master->cleanup = pl022_cleanup; 2154 master->setup = pl022_setup; 2155 master->transfer = pl022_transfer; 2156 2157 /* 2158 * Supports mode 0-3, loopback, and active low CS. Transfers are 2159 * always MS bit first on the original pl022. 2160 */ 2161 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 2162 if (pl022->vendor->extended_cr) 2163 master->mode_bits |= SPI_LSB_FIRST; 2164 2165 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); 2166 2167 status = amba_request_regions(adev, NULL); 2168 if (status) 2169 goto err_no_ioregion; 2170 2171 pl022->phybase = adev->res.start; 2172 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); 2173 if (pl022->virtbase == NULL) { 2174 status = -ENOMEM; 2175 goto err_no_ioremap; 2176 } 2177 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", 2178 adev->res.start, pl022->virtbase); 2179 pm_runtime_enable(dev); 2180 pm_runtime_resume(dev); 2181 2182 pl022->clk = clk_get(&adev->dev, NULL); 2183 if (IS_ERR(pl022->clk)) { 2184 status = PTR_ERR(pl022->clk); 2185 dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); 2186 goto err_no_clk; 2187 } 2188 2189 /* Disable SSP */ 2190 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 2191 SSP_CR1(pl022->virtbase)); 2192 load_ssp_default_config(pl022); 2193 2194 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", 2195 pl022); 2196 if (status < 0) { 2197 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); 2198 goto err_no_irq; 2199 } 2200 2201 /* Get DMA channels */ 2202 if (platform_info->enable_dma) { 2203 status = pl022_dma_probe(pl022); 2204 if (status != 0) 2205 platform_info->enable_dma = 0; 2206 } 2207 2208 /* Initialize and start queue */ 2209 status = init_queue(pl022); 2210 if (status != 0) { 2211 dev_err(&adev->dev, "probe - problem initializing queue\n"); 2212 goto err_init_queue; 2213 } 2214 status = start_queue(pl022); 2215 if (status != 0) { 2216 dev_err(&adev->dev, "probe - problem starting queue\n"); 2217 goto err_start_queue; 2218 } 2219 /* Register with the SPI framework */ 2220 amba_set_drvdata(adev, pl022); 2221 status = spi_register_master(master); 2222 if (status != 0) { 2223 dev_err(&adev->dev, 2224 "probe - problem registering spi master\n"); 2225 goto err_spi_register; 2226 } 2227 dev_dbg(dev, "probe succeeded\n"); 2228 /* 2229 * Disable the silicon block pclk and any voltage domain and just 2230 * power it up and clock it when it's needed 2231 */ 2232 amba_pclk_disable(adev); 2233 amba_vcore_disable(adev); 2234 return 0; 2235 2236 err_spi_register: 2237 err_start_queue: 2238 err_init_queue: 2239 destroy_queue(pl022); 2240 pl022_dma_remove(pl022); 2241 free_irq(adev->irq[0], pl022); 2242 pm_runtime_disable(&adev->dev); 2243 err_no_irq: 2244 clk_put(pl022->clk); 2245 err_no_clk: 2246 iounmap(pl022->virtbase); 2247 err_no_ioremap: 2248 amba_release_regions(adev); 2249 err_no_ioregion: 2250 spi_master_put(master); 2251 err_no_master: 2252 err_no_pdata: 2253 return status; 2254 } 2255 2256 static int __devexit 2257 pl022_remove(struct amba_device *adev) 2258 { 2259 struct pl022 *pl022 = amba_get_drvdata(adev); 2260 2261 if (!pl022) 2262 return 0; 2263 2264 /* Remove the queue */ 2265 if (destroy_queue(pl022) != 0) 2266 dev_err(&adev->dev, "queue remove failed\n"); 2267 load_ssp_default_config(pl022); 2268 pl022_dma_remove(pl022); 2269 free_irq(adev->irq[0], pl022); 2270 clk_disable(pl022->clk); 2271 clk_put(pl022->clk); 2272 iounmap(pl022->virtbase); 2273 amba_release_regions(adev); 2274 tasklet_disable(&pl022->pump_transfers); 2275 spi_unregister_master(pl022->master); 2276 spi_master_put(pl022->master); 2277 amba_set_drvdata(adev, NULL); 2278 return 0; 2279 } 2280 2281 #ifdef CONFIG_PM 2282 static int pl022_suspend(struct amba_device *adev, pm_message_t state) 2283 { 2284 struct pl022 *pl022 = amba_get_drvdata(adev); 2285 int status = 0; 2286 2287 status = stop_queue(pl022); 2288 if (status) { 2289 dev_warn(&adev->dev, "suspend cannot stop queue\n"); 2290 return status; 2291 } 2292 2293 amba_vcore_enable(adev); 2294 amba_pclk_enable(adev); 2295 load_ssp_default_config(pl022); 2296 amba_pclk_disable(adev); 2297 amba_vcore_disable(adev); 2298 dev_dbg(&adev->dev, "suspended\n"); 2299 return 0; 2300 } 2301 2302 static int pl022_resume(struct amba_device *adev) 2303 { 2304 struct pl022 *pl022 = amba_get_drvdata(adev); 2305 int status = 0; 2306 2307 /* Start the queue running */ 2308 status = start_queue(pl022); 2309 if (status) 2310 dev_err(&adev->dev, "problem starting queue (%d)\n", status); 2311 else 2312 dev_dbg(&adev->dev, "resumed\n"); 2313 2314 return status; 2315 } 2316 #else 2317 #define pl022_suspend NULL 2318 #define pl022_resume NULL 2319 #endif /* CONFIG_PM */ 2320 2321 static struct vendor_data vendor_arm = { 2322 .fifodepth = 8, 2323 .max_bpw = 16, 2324 .unidir = false, 2325 .extended_cr = false, 2326 .pl023 = false, 2327 .loopback = true, 2328 }; 2329 2330 static struct vendor_data vendor_st = { 2331 .fifodepth = 32, 2332 .max_bpw = 32, 2333 .unidir = false, 2334 .extended_cr = true, 2335 .pl023 = false, 2336 .loopback = true, 2337 }; 2338 2339 static struct vendor_data vendor_st_pl023 = { 2340 .fifodepth = 32, 2341 .max_bpw = 32, 2342 .unidir = false, 2343 .extended_cr = true, 2344 .pl023 = true, 2345 .loopback = false, 2346 }; 2347 2348 static struct vendor_data vendor_db5500_pl023 = { 2349 .fifodepth = 32, 2350 .max_bpw = 32, 2351 .unidir = false, 2352 .extended_cr = true, 2353 .pl023 = true, 2354 .loopback = true, 2355 }; 2356 2357 static struct amba_id pl022_ids[] = { 2358 { 2359 /* 2360 * ARM PL022 variant, this has a 16bit wide 2361 * and 8 locations deep TX/RX FIFO 2362 */ 2363 .id = 0x00041022, 2364 .mask = 0x000fffff, 2365 .data = &vendor_arm, 2366 }, 2367 { 2368 /* 2369 * ST Micro derivative, this has 32bit wide 2370 * and 32 locations deep TX/RX FIFO 2371 */ 2372 .id = 0x01080022, 2373 .mask = 0xffffffff, 2374 .data = &vendor_st, 2375 }, 2376 { 2377 /* 2378 * ST-Ericsson derivative "PL023" (this is not 2379 * an official ARM number), this is a PL022 SSP block 2380 * stripped to SPI mode only, it has 32bit wide 2381 * and 32 locations deep TX/RX FIFO but no extended 2382 * CR0/CR1 register 2383 */ 2384 .id = 0x00080023, 2385 .mask = 0xffffffff, 2386 .data = &vendor_st_pl023, 2387 }, 2388 { 2389 .id = 0x10080023, 2390 .mask = 0xffffffff, 2391 .data = &vendor_db5500_pl023, 2392 }, 2393 { 0, 0 }, 2394 }; 2395 2396 static struct amba_driver pl022_driver = { 2397 .drv = { 2398 .name = "ssp-pl022", 2399 }, 2400 .id_table = pl022_ids, 2401 .probe = pl022_probe, 2402 .remove = __devexit_p(pl022_remove), 2403 .suspend = pl022_suspend, 2404 .resume = pl022_resume, 2405 }; 2406 2407 static int __init pl022_init(void) 2408 { 2409 return amba_driver_register(&pl022_driver); 2410 } 2411 subsys_initcall(pl022_init); 2412 2413 static void __exit pl022_exit(void) 2414 { 2415 amba_driver_unregister(&pl022_driver); 2416 } 2417 module_exit(pl022_exit); 2418 2419 MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); 2420 MODULE_DESCRIPTION("PL022 SSP Controller Driver"); 2421 MODULE_LICENSE("GPL"); 2422