/*
 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
 *
 * Copyright (C) 2008-2009 ST-Ericsson AB
 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
 *
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 * Initial adoption to PL022 by:
 *	Sachin Verma <sachin.verma@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * This macro is used to define some register default values.
 * reg is masked with mask, then OR:ed with an (again masked)
 * val shifted sb steps to the left.
 */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
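
/*
 * For illustration only (SSP_DATA_BITS_8 comes from <linux/amba/pl022.h>,
 * SSP_CR0_MASK_DSS is defined further down): updating just the data size
 * field of a cached CR0 value would look like
 *
 *	u32 cr0 = 0;
 *	SSP_WRITE_BITS(cr0, SSP_DATA_BITS_8, SSP_CR0_MASK_DSS, 0);
 *
 * which clears the DSS field and ORs in the new, masked value, leaving the
 * remaining bits of cr0 untouched.
 */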

/*
 * This macro is also used to define some default values.
 * It will just shift val by sb steps to the left and mask
 * the result with mask.
 */
#define GEN_MASK_BITS(val, mask, sb) \
 (((val)<<(sb)) & (mask))

#define DRIVE_TX		0
#define DO_NOT_DRIVE_TX		1

#define DO_NOT_QUEUE_DMA	0
#define QUEUE_DMA		1

#define RX_TRANSFER		1
#define TX_TRANSFER		2

/*
 * Macros to access SSP Registers with their offsets
 */
#define SSP_CR0(r)	(r + 0x000)
#define SSP_CR1(r)	(r + 0x004)
#define SSP_DR(r)	(r + 0x008)
#define SSP_SR(r)	(r + 0x00C)
#define SSP_CPSR(r)	(r + 0x010)
#define SSP_IMSC(r)	(r + 0x014)
#define SSP_RIS(r)	(r + 0x018)
#define SSP_MIS(r)	(r + 0x01C)
#define SSP_ICR(r)	(r + 0x020)
#define SSP_DMACR(r)	(r + 0x024)
#define SSP_ITCR(r)	(r + 0x080)
#define SSP_ITIP(r)	(r + 0x084)
#define SSP_ITOP(r)	(r + 0x088)
#define SSP_TDR(r)	(r + 0x08C)

#define SSP_PID0(r)	(r + 0xFE0)
#define SSP_PID1(r)	(r + 0xFE4)
#define SSP_PID2(r)	(r + 0xFE8)
#define SSP_PID3(r)	(r + 0xFEC)

#define SSP_CID0(r)	(r + 0xFF0)
#define SSP_CID1(r)	(r + 0xFF4)
#define SSP_CID2(r)	(r + 0xFF8)
#define SSP_CID3(r)	(r + 0xFFC)

/*
 * SSP Control Register 0 - SSP_CR0
 */
#define SSP_CR0_MASK_DSS	(0x0FUL << 0)
#define SSP_CR0_MASK_FRF	(0x3UL << 4)
#define SSP_CR0_MASK_SPO	(0x1UL << 6)
#define SSP_CR0_MASK_SPH	(0x1UL << 7)
#define SSP_CR0_MASK_SCR	(0xFFUL << 8)

/*
 * The ST version of this block moves some bits
 * in SSP_CR0 and extends it to 32 bits
 */
#define SSP_CR0_MASK_DSS_ST	(0x1FUL << 0)
#define SSP_CR0_MASK_HALFDUP_ST	(0x1UL << 5)
#define SSP_CR0_MASK_CSS_ST	(0x1FUL << 16)
#define SSP_CR0_MASK_FRF_ST	(0x3UL << 21)


/*
 * SSP Control Register 1 - SSP_CR1
 */
#define SSP_CR1_MASK_LBM	(0x1UL << 0)
#define SSP_CR1_MASK_SSE	(0x1UL << 1)
#define SSP_CR1_MASK_MS		(0x1UL << 2)
#define SSP_CR1_MASK_SOD	(0x1UL << 3)

/*
 * The ST version of this block adds some bits
 * in SSP_CR1
 */
#define SSP_CR1_MASK_RENDN_ST	(0x1UL << 4)
#define SSP_CR1_MASK_TENDN_ST	(0x1UL << 5)
#define SSP_CR1_MASK_MWAIT_ST	(0x1UL << 6)
#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
/* This one is only in the PL023 variant */
#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)

/*
 * SSP Status Register - SSP_SR
 */
#define SSP_SR_MASK_TFE		(0x1UL << 0) /* Transmit FIFO empty */
#define SSP_SR_MASK_TNF		(0x1UL << 1) /* Transmit FIFO not full */
#define SSP_SR_MASK_RNE		(0x1UL << 2) /* Receive FIFO not empty */
#define SSP_SR_MASK_RFF		(0x1UL << 3) /* Receive FIFO full */
#define SSP_SR_MASK_BSY		(0x1UL << 4) /* Busy Flag */

/*
 * SSP Clock Prescale Register - SSP_CPSR
 */
#define SSP_CPSR_MASK_CPSDVSR	(0xFFUL << 0)

/*
 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
 */
#define SSP_IMSC_MASK_RORIM	(0x1UL << 0) /* Receive Overrun Interrupt mask */
#define SSP_IMSC_MASK_RTIM	(0x1UL << 1) /* Receive timeout Interrupt mask */
#define SSP_IMSC_MASK_RXIM	(0x1UL << 2) /* Receive FIFO Interrupt mask */
#define SSP_IMSC_MASK_TXIM	(0x1UL << 3) /* Transmit FIFO Interrupt mask */

/*
 * SSP Raw Interrupt Status Register - SSP_RIS
 */
/* Receive Overrun Raw Interrupt status */
#define SSP_RIS_MASK_RORRIS	(0x1UL << 0)
/* Receive Timeout Raw Interrupt status */
#define SSP_RIS_MASK_RTRIS	(0x1UL << 1)
/* Receive FIFO Raw Interrupt status */
#define SSP_RIS_MASK_RXRIS	(0x1UL << 2)
/* Transmit FIFO Raw Interrupt status */
#define SSP_RIS_MASK_TXRIS	(0x1UL << 3)

/*
 * SSP Masked Interrupt Status Register - SSP_MIS
 */
/* Receive Overrun Masked Interrupt status */
#define SSP_MIS_MASK_RORMIS	(0x1UL << 0)
/* Receive Timeout Masked Interrupt status */
#define SSP_MIS_MASK_RTMIS	(0x1UL << 1)
/* Receive FIFO Masked Interrupt status */
#define SSP_MIS_MASK_RXMIS	(0x1UL << 2)
/* Transmit FIFO Masked Interrupt status */
#define SSP_MIS_MASK_TXMIS	(0x1UL << 3)

/*
 * SSP Interrupt Clear Register - SSP_ICR
 */
/* Receive Overrun Raw Clear Interrupt bit */
#define SSP_ICR_MASK_RORIC	(0x1UL << 0)
/* Receive Timeout Clear Interrupt bit */
#define SSP_ICR_MASK_RTIC	(0x1UL << 1)

/*
 * SSP DMA Control Register - SSP_DMACR
 */
/* Receive DMA Enable bit */
#define SSP_DMACR_MASK_RXDMAE	(0x1UL << 0)
/* Transmit DMA Enable bit */
#define SSP_DMACR_MASK_TXDMAE	(0x1UL << 1)

/*
 * SSP Integration Test control Register - SSP_ITCR
 */
#define SSP_ITCR_MASK_ITEN	(0x1UL << 0)
#define SSP_ITCR_MASK_TESTFIFO	(0x1UL << 1)

/*
 * SSP Integration Test Input Register - SSP_ITIP
 */
#define ITIP_MASK_SSPRXD	(0x1UL << 0)
#define ITIP_MASK_SSPFSSIN	(0x1UL << 1)
#define ITIP_MASK_SSPCLKIN	(0x1UL << 2)
#define ITIP_MASK_RXDMAC	(0x1UL << 3)
#define ITIP_MASK_TXDMAC	(0x1UL << 4)
#define ITIP_MASK_SSPTXDIN	(0x1UL << 5)

/*
 * SSP Integration Test output Register - SSP_ITOP
 */
#define ITOP_MASK_SSPTXD	(0x1UL << 0)
#define ITOP_MASK_SSPFSSOUT	(0x1UL << 1)
#define ITOP_MASK_SSPCLKOUT	(0x1UL << 2)
#define ITOP_MASK_SSPOEn	(0x1UL << 3)
#define ITOP_MASK_SSPCTLOEn	(0x1UL << 4)
#define ITOP_MASK_RORINTR	(0x1UL << 5)
#define ITOP_MASK_RTINTR	(0x1UL << 6)
#define ITOP_MASK_RXINTR	(0x1UL << 7)
#define ITOP_MASK_TXINTR	(0x1UL << 8)
#define ITOP_MASK_INTR		(0x1UL << 9)
#define ITOP_MASK_RXDMABREQ	(0x1UL << 10)
#define ITOP_MASK_RXDMASREQ	(0x1UL << 11)
#define ITOP_MASK_TXDMABREQ	(0x1UL << 12)
#define ITOP_MASK_TXDMASREQ	(0x1UL << 13)

/*
 * SSP Test Data Register - SSP_TDR
 */
#define TDR_MASK_TESTDATA	(0xFFFFFFFF)

/*
 * Message State
 * we use the spi_message.state (void *) pointer to
 * hold a single state value, that's why all this
 * (void *) casting is done here.
 */
#define STATE_START			((void *) 0)
#define STATE_RUNNING			((void *) 1)
#define STATE_DONE			((void *) 2)
#define STATE_ERROR			((void *) -1)

/*
 * SSP State - Whether Enabled or Disabled
 */
#define SSP_DISABLED			(0)
#define SSP_ENABLED			(1)

/*
 * SSP DMA State - Whether DMA Enabled or Disabled
 */
#define SSP_DMA_DISABLED		(0)
#define SSP_DMA_ENABLED			(1)

/*
 * SSP Clock Defaults
 */
#define SSP_DEFAULT_CLKRATE	0x2
#define SSP_DEFAULT_PRESCALE	0x40

/*
 * SSP Clock Parameter ranges
 */
#define CPSDVR_MIN 0x02
#define CPSDVR_MAX 0xFE
#define SCR_MIN 0x00
#define SCR_MAX 0xFF

/*
 * SSP Interrupt related Macros
 */
#define DEFAULT_SSP_REG_IMSC	0x0UL
#define DISABLE_ALL_INTERRUPTS	DEFAULT_SSP_REG_IMSC
#define ENABLE_ALL_INTERRUPTS	(~DEFAULT_SSP_REG_IMSC)

#define CLEAR_ALL_INTERRUPTS	0x3

#define SPI_POLLING_TIMEOUT	1000


/*
 * The type of reading going on on this chip
 */
enum ssp_reading {
	READING_NULL,
	READING_U8,
	READING_U16,
	READING_U32
};

/**
 * The type of writing going on on this chip
 */
enum ssp_writing {
	WRITING_NULL,
	WRITING_U8,
	WRITING_U16,
	WRITING_U32
};

/**
 * struct vendor_data - vendor-specific config parameters
 * for PL022 derivatives
 * @fifodepth: depth of FIFOs (both)
 * @max_bpw: maximum number of bits per word
 * @unidir: supports unidirectional transfers
 * @extended_cr: 32 bit wide control register 0 with extra
 * features and extra features in CR1 as found in the ST variants
 * @pl023: supports a subset of the ST extensions called "PL023"
 * @loopback: supports loopback mode
 */
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
	bool extended_cr;
	bool pl023;
	bool loopback;
};
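
/*
 * Purely illustrative initializer (the values below are assumptions for
 * the example, not taken from this file): a plain ARM PL022 without the
 * ST extensions could be described along these lines:
 *
 *	static struct vendor_data vendor_example = {
 *		.fifodepth = 8,
 *		.max_bpw = 16,
 *		.unidir = false,
 *		.extended_cr = false,
 *		.pl023 = false,
 *		.loopback = true,
 *	};
 */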

/**
 * struct pl022 - This is the private SSP driver data structure
 * @adev: AMBA device model hookup
 * @vendor: vendor data for the IP block
 * @phybase: the physical memory where the SSP device resides
 * @virtbase: the virtual memory where the SSP is mapped
 * @clk: outgoing clock "SPICLK" for the SPI bus
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @workqueue: a workqueue on which any spi_message request is queued
 * @pump_messages: work struct for scheduling work to the workqueue
 * @queue_lock: spinlock to synchronise access to message queue
 * @queue: message queue
 * @busy: workqueue is busy
 * @running: workqueue is running
 * @pump_transfers: Tasklet used in Interrupt Transfer mode
 * @cur_msg: Pointer to current spi_message being processed
 * @cur_transfer: Pointer to current spi_transfer
 * @cur_chip: pointer to current client's chip (assigned from controller_state)
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the type of read currently going on
 * @write: the type of write currently going on
 * @exp_fifo_level: expected FIFO level
 * @dma_rx_channel: optional channel for RX DMA
 * @dma_tx_channel: optional channel for TX DMA
 * @sgt_rx: scattertable for the RX transfer
 * @sgt_tx: scattertable for the TX transfer
 * @dummypage: a dummy page used for driving data on the bus with DMA
 */
struct pl022 {
	struct amba_device		*adev;
	struct vendor_data		*vendor;
	resource_size_t			phybase;
	void __iomem			*virtbase;
	struct clk			*clk;
	struct spi_master		*master;
	struct pl022_ssp_controller	*master_info;
	/* Driver message queue */
	struct workqueue_struct		*workqueue;
	struct work_struct		pump_messages;
	spinlock_t			queue_lock;
	struct list_head		queue;
	bool				busy;
	bool				running;
	/* Message transfer pump */
	struct tasklet_struct		pump_transfers;
	struct spi_message		*cur_msg;
	struct spi_transfer		*cur_transfer;
	struct chip_data		*cur_chip;
	void				*tx;
	void				*tx_end;
	void				*rx;
	void				*rx_end;
	enum ssp_reading		read;
	enum ssp_writing		write;
	u32				exp_fifo_level;
	/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
	struct dma_chan			*dma_rx_channel;
	struct dma_chan			*dma_tx_channel;
	struct sg_table			sgt_rx;
	struct sg_table			sgt_tx;
	char				*dummypage;
#endif
};

/**
 * struct chip_data - To maintain runtime state of SSP for each client chip
 * @cr0: Value of control register CR0 of SSP - on later ST variants this
 *       register is 32 bits wide rather than just 16
 * @cr1: Value of control register CR1 of SSP
 * @dmacr: Value of DMA control Register of SSP
 * @cpsr: Value of Clock prescale register
 * @n_bytes: how many bytes (power of 2) required for a given data width of client
 * @enable_dma: Whether to enable DMA or not
 * @read: function ptr to be used to read when doing xfer for this chip
 * @write: function ptr to be used to write when doing xfer for this chip
 * @cs_control: chip select callback provided by chip
 * @xfer_type: polling/interrupt/DMA
 *
 * Runtime state of the SSP controller, maintained per chip.
 * This would be set according to the current message that would be served
 */
struct chip_data {
	u32 cr0;
	u16 cr1;
	u16 dmacr;
	u16 cpsr;
	u8 n_bytes;
	bool enable_dma;
	enum ssp_reading read;
	enum ssp_writing write;
	void (*cs_control) (u32 command);
	int xfer_type;
};

/**
 * null_cs_control - Dummy chip select function
 * @command: select/deselect the chip
 *
 * If no chip select function is provided by client this is used as dummy
 * chip select
 */
static void null_cs_control(u32 command)
{
	pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
}
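
/*
 * For illustration only: boards normally supply their own chip select
 * callback through struct pl022_config_chip. A hypothetical active-low
 * GPIO implementation (the GPIO number is an assumption for the example,
 * SSP_CHIP_SELECT/SSP_CHIP_DESELECT come from <linux/amba/pl022.h>):
 *
 *	static void board_cs_control(u32 command)
 *	{
 *		gpio_set_value(BOARD_SPI_CS_GPIO,
 *			       command == SSP_CHIP_SELECT ? 0 : 1);
 *	}
 */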

/**
 * giveback - current spi_message is over, schedule next message and call
 * callback of this message. Assumes that caller already
 * set message->status; dma and pio irqs are blocked
 * @pl022: SSP driver private data structure
 */
static void giveback(struct pl022 *pl022)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;
	void (*curr_cs_control) (u32 command);

	/*
	 * This local reference to the chip select function
	 * is needed because we set curr_chip to NULL
	 * as a step toward terminating the message.
	 */
	curr_cs_control = pl022->cur_chip->cs_control;
	spin_lock_irqsave(&pl022->queue_lock, flags);
	msg = pl022->cur_msg;
	pl022->cur_msg = NULL;
	pl022->cur_transfer = NULL;
	pl022->cur_chip = NULL;
	queue_work(pl022->workqueue, &pl022->pump_messages);
	spin_unlock_irqrestore(&pl022->queue_lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		/*
		 * FIXME: This runs in interrupt context.
		 * Is this really smart?
		 */
		udelay(last_transfer->delay_usecs);

	/*
	 * Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		curr_cs_control(SSP_CHIP_DESELECT);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		spin_lock_irqsave(&pl022->queue_lock, flags);
		if (list_empty(&pl022->queue))
			next_msg = NULL;
		else
			next_msg = list_entry(pl022->queue.next,
					struct spi_message, queue);
		spin_unlock_irqrestore(&pl022->queue_lock, flags);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == STATE_ERROR)
			curr_cs_control(SSP_CHIP_DESELECT);
	}
	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);
	/* This message is completed, so let's turn off the clocks & power */
	clk_disable(pl022->clk);
	amba_pclk_disable(pl022->adev);
	amba_vcore_disable(pl022->adev);
}

/**
 * flush - flush the FIFO to reach a clean state
 * @pl022: SSP driver private data structure
 */
static int flush(struct pl022 *pl022)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&pl022->adev->dev, "flush\n");
	do {
		while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
			readw(SSP_DR(pl022->virtbase));
	} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);

	pl022->exp_fifo_level = 0;

	return limit;
}

/**
 * restore_state - Load configuration of current chip
 * @pl022: SSP driver private data structure
 */
static void restore_state(struct pl022 *pl022)
{
	struct chip_data *chip = pl022->cur_chip;

	if (pl022->vendor->extended_cr)
		writel(chip->cr0, SSP_CR0(pl022->virtbase));
	else
		writew(chip->cr0, SSP_CR0(pl022->virtbase));
	writew(chip->cr1, SSP_CR1(pl022->virtbase));
	writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
	writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}

/*
 * Default SSP Register Values
 */
#define DEFAULT_SSP_REG_CR0 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

/* ST versions have slightly different bit layout */
#define DEFAULT_SSP_REG_CR0_ST ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
	GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
)

/* The PL023 version is slightly different again */
#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

#define DEFAULT_SSP_REG_CR1 ( \
	GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
)

/* ST versions extend this register to use all 16 bits */
#define DEFAULT_SSP_REG_CR1_ST ( \
	DEFAULT_SSP_REG_CR1 | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
)
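
/*
 * Worked example of how the GEN_MASK_BITS() defaults compose (enum values
 * taken from <linux/amba/pl022.h>, so treat the arithmetic as illustrative):
 * SSP_DATA_BITS_12 is 0x0B, SSP_INTERFACE_MOTOROLA_SPI and
 * SSP_CLK_POL_IDLE_LOW are 0, SSP_CLK_SECOND_EDGE is 1 and
 * SSP_DEFAULT_CLKRATE is 0x2, so DEFAULT_SSP_REG_CR0 evaluates to
 * 0x0B | (0x1 << 7) | (0x2 << 8) = 0x028B.
 */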

/*
 * The PL023 variant has further differences: no loopback mode, no microwire
 * support, and a new clock feedback delay setting.
 */
#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
	GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
)

#define DEFAULT_SSP_REG_CPSR ( \
	GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)

#define DEFAULT_SSP_REG_DMACR (\
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)

/**
 * load_ssp_default_config - Load default configuration for SSP
 * @pl022: SSP driver private data structure
 */
static void load_ssp_default_config(struct pl022 *pl022)
{
	if (pl022->vendor->pl023) {
		writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
	} else if (pl022->vendor->extended_cr) {
		writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
	} else {
		writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
	}
	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}

/**
 * This will write to TX and read from RX according to the parameters
 * set in pl022.
 */
static void readwriter(struct pl022 *pl022)
{

	/*
	 * The FIFO depth is different between primecell variants.
	 * I believe filling in too much in the FIFO might cause
	 * errors in 8bit wide transfers on ARM variants (just 8 words
	 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
	 *
	 * To prevent this issue, the TX FIFO is only filled to the
	 * unused RX FIFO fill length, regardless of what the TX
	 * FIFO status flag indicates.
	 */
	dev_dbg(&pl022->adev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);

	/* Read as much as you can */
	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
	       && (pl022->rx < pl022->rx_end)) {
		switch (pl022->read) {
		case READING_NULL:
			readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U8:
			*(u8 *) (pl022->rx) =
				readw(SSP_DR(pl022->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (pl022->rx) =
				(u16) readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U32:
			*(u32 *) (pl022->rx) =
				readl(SSP_DR(pl022->virtbase));
			break;
		}
		pl022->rx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level--;
	}
	/*
	 * Write as much as possible up to the RX FIFO size
	 */
	while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
	       && (pl022->tx < pl022->tx_end)) {
		switch (pl022->write) {
		case WRITING_NULL:
			writew(0x0, SSP_DR(pl022->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		}
		pl022->tx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level++;
		/*
		 * This inner reader takes care of things appearing in the RX
		 * FIFO as we're transmitting. This will happen a lot since the
		 * clock starts running when you put things into the TX FIFO,
		 * and then things are continuously clocked into the RX FIFO.
		 */
		while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
		       && (pl022->rx < pl022->rx_end)) {
			switch (pl022->read) {
			case READING_NULL:
				readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U8:
				*(u8 *) (pl022->rx) =
					readw(SSP_DR(pl022->virtbase)) & 0xFFU;
				break;
			case READING_U16:
				*(u16 *) (pl022->rx) =
					(u16) readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U32:
				*(u32 *) (pl022->rx) =
					readl(SSP_DR(pl022->virtbase));
				break;
			}
			pl022->rx += (pl022->cur_chip->n_bytes);
			pl022->exp_fifo_level--;
		}
	}
	/*
	 * When we exit here the TX FIFO should be full and the RX FIFO
	 * should be empty
	 */
}


/**
 * next_transfer - Move to the Next transfer in the current spi message
 * @pl022: SSP driver private data structure
 *
 * This function moves through the linked list of spi transfers in the
 * current spi message and returns with the state of current spi
 * message, i.e. whether its last transfer is done (STATE_DONE) or the
 * next transfer is ready (STATE_RUNNING)
 */
static void *next_transfer(struct pl022 *pl022)
{
	struct spi_message *msg = pl022->cur_msg;
	struct spi_transfer *trans = pl022->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		pl022->cur_transfer =
		    list_entry(trans->transfer_list.next,
			       struct spi_transfer, transfer_list);
		return STATE_RUNNING;
	}
	return STATE_DONE;
}

/*
 * This DMA functionality is only compiled in if we have
 * access to the generic DMA devices/DMA engine.
 */
#ifdef CONFIG_DMA_ENGINE
static void unmap_free_dma_scatter(struct pl022 *pl022)
{
	/* Unmap and free the SG tables */
	dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
	dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	sg_free_table(&pl022->sgt_rx);
	sg_free_table(&pl022->sgt_tx);
}

static void dma_callback(void *data)
{
	struct pl022 *pl022 = data;
	struct spi_message *msg = pl022->cur_msg;

	BUG_ON(!pl022->sgt_rx.sgl);

#ifdef VERBOSE_DEBUG
	/*
	 * Optionally dump out buffers to inspect contents, this is
	 * good if you want to convince yourself that the loopback
	 * read/write contents are the same, when adapting to a new
	 * DMA engine.
	 */
	{
		struct scatterlist *sg;
		unsigned int i;

		dma_sync_sg_for_cpu(&pl022->adev->dev,
				    pl022->sgt_rx.sgl,
				    pl022->sgt_rx.nents,
				    DMA_FROM_DEVICE);

		for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI RX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
		for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI TX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
	}
#endif

	unmap_free_dma_scatter(pl022);

	/* Update total bytes transferred */
	msg->actual_length += pl022->cur_transfer->len;
	if (pl022->cur_transfer->cs_change)
		pl022->cur_chip->
			cs_control(SSP_CHIP_DESELECT);

	/* Move to next transfer */
	msg->state = next_transfer(pl022);
	tasklet_schedule(&pl022->pump_transfers);
}
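
/*
 * setup_dma_scatter() below chops the transfer buffer into page sized
 * scatterlist entries. As a worked example with 4 KiB pages: a 5000 byte
 * buffer that starts 100 bytes into a page becomes one 3996 byte entry
 * followed by one 1004 byte entry. With no buffer at all, the dummy page
 * is mapped over and over instead.
 */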
static void setup_dma_scatter(struct pl022 *pl022,
			      void *buffer,
			      unsigned int length,
			      struct sg_table *sgtab)
{
	struct scatterlist *sg;
	int bytesleft = length;
	void *bufp = buffer;
	int mapbytes;
	int i;

	if (buffer) {
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			/*
			 * If there are fewer bytes left than what fits
			 * in the current page (plus page alignment offset)
			 * we just feed in this, else we stuff in as much
			 * as we can.
			 */
			if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE - offset_in_page(bufp);
			sg_set_page(sg, virt_to_page(bufp),
				    mapbytes, offset_in_page(bufp));
			bufp += mapbytes;
			bytesleft -= mapbytes;
			dev_dbg(&pl022->adev->dev,
				"set RX/TX target page @ %p, %d bytes, %d left\n",
				bufp, mapbytes, bytesleft);
		}
	} else {
		/* Map the dummy buffer on every page */
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			if (bytesleft < PAGE_SIZE)
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE;
			sg_set_page(sg, virt_to_page(pl022->dummypage),
				    mapbytes, 0);
			bytesleft -= mapbytes;
			dev_dbg(&pl022->adev->dev,
				"set RX/TX to dummy page %d bytes, %d left\n",
				mapbytes, bytesleft);

		}
	}
	BUG_ON(bytesleft);
}

/**
 * configure_dma - configures the channels for the next transfer
 * @pl022: SSP driver's private data structure
 */
static int configure_dma(struct pl022 *pl022)
{
	struct dma_slave_config rx_conf = {
		.src_addr = SSP_DR(pl022->phybase),
		.direction = DMA_FROM_DEVICE,
		.src_maxburst = pl022->vendor->fifodepth >> 1,
	};
	struct dma_slave_config tx_conf = {
		.dst_addr = SSP_DR(pl022->phybase),
		.direction = DMA_TO_DEVICE,
		.dst_maxburst = pl022->vendor->fifodepth >> 1,
	};
	unsigned int pages;
	int ret;
	int rx_sglen, tx_sglen;
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	switch (pl022->read) {
	case READING_NULL:
		/* Use the same as for writing */
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case READING_U8:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case READING_U16:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case READING_U32:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	switch (pl022->write) {
	case WRITING_NULL:
		/* Use the same as for reading */
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case WRITING_U8:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case WRITING_U16:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case WRITING_U32:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	/* SPI peculiarity: we need to read and write the same width */
	if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		rx_conf.src_addr_width = tx_conf.dst_addr_width;
	if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		tx_conf.dst_addr_width = rx_conf.src_addr_width;
	BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);

	dmaengine_slave_config(rxchan, &rx_conf);
	dmaengine_slave_config(txchan, &tx_conf);

	/* Create sglists for the transfers */
	pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
	dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);

	ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL);
	if (ret)
		goto err_alloc_rx_sg;

	ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL);
	if (ret)
		goto err_alloc_tx_sg;

	/* Fill in the scatterlists for the RX+TX buffers */
	setup_dma_scatter(pl022, pl022->rx,
			  pl022->cur_transfer->len, &pl022->sgt_rx);
	setup_dma_scatter(pl022, pl022->tx,
			  pl022->cur_transfer->len, &pl022->sgt_tx);

	/* Map DMA buffers */
	rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
			      pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	if (!rx_sglen)
		goto err_rx_sgmap;

	tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
			      pl022->sgt_tx.nents, DMA_TO_DEVICE);
	if (!tx_sglen)
		goto err_tx_sgmap;

	/* Send both scatterlists */
	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
				      pl022->sgt_rx.sgl,
				      rx_sglen,
				      DMA_FROM_DEVICE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		goto err_rxdesc;

	txdesc = txchan->device->device_prep_slave_sg(txchan,
				      pl022->sgt_tx.sgl,
				      tx_sglen,
				      DMA_TO_DEVICE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		goto err_txdesc;

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = pl022;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	dmaengine_submit(rxdesc);
	dmaengine_submit(txdesc);
	dma_async_issue_pending(rxchan);
	dma_async_issue_pending(txchan);

	return 0;

err_txdesc:
	dmaengine_terminate_all(txchan);
err_rxdesc:
	dmaengine_terminate_all(rxchan);
	dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
	dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
err_rx_sgmap:
	sg_free_table(&pl022->sgt_tx);
err_alloc_tx_sg:
	sg_free_table(&pl022->sgt_rx);
err_alloc_rx_sg:
	return -ENOMEM;
}
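
/*
 * Hypothetical illustration of the platform data that feeds the channel
 * request below (field names from struct pl022_ssp_controller in
 * <linux/amba/pl022.h>, everything else is made up for the example):
 *
 *	static struct pl022_ssp_controller ssp0_platform_data = {
 *		.bus_id = 0,
 *		.enable_dma = 1,
 *		.dma_filter = board_dma_filter,
 *		.dma_rx_param = &board_ssp0_dma_rx_cfg,
 *		.dma_tx_param = &board_ssp0_dma_tx_cfg,
 *		.num_chipselect = 4,
 *	};
 *
 * pl022_dma_probe() hands dma_filter and the two channel parameters straight
 * to dma_request_channel().
 */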
1056*ca632f55SGrant Likely */ 1057*ca632f55SGrant Likely pl022->dma_rx_channel = dma_request_channel(mask, 1058*ca632f55SGrant Likely pl022->master_info->dma_filter, 1059*ca632f55SGrant Likely pl022->master_info->dma_rx_param); 1060*ca632f55SGrant Likely if (!pl022->dma_rx_channel) { 1061*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); 1062*ca632f55SGrant Likely goto err_no_rxchan; 1063*ca632f55SGrant Likely } 1064*ca632f55SGrant Likely 1065*ca632f55SGrant Likely pl022->dma_tx_channel = dma_request_channel(mask, 1066*ca632f55SGrant Likely pl022->master_info->dma_filter, 1067*ca632f55SGrant Likely pl022->master_info->dma_tx_param); 1068*ca632f55SGrant Likely if (!pl022->dma_tx_channel) { 1069*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); 1070*ca632f55SGrant Likely goto err_no_txchan; 1071*ca632f55SGrant Likely } 1072*ca632f55SGrant Likely 1073*ca632f55SGrant Likely pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1074*ca632f55SGrant Likely if (!pl022->dummypage) { 1075*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); 1076*ca632f55SGrant Likely goto err_no_dummypage; 1077*ca632f55SGrant Likely } 1078*ca632f55SGrant Likely 1079*ca632f55SGrant Likely dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", 1080*ca632f55SGrant Likely dma_chan_name(pl022->dma_rx_channel), 1081*ca632f55SGrant Likely dma_chan_name(pl022->dma_tx_channel)); 1082*ca632f55SGrant Likely 1083*ca632f55SGrant Likely return 0; 1084*ca632f55SGrant Likely 1085*ca632f55SGrant Likely err_no_dummypage: 1086*ca632f55SGrant Likely dma_release_channel(pl022->dma_tx_channel); 1087*ca632f55SGrant Likely err_no_txchan: 1088*ca632f55SGrant Likely dma_release_channel(pl022->dma_rx_channel); 1089*ca632f55SGrant Likely pl022->dma_rx_channel = NULL; 1090*ca632f55SGrant Likely err_no_rxchan: 1091*ca632f55SGrant Likely dev_err(&pl022->adev->dev, 1092*ca632f55SGrant Likely "Failed to work in dma mode, work without dma!\n"); 1093*ca632f55SGrant Likely return -ENODEV; 1094*ca632f55SGrant Likely } 1095*ca632f55SGrant Likely 1096*ca632f55SGrant Likely static void terminate_dma(struct pl022 *pl022) 1097*ca632f55SGrant Likely { 1098*ca632f55SGrant Likely struct dma_chan *rxchan = pl022->dma_rx_channel; 1099*ca632f55SGrant Likely struct dma_chan *txchan = pl022->dma_tx_channel; 1100*ca632f55SGrant Likely 1101*ca632f55SGrant Likely dmaengine_terminate_all(rxchan); 1102*ca632f55SGrant Likely dmaengine_terminate_all(txchan); 1103*ca632f55SGrant Likely unmap_free_dma_scatter(pl022); 1104*ca632f55SGrant Likely } 1105*ca632f55SGrant Likely 1106*ca632f55SGrant Likely static void pl022_dma_remove(struct pl022 *pl022) 1107*ca632f55SGrant Likely { 1108*ca632f55SGrant Likely if (pl022->busy) 1109*ca632f55SGrant Likely terminate_dma(pl022); 1110*ca632f55SGrant Likely if (pl022->dma_tx_channel) 1111*ca632f55SGrant Likely dma_release_channel(pl022->dma_tx_channel); 1112*ca632f55SGrant Likely if (pl022->dma_rx_channel) 1113*ca632f55SGrant Likely dma_release_channel(pl022->dma_rx_channel); 1114*ca632f55SGrant Likely kfree(pl022->dummypage); 1115*ca632f55SGrant Likely } 1116*ca632f55SGrant Likely 1117*ca632f55SGrant Likely #else 1118*ca632f55SGrant Likely static inline int configure_dma(struct pl022 *pl022) 1119*ca632f55SGrant Likely { 1120*ca632f55SGrant Likely return -ENODEV; 1121*ca632f55SGrant Likely } 1122*ca632f55SGrant Likely 1123*ca632f55SGrant Likely static inline int pl022_dma_probe(struct pl022 *pl022) 1124*ca632f55SGrant Likely { 1125*ca632f55SGrant Likely return 0; 
}

static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
#endif

/**
 * pl022_interrupt_handler - Interrupt handler for SSP controller
 * @irq: IRQ number
 * @dev_id: Local data, here the driver's struct pl022
 *
 * This function handles interrupts generated for an interrupt based transfer.
 * If a receive overrun (ROR) interrupt occurs, we disable the SSP, flag the
 * current message's state as STATE_ERROR and schedule the tasklet
 * pump_transfers, which will do the postprocessing of the current message by
 * calling giveback(). Otherwise it reads data from the RX FIFO until it is
 * empty, and writes data to the TX FIFO until it is full. Once the current
 * transfer is complete we move to the next transfer and schedule the tasklet.
 */
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
	struct pl022 *pl022 = dev_id;
	struct spi_message *msg = pl022->cur_msg;
	u16 irq_status = 0;
	u16 flag = 0;

	if (unlikely(!msg)) {
		dev_err(&pl022->adev->dev,
			"bad message state in interrupt handler");
		/* Never fail */
		return IRQ_HANDLED;
	}

	/* Read the Interrupt Status Register */
	irq_status = readw(SSP_MIS(pl022->virtbase));

	if (unlikely(!irq_status))
		return IRQ_NONE;

	/*
	 * This handles the FIFO interrupts, the timeout
	 * interrupts are flatly ignored, they cannot be
	 * trusted.
	 */
	if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
		/*
		 * Overrun interrupt - bail out since our data has been
		 * corrupted
		 */
		dev_err(&pl022->adev->dev, "FIFO overrun\n");
		if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
			dev_err(&pl022->adev->dev,
				"RXFIFO is full\n");
		/* TNF means "TX FIFO not full", so the FIFO is full when TNF is clear */
		if (!(readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF))
			dev_err(&pl022->adev->dev,
				"TXFIFO is full\n");

		/*
		 * Disable and clear interrupts, disable SSP,
		 * mark message with bad status so it can be
		 * retried.
1186*ca632f55SGrant Likely */ 1187*ca632f55SGrant Likely writew(DISABLE_ALL_INTERRUPTS, 1188*ca632f55SGrant Likely SSP_IMSC(pl022->virtbase)); 1189*ca632f55SGrant Likely writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 1190*ca632f55SGrant Likely writew((readw(SSP_CR1(pl022->virtbase)) & 1191*ca632f55SGrant Likely (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); 1192*ca632f55SGrant Likely msg->state = STATE_ERROR; 1193*ca632f55SGrant Likely 1194*ca632f55SGrant Likely /* Schedule message queue handler */ 1195*ca632f55SGrant Likely tasklet_schedule(&pl022->pump_transfers); 1196*ca632f55SGrant Likely return IRQ_HANDLED; 1197*ca632f55SGrant Likely } 1198*ca632f55SGrant Likely 1199*ca632f55SGrant Likely readwriter(pl022); 1200*ca632f55SGrant Likely 1201*ca632f55SGrant Likely if ((pl022->tx == pl022->tx_end) && (flag == 0)) { 1202*ca632f55SGrant Likely flag = 1; 1203*ca632f55SGrant Likely /* Disable Transmit interrupt */ 1204*ca632f55SGrant Likely writew(readw(SSP_IMSC(pl022->virtbase)) & 1205*ca632f55SGrant Likely (~SSP_IMSC_MASK_TXIM), 1206*ca632f55SGrant Likely SSP_IMSC(pl022->virtbase)); 1207*ca632f55SGrant Likely } 1208*ca632f55SGrant Likely 1209*ca632f55SGrant Likely /* 1210*ca632f55SGrant Likely * Since all transactions must write as much as shall be read, 1211*ca632f55SGrant Likely * we can conclude the entire transaction once RX is complete. 1212*ca632f55SGrant Likely * At this point, all TX will always be finished. 1213*ca632f55SGrant Likely */ 1214*ca632f55SGrant Likely if (pl022->rx >= pl022->rx_end) { 1215*ca632f55SGrant Likely writew(DISABLE_ALL_INTERRUPTS, 1216*ca632f55SGrant Likely SSP_IMSC(pl022->virtbase)); 1217*ca632f55SGrant Likely writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 1218*ca632f55SGrant Likely if (unlikely(pl022->rx > pl022->rx_end)) { 1219*ca632f55SGrant Likely dev_warn(&pl022->adev->dev, "read %u surplus " 1220*ca632f55SGrant Likely "bytes (did you request an odd " 1221*ca632f55SGrant Likely "number of bytes on a 16bit bus?)\n", 1222*ca632f55SGrant Likely (u32) (pl022->rx - pl022->rx_end)); 1223*ca632f55SGrant Likely } 1224*ca632f55SGrant Likely /* Update total bytes transferred */ 1225*ca632f55SGrant Likely msg->actual_length += pl022->cur_transfer->len; 1226*ca632f55SGrant Likely if (pl022->cur_transfer->cs_change) 1227*ca632f55SGrant Likely pl022->cur_chip-> 1228*ca632f55SGrant Likely cs_control(SSP_CHIP_DESELECT); 1229*ca632f55SGrant Likely /* Move to next transfer */ 1230*ca632f55SGrant Likely msg->state = next_transfer(pl022); 1231*ca632f55SGrant Likely tasklet_schedule(&pl022->pump_transfers); 1232*ca632f55SGrant Likely return IRQ_HANDLED; 1233*ca632f55SGrant Likely } 1234*ca632f55SGrant Likely 1235*ca632f55SGrant Likely return IRQ_HANDLED; 1236*ca632f55SGrant Likely } 1237*ca632f55SGrant Likely 1238*ca632f55SGrant Likely /** 1239*ca632f55SGrant Likely * This sets up the pointers to memory for the next message to 1240*ca632f55SGrant Likely * send out on the SPI bus. 
1241*ca632f55SGrant Likely */ 1242*ca632f55SGrant Likely static int set_up_next_transfer(struct pl022 *pl022, 1243*ca632f55SGrant Likely struct spi_transfer *transfer) 1244*ca632f55SGrant Likely { 1245*ca632f55SGrant Likely int residue; 1246*ca632f55SGrant Likely 1247*ca632f55SGrant Likely /* Sanity check the message for this bus width */ 1248*ca632f55SGrant Likely residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; 1249*ca632f55SGrant Likely if (unlikely(residue != 0)) { 1250*ca632f55SGrant Likely dev_err(&pl022->adev->dev, 1251*ca632f55SGrant Likely "message of %u bytes to transmit but the current " 1252*ca632f55SGrant Likely "chip bus has a data width of %u bytes!\n", 1253*ca632f55SGrant Likely pl022->cur_transfer->len, 1254*ca632f55SGrant Likely pl022->cur_chip->n_bytes); 1255*ca632f55SGrant Likely dev_err(&pl022->adev->dev, "skipping this message\n"); 1256*ca632f55SGrant Likely return -EIO; 1257*ca632f55SGrant Likely } 1258*ca632f55SGrant Likely pl022->tx = (void *)transfer->tx_buf; 1259*ca632f55SGrant Likely pl022->tx_end = pl022->tx + pl022->cur_transfer->len; 1260*ca632f55SGrant Likely pl022->rx = (void *)transfer->rx_buf; 1261*ca632f55SGrant Likely pl022->rx_end = pl022->rx + pl022->cur_transfer->len; 1262*ca632f55SGrant Likely pl022->write = 1263*ca632f55SGrant Likely pl022->tx ? pl022->cur_chip->write : WRITING_NULL; 1264*ca632f55SGrant Likely pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; 1265*ca632f55SGrant Likely return 0; 1266*ca632f55SGrant Likely } 1267*ca632f55SGrant Likely 1268*ca632f55SGrant Likely /** 1269*ca632f55SGrant Likely * pump_transfers - Tasklet function which schedules next transfer 1270*ca632f55SGrant Likely * when running in interrupt or DMA transfer mode. 1271*ca632f55SGrant Likely * @data: SSP driver private data structure 1272*ca632f55SGrant Likely * 1273*ca632f55SGrant Likely */ 1274*ca632f55SGrant Likely static void pump_transfers(unsigned long data) 1275*ca632f55SGrant Likely { 1276*ca632f55SGrant Likely struct pl022 *pl022 = (struct pl022 *) data; 1277*ca632f55SGrant Likely struct spi_message *message = NULL; 1278*ca632f55SGrant Likely struct spi_transfer *transfer = NULL; 1279*ca632f55SGrant Likely struct spi_transfer *previous = NULL; 1280*ca632f55SGrant Likely 1281*ca632f55SGrant Likely /* Get current state information */ 1282*ca632f55SGrant Likely message = pl022->cur_msg; 1283*ca632f55SGrant Likely transfer = pl022->cur_transfer; 1284*ca632f55SGrant Likely 1285*ca632f55SGrant Likely /* Handle for abort */ 1286*ca632f55SGrant Likely if (message->state == STATE_ERROR) { 1287*ca632f55SGrant Likely message->status = -EIO; 1288*ca632f55SGrant Likely giveback(pl022); 1289*ca632f55SGrant Likely return; 1290*ca632f55SGrant Likely } 1291*ca632f55SGrant Likely 1292*ca632f55SGrant Likely /* Handle end of message */ 1293*ca632f55SGrant Likely if (message->state == STATE_DONE) { 1294*ca632f55SGrant Likely message->status = 0; 1295*ca632f55SGrant Likely giveback(pl022); 1296*ca632f55SGrant Likely return; 1297*ca632f55SGrant Likely } 1298*ca632f55SGrant Likely 1299*ca632f55SGrant Likely /* Delay if requested at end of transfer before CS change */ 1300*ca632f55SGrant Likely if (message->state == STATE_RUNNING) { 1301*ca632f55SGrant Likely previous = list_entry(transfer->transfer_list.prev, 1302*ca632f55SGrant Likely struct spi_transfer, 1303*ca632f55SGrant Likely transfer_list); 1304*ca632f55SGrant Likely if (previous->delay_usecs) 1305*ca632f55SGrant Likely /* 1306*ca632f55SGrant Likely * FIXME: This runs in interrupt context. 
1307*ca632f55SGrant Likely * Is this really smart? 1308*ca632f55SGrant Likely */ 1309*ca632f55SGrant Likely udelay(previous->delay_usecs); 1310*ca632f55SGrant Likely 1311*ca632f55SGrant Likely /* Drop chip select only if cs_change is requested */ 1312*ca632f55SGrant Likely if (previous->cs_change) 1313*ca632f55SGrant Likely pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1314*ca632f55SGrant Likely } else { 1315*ca632f55SGrant Likely /* STATE_START */ 1316*ca632f55SGrant Likely message->state = STATE_RUNNING; 1317*ca632f55SGrant Likely } 1318*ca632f55SGrant Likely 1319*ca632f55SGrant Likely if (set_up_next_transfer(pl022, transfer)) { 1320*ca632f55SGrant Likely message->state = STATE_ERROR; 1321*ca632f55SGrant Likely message->status = -EIO; 1322*ca632f55SGrant Likely giveback(pl022); 1323*ca632f55SGrant Likely return; 1324*ca632f55SGrant Likely } 1325*ca632f55SGrant Likely /* Flush the FIFOs and let's go! */ 1326*ca632f55SGrant Likely flush(pl022); 1327*ca632f55SGrant Likely 1328*ca632f55SGrant Likely if (pl022->cur_chip->enable_dma) { 1329*ca632f55SGrant Likely if (configure_dma(pl022)) { 1330*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, 1331*ca632f55SGrant Likely "configuration of DMA failed, fall back to interrupt mode\n"); 1332*ca632f55SGrant Likely goto err_config_dma; 1333*ca632f55SGrant Likely } 1334*ca632f55SGrant Likely return; 1335*ca632f55SGrant Likely } 1336*ca632f55SGrant Likely 1337*ca632f55SGrant Likely err_config_dma: 1338*ca632f55SGrant Likely writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 1339*ca632f55SGrant Likely } 1340*ca632f55SGrant Likely 1341*ca632f55SGrant Likely static void do_interrupt_dma_transfer(struct pl022 *pl022) 1342*ca632f55SGrant Likely { 1343*ca632f55SGrant Likely u32 irqflags = ENABLE_ALL_INTERRUPTS; 1344*ca632f55SGrant Likely 1345*ca632f55SGrant Likely /* Enable target chip */ 1346*ca632f55SGrant Likely pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1347*ca632f55SGrant Likely if (set_up_next_transfer(pl022, pl022->cur_transfer)) { 1348*ca632f55SGrant Likely /* Error path */ 1349*ca632f55SGrant Likely pl022->cur_msg->state = STATE_ERROR; 1350*ca632f55SGrant Likely pl022->cur_msg->status = -EIO; 1351*ca632f55SGrant Likely giveback(pl022); 1352*ca632f55SGrant Likely return; 1353*ca632f55SGrant Likely } 1354*ca632f55SGrant Likely /* If we're using DMA, set up DMA here */ 1355*ca632f55SGrant Likely if (pl022->cur_chip->enable_dma) { 1356*ca632f55SGrant Likely /* Configure DMA transfer */ 1357*ca632f55SGrant Likely if (configure_dma(pl022)) { 1358*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, 1359*ca632f55SGrant Likely "configuration of DMA failed, fall back to interrupt mode\n"); 1360*ca632f55SGrant Likely goto err_config_dma; 1361*ca632f55SGrant Likely } 1362*ca632f55SGrant Likely /* Disable interrupts in DMA mode, IRQ from DMA controller */ 1363*ca632f55SGrant Likely irqflags = DISABLE_ALL_INTERRUPTS; 1364*ca632f55SGrant Likely } 1365*ca632f55SGrant Likely err_config_dma: 1366*ca632f55SGrant Likely /* Enable SSP, turn on interrupts */ 1367*ca632f55SGrant Likely writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1368*ca632f55SGrant Likely SSP_CR1(pl022->virtbase)); 1369*ca632f55SGrant Likely writew(irqflags, SSP_IMSC(pl022->virtbase)); 1370*ca632f55SGrant Likely } 1371*ca632f55SGrant Likely 1372*ca632f55SGrant Likely static void do_polling_transfer(struct pl022 *pl022) 1373*ca632f55SGrant Likely { 1374*ca632f55SGrant Likely struct spi_message *message = NULL; 1375*ca632f55SGrant Likely struct spi_transfer *transfer = NULL; 
1376*ca632f55SGrant Likely struct spi_transfer *previous = NULL; 1377*ca632f55SGrant Likely struct chip_data *chip; 1378*ca632f55SGrant Likely unsigned long time, timeout; 1379*ca632f55SGrant Likely 1380*ca632f55SGrant Likely chip = pl022->cur_chip; 1381*ca632f55SGrant Likely message = pl022->cur_msg; 1382*ca632f55SGrant Likely 1383*ca632f55SGrant Likely while (message->state != STATE_DONE) { 1384*ca632f55SGrant Likely /* Handle for abort */ 1385*ca632f55SGrant Likely if (message->state == STATE_ERROR) 1386*ca632f55SGrant Likely break; 1387*ca632f55SGrant Likely transfer = pl022->cur_transfer; 1388*ca632f55SGrant Likely 1389*ca632f55SGrant Likely /* Delay if requested at end of transfer */ 1390*ca632f55SGrant Likely if (message->state == STATE_RUNNING) { 1391*ca632f55SGrant Likely previous = 1392*ca632f55SGrant Likely list_entry(transfer->transfer_list.prev, 1393*ca632f55SGrant Likely struct spi_transfer, transfer_list); 1394*ca632f55SGrant Likely if (previous->delay_usecs) 1395*ca632f55SGrant Likely udelay(previous->delay_usecs); 1396*ca632f55SGrant Likely if (previous->cs_change) 1397*ca632f55SGrant Likely pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1398*ca632f55SGrant Likely } else { 1399*ca632f55SGrant Likely /* STATE_START */ 1400*ca632f55SGrant Likely message->state = STATE_RUNNING; 1401*ca632f55SGrant Likely pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1402*ca632f55SGrant Likely } 1403*ca632f55SGrant Likely 1404*ca632f55SGrant Likely /* Configuration Changing Per Transfer */ 1405*ca632f55SGrant Likely if (set_up_next_transfer(pl022, transfer)) { 1406*ca632f55SGrant Likely /* Error path */ 1407*ca632f55SGrant Likely message->state = STATE_ERROR; 1408*ca632f55SGrant Likely break; 1409*ca632f55SGrant Likely } 1410*ca632f55SGrant Likely /* Flush FIFOs and enable SSP */ 1411*ca632f55SGrant Likely flush(pl022); 1412*ca632f55SGrant Likely writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1413*ca632f55SGrant Likely SSP_CR1(pl022->virtbase)); 1414*ca632f55SGrant Likely 1415*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); 1416*ca632f55SGrant Likely 1417*ca632f55SGrant Likely timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); 1418*ca632f55SGrant Likely while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { 1419*ca632f55SGrant Likely time = jiffies; 1420*ca632f55SGrant Likely readwriter(pl022); 1421*ca632f55SGrant Likely if (time_after(time, timeout)) { 1422*ca632f55SGrant Likely dev_warn(&pl022->adev->dev, 1423*ca632f55SGrant Likely "%s: timeout!\n", __func__); 1424*ca632f55SGrant Likely message->state = STATE_ERROR; 1425*ca632f55SGrant Likely goto out; 1426*ca632f55SGrant Likely } 1427*ca632f55SGrant Likely cpu_relax(); 1428*ca632f55SGrant Likely } 1429*ca632f55SGrant Likely 1430*ca632f55SGrant Likely /* Update total byte transferred */ 1431*ca632f55SGrant Likely message->actual_length += pl022->cur_transfer->len; 1432*ca632f55SGrant Likely if (pl022->cur_transfer->cs_change) 1433*ca632f55SGrant Likely pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); 1434*ca632f55SGrant Likely /* Move to next transfer */ 1435*ca632f55SGrant Likely message->state = next_transfer(pl022); 1436*ca632f55SGrant Likely } 1437*ca632f55SGrant Likely out: 1438*ca632f55SGrant Likely /* Handle end of message */ 1439*ca632f55SGrant Likely if (message->state == STATE_DONE) 1440*ca632f55SGrant Likely message->status = 0; 1441*ca632f55SGrant Likely else 1442*ca632f55SGrant Likely message->status = -EIO; 1443*ca632f55SGrant Likely 1444*ca632f55SGrant Likely 
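	/*
	 * Illustrative sketch (not part of the original driver): the status
	 * chosen just above is what the submitting protocol driver eventually
	 * sees. Using the generic SPI API, a caller would typically do
	 * something like:
	 *
	 *	struct spi_transfer t = { .tx_buf = buf, .len = len };
	 *	struct spi_message m;
	 *
	 *	spi_message_init(&m);
	 *	spi_message_add_tail(&t, &m);
	 *	ret = spi_sync(spi, &m);
	 *
	 * where "buf", "len", "ret" and "spi" are assumed to exist in the
	 * hypothetical caller; ret and m.status reflect the 0 or -EIO set here.
	 */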
	giveback(pl022);
	return;
}

/**
 * pump_messages - Workqueue function which processes spi message queue
 * @work: pointer to the work item embedded in the driver's private data
 *
 * This function checks whether there is any spi message in the queue that
 * needs processing and, if so, delegates control to the appropriate
 * transfer function, do_polling_transfer() or do_interrupt_dma_transfer(),
 * based on the kind of transfer.
 */
static void pump_messages(struct work_struct *work)
{
	struct pl022 *pl022 =
		container_of(work, struct pl022, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&pl022->queue_lock, flags);
	if (list_empty(&pl022->queue) || !pl022->running) {
		pl022->busy = false;
		spin_unlock_irqrestore(&pl022->queue_lock, flags);
		return;
	}
	/* Make sure we are not already running a message */
	if (pl022->cur_msg) {
		spin_unlock_irqrestore(&pl022->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	pl022->cur_msg =
		list_entry(pl022->queue.next, struct spi_message, queue);

	list_del_init(&pl022->cur_msg->queue);
	pl022->busy = true;
	spin_unlock_irqrestore(&pl022->queue_lock, flags);

	/* Initial message state */
	pl022->cur_msg->state = STATE_START;
	pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
					 struct spi_transfer,
					 transfer_list);

	/* Setup the SPI using the per chip configuration */
	pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
	/*
	 * We enable the core voltage and clocks here; the clocks and core
	 * are disabled again when giveback() is called by each transfer
	 * method (poll/interrupt/DMA).
	 */
	amba_vcore_enable(pl022->adev);
	amba_pclk_enable(pl022->adev);
	clk_enable(pl022->clk);
	restore_state(pl022);
	flush(pl022);

	if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
		do_polling_transfer(pl022);
	else
		do_interrupt_dma_transfer(pl022);
}


1510*ca632f55SGrant
Likely static int __init init_queue(struct pl022 *pl022) 1511*ca632f55SGrant Likely { 1512*ca632f55SGrant Likely INIT_LIST_HEAD(&pl022->queue); 1513*ca632f55SGrant Likely spin_lock_init(&pl022->queue_lock); 1514*ca632f55SGrant Likely 1515*ca632f55SGrant Likely pl022->running = false; 1516*ca632f55SGrant Likely pl022->busy = false; 1517*ca632f55SGrant Likely 1518*ca632f55SGrant Likely tasklet_init(&pl022->pump_transfers, 1519*ca632f55SGrant Likely pump_transfers, (unsigned long)pl022); 1520*ca632f55SGrant Likely 1521*ca632f55SGrant Likely INIT_WORK(&pl022->pump_messages, pump_messages); 1522*ca632f55SGrant Likely pl022->workqueue = create_singlethread_workqueue( 1523*ca632f55SGrant Likely dev_name(pl022->master->dev.parent)); 1524*ca632f55SGrant Likely if (pl022->workqueue == NULL) 1525*ca632f55SGrant Likely return -EBUSY; 1526*ca632f55SGrant Likely 1527*ca632f55SGrant Likely return 0; 1528*ca632f55SGrant Likely } 1529*ca632f55SGrant Likely 1530*ca632f55SGrant Likely 1531*ca632f55SGrant Likely static int start_queue(struct pl022 *pl022) 1532*ca632f55SGrant Likely { 1533*ca632f55SGrant Likely unsigned long flags; 1534*ca632f55SGrant Likely 1535*ca632f55SGrant Likely spin_lock_irqsave(&pl022->queue_lock, flags); 1536*ca632f55SGrant Likely 1537*ca632f55SGrant Likely if (pl022->running || pl022->busy) { 1538*ca632f55SGrant Likely spin_unlock_irqrestore(&pl022->queue_lock, flags); 1539*ca632f55SGrant Likely return -EBUSY; 1540*ca632f55SGrant Likely } 1541*ca632f55SGrant Likely 1542*ca632f55SGrant Likely pl022->running = true; 1543*ca632f55SGrant Likely pl022->cur_msg = NULL; 1544*ca632f55SGrant Likely pl022->cur_transfer = NULL; 1545*ca632f55SGrant Likely pl022->cur_chip = NULL; 1546*ca632f55SGrant Likely spin_unlock_irqrestore(&pl022->queue_lock, flags); 1547*ca632f55SGrant Likely 1548*ca632f55SGrant Likely queue_work(pl022->workqueue, &pl022->pump_messages); 1549*ca632f55SGrant Likely 1550*ca632f55SGrant Likely return 0; 1551*ca632f55SGrant Likely } 1552*ca632f55SGrant Likely 1553*ca632f55SGrant Likely 1554*ca632f55SGrant Likely static int stop_queue(struct pl022 *pl022) 1555*ca632f55SGrant Likely { 1556*ca632f55SGrant Likely unsigned long flags; 1557*ca632f55SGrant Likely unsigned limit = 500; 1558*ca632f55SGrant Likely int status = 0; 1559*ca632f55SGrant Likely 1560*ca632f55SGrant Likely spin_lock_irqsave(&pl022->queue_lock, flags); 1561*ca632f55SGrant Likely 1562*ca632f55SGrant Likely /* This is a bit lame, but is optimized for the common execution path. 1563*ca632f55SGrant Likely * A wait_queue on the pl022->busy could be used, but then the common 1564*ca632f55SGrant Likely * execution path (pump_messages) would be required to call wake_up or 1565*ca632f55SGrant Likely * friends on every SPI message. 
Do this instead */ 1566*ca632f55SGrant Likely while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { 1567*ca632f55SGrant Likely spin_unlock_irqrestore(&pl022->queue_lock, flags); 1568*ca632f55SGrant Likely msleep(10); 1569*ca632f55SGrant Likely spin_lock_irqsave(&pl022->queue_lock, flags); 1570*ca632f55SGrant Likely } 1571*ca632f55SGrant Likely 1572*ca632f55SGrant Likely if (!list_empty(&pl022->queue) || pl022->busy) 1573*ca632f55SGrant Likely status = -EBUSY; 1574*ca632f55SGrant Likely else 1575*ca632f55SGrant Likely pl022->running = false; 1576*ca632f55SGrant Likely 1577*ca632f55SGrant Likely spin_unlock_irqrestore(&pl022->queue_lock, flags); 1578*ca632f55SGrant Likely 1579*ca632f55SGrant Likely return status; 1580*ca632f55SGrant Likely } 1581*ca632f55SGrant Likely 1582*ca632f55SGrant Likely static int destroy_queue(struct pl022 *pl022) 1583*ca632f55SGrant Likely { 1584*ca632f55SGrant Likely int status; 1585*ca632f55SGrant Likely 1586*ca632f55SGrant Likely status = stop_queue(pl022); 1587*ca632f55SGrant Likely /* we are unloading the module or failing to load (only two calls 1588*ca632f55SGrant Likely * to this routine), and neither call can handle a return value. 1589*ca632f55SGrant Likely * However, destroy_workqueue calls flush_workqueue, and that will 1590*ca632f55SGrant Likely * block until all work is done. If the reason that stop_queue 1591*ca632f55SGrant Likely * timed out is that the work will never finish, then it does no 1592*ca632f55SGrant Likely * good to call destroy_workqueue, so return anyway. */ 1593*ca632f55SGrant Likely if (status != 0) 1594*ca632f55SGrant Likely return status; 1595*ca632f55SGrant Likely 1596*ca632f55SGrant Likely destroy_workqueue(pl022->workqueue); 1597*ca632f55SGrant Likely 1598*ca632f55SGrant Likely return 0; 1599*ca632f55SGrant Likely } 1600*ca632f55SGrant Likely 1601*ca632f55SGrant Likely static int verify_controller_parameters(struct pl022 *pl022, 1602*ca632f55SGrant Likely struct pl022_config_chip const *chip_info) 1603*ca632f55SGrant Likely { 1604*ca632f55SGrant Likely if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) 1605*ca632f55SGrant Likely || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { 1606*ca632f55SGrant Likely dev_err(&pl022->adev->dev, 1607*ca632f55SGrant Likely "interface is configured incorrectly\n"); 1608*ca632f55SGrant Likely return -EINVAL; 1609*ca632f55SGrant Likely } 1610*ca632f55SGrant Likely if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && 1611*ca632f55SGrant Likely (!pl022->vendor->unidir)) { 1612*ca632f55SGrant Likely dev_err(&pl022->adev->dev, 1613*ca632f55SGrant Likely "unidirectional mode not supported in this " 1614*ca632f55SGrant Likely "hardware version\n"); 1615*ca632f55SGrant Likely return -EINVAL; 1616*ca632f55SGrant Likely } 1617*ca632f55SGrant Likely if ((chip_info->hierarchy != SSP_MASTER) 1618*ca632f55SGrant Likely && (chip_info->hierarchy != SSP_SLAVE)) { 1619*ca632f55SGrant Likely dev_err(&pl022->adev->dev, 1620*ca632f55SGrant Likely "hierarchy is configured incorrectly\n"); 1621*ca632f55SGrant Likely return -EINVAL; 1622*ca632f55SGrant Likely } 1623*ca632f55SGrant Likely if ((chip_info->com_mode != INTERRUPT_TRANSFER) 1624*ca632f55SGrant Likely && (chip_info->com_mode != DMA_TRANSFER) 1625*ca632f55SGrant Likely && (chip_info->com_mode != POLLING_TRANSFER)) { 1626*ca632f55SGrant Likely dev_err(&pl022->adev->dev, 1627*ca632f55SGrant Likely "Communication mode is configured incorrectly\n"); 1628*ca632f55SGrant Likely return -EINVAL; 1629*ca632f55SGrant Likely } 1630*ca632f55SGrant 
	if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
	    || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
		dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
	    || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
		dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
		if ((chip_info->ctrl_len < SSP_BITS_4)
		    || (chip_info->ctrl_len > SSP_BITS_32)) {
			dev_err(&pl022->adev->dev,
				"CTRL LEN is configured incorrectly\n");
			return -EINVAL;
		}
		if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
		    && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
			dev_err(&pl022->adev->dev,
				"Wait State is configured incorrectly\n");
			return -EINVAL;
		}
		/* Half duplex is only available in the ST Micro version */
		if (pl022->vendor->extended_cr) {
			if ((chip_info->duplex !=
			     SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
			    && (chip_info->duplex !=
				SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
				dev_err(&pl022->adev->dev,
					"Microwire duplex mode is configured incorrectly\n");
				return -EINVAL;
			}
		} else {
			/*
			 * Only reject the configuration if half duplex was
			 * actually requested; full duplex is fine here.
			 */
			if (chip_info->duplex !=
			    SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
				dev_err(&pl022->adev->dev,
					"Microwire half duplex mode requested,"
					" but this is only available in the"
					" ST version of PL022\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}

/**
 * pl022_transfer - transfer function registered to SPI master framework
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled; it is queued on the driver queue
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will queue the spi_message on the driver's queue if the
 * queue is not stopped, and then return.
1685*ca632f55SGrant Likely */ 1686*ca632f55SGrant Likely static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) 1687*ca632f55SGrant Likely { 1688*ca632f55SGrant Likely struct pl022 *pl022 = spi_master_get_devdata(spi->master); 1689*ca632f55SGrant Likely unsigned long flags; 1690*ca632f55SGrant Likely 1691*ca632f55SGrant Likely spin_lock_irqsave(&pl022->queue_lock, flags); 1692*ca632f55SGrant Likely 1693*ca632f55SGrant Likely if (!pl022->running) { 1694*ca632f55SGrant Likely spin_unlock_irqrestore(&pl022->queue_lock, flags); 1695*ca632f55SGrant Likely return -ESHUTDOWN; 1696*ca632f55SGrant Likely } 1697*ca632f55SGrant Likely msg->actual_length = 0; 1698*ca632f55SGrant Likely msg->status = -EINPROGRESS; 1699*ca632f55SGrant Likely msg->state = STATE_START; 1700*ca632f55SGrant Likely 1701*ca632f55SGrant Likely list_add_tail(&msg->queue, &pl022->queue); 1702*ca632f55SGrant Likely if (pl022->running && !pl022->busy) 1703*ca632f55SGrant Likely queue_work(pl022->workqueue, &pl022->pump_messages); 1704*ca632f55SGrant Likely 1705*ca632f55SGrant Likely spin_unlock_irqrestore(&pl022->queue_lock, flags); 1706*ca632f55SGrant Likely return 0; 1707*ca632f55SGrant Likely } 1708*ca632f55SGrant Likely 1709*ca632f55SGrant Likely static int calculate_effective_freq(struct pl022 *pl022, 1710*ca632f55SGrant Likely int freq, 1711*ca632f55SGrant Likely struct ssp_clock_params *clk_freq) 1712*ca632f55SGrant Likely { 1713*ca632f55SGrant Likely /* Lets calculate the frequency parameters */ 1714*ca632f55SGrant Likely u16 cpsdvsr = 2; 1715*ca632f55SGrant Likely u16 scr = 0; 1716*ca632f55SGrant Likely bool freq_found = false; 1717*ca632f55SGrant Likely u32 rate; 1718*ca632f55SGrant Likely u32 max_tclk; 1719*ca632f55SGrant Likely u32 min_tclk; 1720*ca632f55SGrant Likely 1721*ca632f55SGrant Likely rate = clk_get_rate(pl022->clk); 1722*ca632f55SGrant Likely /* cpsdvscr = 2 & scr 0 */ 1723*ca632f55SGrant Likely max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN))); 1724*ca632f55SGrant Likely /* cpsdvsr = 254 & scr = 255 */ 1725*ca632f55SGrant Likely min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX))); 1726*ca632f55SGrant Likely 1727*ca632f55SGrant Likely if ((freq <= max_tclk) && (freq >= min_tclk)) { 1728*ca632f55SGrant Likely while (cpsdvsr <= CPSDVR_MAX && !freq_found) { 1729*ca632f55SGrant Likely while (scr <= SCR_MAX && !freq_found) { 1730*ca632f55SGrant Likely if ((rate / 1731*ca632f55SGrant Likely (cpsdvsr * (1 + scr))) > freq) 1732*ca632f55SGrant Likely scr += 1; 1733*ca632f55SGrant Likely else { 1734*ca632f55SGrant Likely /* 1735*ca632f55SGrant Likely * This bool is made true when 1736*ca632f55SGrant Likely * effective frequency >= 1737*ca632f55SGrant Likely * target frequency is found 1738*ca632f55SGrant Likely */ 1739*ca632f55SGrant Likely freq_found = true; 1740*ca632f55SGrant Likely if ((rate / 1741*ca632f55SGrant Likely (cpsdvsr * (1 + scr))) != freq) { 1742*ca632f55SGrant Likely if (scr == SCR_MIN) { 1743*ca632f55SGrant Likely cpsdvsr -= 2; 1744*ca632f55SGrant Likely scr = SCR_MAX; 1745*ca632f55SGrant Likely } else 1746*ca632f55SGrant Likely scr -= 1; 1747*ca632f55SGrant Likely } 1748*ca632f55SGrant Likely } 1749*ca632f55SGrant Likely } 1750*ca632f55SGrant Likely if (!freq_found) { 1751*ca632f55SGrant Likely cpsdvsr += 2; 1752*ca632f55SGrant Likely scr = SCR_MIN; 1753*ca632f55SGrant Likely } 1754*ca632f55SGrant Likely } 1755*ca632f55SGrant Likely if (cpsdvsr != 0) { 1756*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, 1757*ca632f55SGrant Likely "SSP Effective Frequency is %u\n", 
1758*ca632f55SGrant Likely (rate / (cpsdvsr * (1 + scr)))); 1759*ca632f55SGrant Likely clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF); 1760*ca632f55SGrant Likely clk_freq->scr = (u8) (scr & 0xFF); 1761*ca632f55SGrant Likely dev_dbg(&pl022->adev->dev, 1762*ca632f55SGrant Likely "SSP cpsdvsr = %d, scr = %d\n", 1763*ca632f55SGrant Likely clk_freq->cpsdvsr, clk_freq->scr); 1764*ca632f55SGrant Likely } 1765*ca632f55SGrant Likely } else { 1766*ca632f55SGrant Likely dev_err(&pl022->adev->dev, 1767*ca632f55SGrant Likely "controller data is incorrect: out of range frequency"); 1768*ca632f55SGrant Likely return -EINVAL; 1769*ca632f55SGrant Likely } 1770*ca632f55SGrant Likely return 0; 1771*ca632f55SGrant Likely } 1772*ca632f55SGrant Likely 1773*ca632f55SGrant Likely 1774*ca632f55SGrant Likely /* 1775*ca632f55SGrant Likely * A piece of default chip info unless the platform 1776*ca632f55SGrant Likely * supplies it. 1777*ca632f55SGrant Likely */ 1778*ca632f55SGrant Likely static const struct pl022_config_chip pl022_default_chip_info = { 1779*ca632f55SGrant Likely .com_mode = POLLING_TRANSFER, 1780*ca632f55SGrant Likely .iface = SSP_INTERFACE_MOTOROLA_SPI, 1781*ca632f55SGrant Likely .hierarchy = SSP_SLAVE, 1782*ca632f55SGrant Likely .slave_tx_disable = DO_NOT_DRIVE_TX, 1783*ca632f55SGrant Likely .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, 1784*ca632f55SGrant Likely .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, 1785*ca632f55SGrant Likely .ctrl_len = SSP_BITS_8, 1786*ca632f55SGrant Likely .wait_state = SSP_MWIRE_WAIT_ZERO, 1787*ca632f55SGrant Likely .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, 1788*ca632f55SGrant Likely .cs_control = null_cs_control, 1789*ca632f55SGrant Likely }; 1790*ca632f55SGrant Likely 1791*ca632f55SGrant Likely 1792*ca632f55SGrant Likely /** 1793*ca632f55SGrant Likely * pl022_setup - setup function registered to SPI master framework 1794*ca632f55SGrant Likely * @spi: spi device which is requesting setup 1795*ca632f55SGrant Likely * 1796*ca632f55SGrant Likely * This function is registered to the SPI framework for this SPI master 1797*ca632f55SGrant Likely * controller. If it is the first time when setup is called by this device, 1798*ca632f55SGrant Likely * this function will initialize the runtime state for this chip and save 1799*ca632f55SGrant Likely * the same in the device structure. Else it will update the runtime info 1800*ca632f55SGrant Likely * with the updated chip info. Nothing is really being written to the 1801*ca632f55SGrant Likely * controller hardware here, that is not done until the actual transfer 1802*ca632f55SGrant Likely * commence. 
1803*ca632f55SGrant Likely */ 1804*ca632f55SGrant Likely static int pl022_setup(struct spi_device *spi) 1805*ca632f55SGrant Likely { 1806*ca632f55SGrant Likely struct pl022_config_chip const *chip_info; 1807*ca632f55SGrant Likely struct chip_data *chip; 1808*ca632f55SGrant Likely struct ssp_clock_params clk_freq = {0, }; 1809*ca632f55SGrant Likely int status = 0; 1810*ca632f55SGrant Likely struct pl022 *pl022 = spi_master_get_devdata(spi->master); 1811*ca632f55SGrant Likely unsigned int bits = spi->bits_per_word; 1812*ca632f55SGrant Likely u32 tmp; 1813*ca632f55SGrant Likely 1814*ca632f55SGrant Likely if (!spi->max_speed_hz) 1815*ca632f55SGrant Likely return -EINVAL; 1816*ca632f55SGrant Likely 1817*ca632f55SGrant Likely /* Get controller_state if one is supplied */ 1818*ca632f55SGrant Likely chip = spi_get_ctldata(spi); 1819*ca632f55SGrant Likely 1820*ca632f55SGrant Likely if (chip == NULL) { 1821*ca632f55SGrant Likely chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 1822*ca632f55SGrant Likely if (!chip) { 1823*ca632f55SGrant Likely dev_err(&spi->dev, 1824*ca632f55SGrant Likely "cannot allocate controller state\n"); 1825*ca632f55SGrant Likely return -ENOMEM; 1826*ca632f55SGrant Likely } 1827*ca632f55SGrant Likely dev_dbg(&spi->dev, 1828*ca632f55SGrant Likely "allocated memory for controller's runtime state\n"); 1829*ca632f55SGrant Likely } 1830*ca632f55SGrant Likely 1831*ca632f55SGrant Likely /* Get controller data if one is supplied */ 1832*ca632f55SGrant Likely chip_info = spi->controller_data; 1833*ca632f55SGrant Likely 1834*ca632f55SGrant Likely if (chip_info == NULL) { 1835*ca632f55SGrant Likely chip_info = &pl022_default_chip_info; 1836*ca632f55SGrant Likely /* spi_board_info.controller_data not is supplied */ 1837*ca632f55SGrant Likely dev_dbg(&spi->dev, 1838*ca632f55SGrant Likely "using default controller_data settings\n"); 1839*ca632f55SGrant Likely } else 1840*ca632f55SGrant Likely dev_dbg(&spi->dev, 1841*ca632f55SGrant Likely "using user supplied controller_data settings\n"); 1842*ca632f55SGrant Likely 1843*ca632f55SGrant Likely /* 1844*ca632f55SGrant Likely * We can override with custom divisors, else we use the board 1845*ca632f55SGrant Likely * frequency setting 1846*ca632f55SGrant Likely */ 1847*ca632f55SGrant Likely if ((0 == chip_info->clk_freq.cpsdvsr) 1848*ca632f55SGrant Likely && (0 == chip_info->clk_freq.scr)) { 1849*ca632f55SGrant Likely status = calculate_effective_freq(pl022, 1850*ca632f55SGrant Likely spi->max_speed_hz, 1851*ca632f55SGrant Likely &clk_freq); 1852*ca632f55SGrant Likely if (status < 0) 1853*ca632f55SGrant Likely goto err_config_params; 1854*ca632f55SGrant Likely } else { 1855*ca632f55SGrant Likely memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); 1856*ca632f55SGrant Likely if ((clk_freq.cpsdvsr % 2) != 0) 1857*ca632f55SGrant Likely clk_freq.cpsdvsr = 1858*ca632f55SGrant Likely clk_freq.cpsdvsr - 1; 1859*ca632f55SGrant Likely } 1860*ca632f55SGrant Likely if ((clk_freq.cpsdvsr < CPSDVR_MIN) 1861*ca632f55SGrant Likely || (clk_freq.cpsdvsr > CPSDVR_MAX)) { 1862*ca632f55SGrant Likely dev_err(&spi->dev, 1863*ca632f55SGrant Likely "cpsdvsr is configured incorrectly\n"); 1864*ca632f55SGrant Likely goto err_config_params; 1865*ca632f55SGrant Likely } 1866*ca632f55SGrant Likely 1867*ca632f55SGrant Likely 1868*ca632f55SGrant Likely status = verify_controller_parameters(pl022, chip_info); 1869*ca632f55SGrant Likely if (status) { 1870*ca632f55SGrant Likely dev_err(&spi->dev, "controller data is incorrect"); 1871*ca632f55SGrant Likely goto 
err_config_params;
	}

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	if (!chip_info->cs_control) {
		chip->cs_control = null_cs_control;
		dev_warn(&spi->dev,
			 "chip select function is NULL for this chip\n");
	} else
		chip->cs_control = chip_info->cs_control;

	if (bits <= 3) {
		/* PL022 doesn't support less than 4-bits */
		status = -ENOTSUPP;
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		if (pl022->vendor->max_bpw >= 32) {
			dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
			chip->n_bytes = 4;
			chip->read = READING_U32;
			chip->write = WRITING_U32;
		} else {
			dev_err(&spi->dev,
				"illegal data size for this controller!\n");
			dev_err(&spi->dev,
				"a standard pl022 can only handle "
				"4 <= n <= 16 bit words\n");
			status = -ENOTSUPP;
			goto err_config_params;
		}
	}

	/* Now initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = clk_freq.cpsdvsr;
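	/*
	 * Illustrative note (not from the original source): cpsdvsr and scr
	 * together set the bit clock according to the relation used in
	 * calculate_effective_freq() above:
	 *
	 *	effective rate = clk_get_rate(pl022->clk) / (cpsdvsr * (1 + scr))
	 *
	 * e.g. with an assumed 48 MHz SSP clock, cpsdvsr = 2 and scr = 11
	 * give 48000000 / (2 * 12) = 2 MHz. cpsdvsr is kept in chip->cpsr
	 * here, while scr is written into the SCR field of cr0 further down.
	 */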
1937*ca632f55SGrant Likely 1938*ca632f55SGrant Likely /* Special setup for the ST micro extended control registers */ 1939*ca632f55SGrant Likely if (pl022->vendor->extended_cr) { 1940*ca632f55SGrant Likely u32 etx; 1941*ca632f55SGrant Likely 1942*ca632f55SGrant Likely if (pl022->vendor->pl023) { 1943*ca632f55SGrant Likely /* These bits are only in the PL023 */ 1944*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, 1945*ca632f55SGrant Likely SSP_CR1_MASK_FBCLKDEL_ST, 13); 1946*ca632f55SGrant Likely } else { 1947*ca632f55SGrant Likely /* These bits are in the PL022 but not PL023 */ 1948*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, chip_info->duplex, 1949*ca632f55SGrant Likely SSP_CR0_MASK_HALFDUP_ST, 5); 1950*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, 1951*ca632f55SGrant Likely SSP_CR0_MASK_CSS_ST, 16); 1952*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, chip_info->iface, 1953*ca632f55SGrant Likely SSP_CR0_MASK_FRF_ST, 21); 1954*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, 1955*ca632f55SGrant Likely SSP_CR1_MASK_MWAIT_ST, 6); 1956*ca632f55SGrant Likely } 1957*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, bits - 1, 1958*ca632f55SGrant Likely SSP_CR0_MASK_DSS_ST, 0); 1959*ca632f55SGrant Likely 1960*ca632f55SGrant Likely if (spi->mode & SPI_LSB_FIRST) { 1961*ca632f55SGrant Likely tmp = SSP_RX_LSB; 1962*ca632f55SGrant Likely etx = SSP_TX_LSB; 1963*ca632f55SGrant Likely } else { 1964*ca632f55SGrant Likely tmp = SSP_RX_MSB; 1965*ca632f55SGrant Likely etx = SSP_TX_MSB; 1966*ca632f55SGrant Likely } 1967*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); 1968*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); 1969*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, 1970*ca632f55SGrant Likely SSP_CR1_MASK_RXIFLSEL_ST, 7); 1971*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, 1972*ca632f55SGrant Likely SSP_CR1_MASK_TXIFLSEL_ST, 10); 1973*ca632f55SGrant Likely } else { 1974*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, bits - 1, 1975*ca632f55SGrant Likely SSP_CR0_MASK_DSS, 0); 1976*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, chip_info->iface, 1977*ca632f55SGrant Likely SSP_CR0_MASK_FRF, 4); 1978*ca632f55SGrant Likely } 1979*ca632f55SGrant Likely 1980*ca632f55SGrant Likely /* Stuff that is common for all versions */ 1981*ca632f55SGrant Likely if (spi->mode & SPI_CPOL) 1982*ca632f55SGrant Likely tmp = SSP_CLK_POL_IDLE_HIGH; 1983*ca632f55SGrant Likely else 1984*ca632f55SGrant Likely tmp = SSP_CLK_POL_IDLE_LOW; 1985*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); 1986*ca632f55SGrant Likely 1987*ca632f55SGrant Likely if (spi->mode & SPI_CPHA) 1988*ca632f55SGrant Likely tmp = SSP_CLK_SECOND_EDGE; 1989*ca632f55SGrant Likely else 1990*ca632f55SGrant Likely tmp = SSP_CLK_FIRST_EDGE; 1991*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); 1992*ca632f55SGrant Likely 1993*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); 1994*ca632f55SGrant Likely /* Loopback is available on all versions except PL023 */ 1995*ca632f55SGrant Likely if (pl022->vendor->loopback) { 1996*ca632f55SGrant Likely if (spi->mode & SPI_LOOP) 1997*ca632f55SGrant Likely tmp = LOOPBACK_ENABLED; 1998*ca632f55SGrant Likely else 1999*ca632f55SGrant Likely tmp = LOOPBACK_DISABLED; 2000*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); 2001*ca632f55SGrant Likely } 
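	/*
	 * Illustrative sketch (assumption, not part of this driver): the
	 * spi->mode bits and chip_info consumed throughout pl022_setup()
	 * usually originate in a board file, for example:
	 *
	 *	static struct pl022_config_chip foo_chip_info = {
	 *		.com_mode	= INTERRUPT_TRANSFER,
	 *		.iface		= SSP_INTERFACE_MOTOROLA_SPI,
	 *		.hierarchy	= SSP_MASTER,
	 *		.cs_control	= foo_cs_control,
	 *	};
	 *
	 *	static struct spi_board_info foo_board_info[] __initdata = {
	 *		{
	 *			.modalias	 = "foo-spi-dev",
	 *			.max_speed_hz	 = 1000000,
	 *			.bus_num	 = 0,
	 *			.chip_select	 = 0,
	 *			.mode		 = SPI_MODE_3,
	 *			.controller_data = &foo_chip_info,
	 *		},
	 *	};
	 *
	 * "foo_chip_info", "foo_cs_control", "foo-spi-dev" and the values are
	 * hypothetical; only the field names come from the real structures.
	 */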
2002*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); 2003*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); 2004*ca632f55SGrant Likely SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); 2005*ca632f55SGrant Likely 2006*ca632f55SGrant Likely /* Save controller_state */ 2007*ca632f55SGrant Likely spi_set_ctldata(spi, chip); 2008*ca632f55SGrant Likely return status; 2009*ca632f55SGrant Likely err_config_params: 2010*ca632f55SGrant Likely spi_set_ctldata(spi, NULL); 2011*ca632f55SGrant Likely kfree(chip); 2012*ca632f55SGrant Likely return status; 2013*ca632f55SGrant Likely } 2014*ca632f55SGrant Likely 2015*ca632f55SGrant Likely /** 2016*ca632f55SGrant Likely * pl022_cleanup - cleanup function registered to SPI master framework 2017*ca632f55SGrant Likely * @spi: spi device which is requesting cleanup 2018*ca632f55SGrant Likely * 2019*ca632f55SGrant Likely * This function is registered to the SPI framework for this SPI master 2020*ca632f55SGrant Likely * controller. It will free the runtime state of chip. 2021*ca632f55SGrant Likely */ 2022*ca632f55SGrant Likely static void pl022_cleanup(struct spi_device *spi) 2023*ca632f55SGrant Likely { 2024*ca632f55SGrant Likely struct chip_data *chip = spi_get_ctldata(spi); 2025*ca632f55SGrant Likely 2026*ca632f55SGrant Likely spi_set_ctldata(spi, NULL); 2027*ca632f55SGrant Likely kfree(chip); 2028*ca632f55SGrant Likely } 2029*ca632f55SGrant Likely 2030*ca632f55SGrant Likely 2031*ca632f55SGrant Likely static int __devinit 2032*ca632f55SGrant Likely pl022_probe(struct amba_device *adev, const struct amba_id *id) 2033*ca632f55SGrant Likely { 2034*ca632f55SGrant Likely struct device *dev = &adev->dev; 2035*ca632f55SGrant Likely struct pl022_ssp_controller *platform_info = adev->dev.platform_data; 2036*ca632f55SGrant Likely struct spi_master *master; 2037*ca632f55SGrant Likely struct pl022 *pl022 = NULL; /*Data for this driver */ 2038*ca632f55SGrant Likely int status = 0; 2039*ca632f55SGrant Likely 2040*ca632f55SGrant Likely dev_info(&adev->dev, 2041*ca632f55SGrant Likely "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); 2042*ca632f55SGrant Likely if (platform_info == NULL) { 2043*ca632f55SGrant Likely dev_err(&adev->dev, "probe - no platform data supplied\n"); 2044*ca632f55SGrant Likely status = -ENODEV; 2045*ca632f55SGrant Likely goto err_no_pdata; 2046*ca632f55SGrant Likely } 2047*ca632f55SGrant Likely 2048*ca632f55SGrant Likely /* Allocate master with space for data */ 2049*ca632f55SGrant Likely master = spi_alloc_master(dev, sizeof(struct pl022)); 2050*ca632f55SGrant Likely if (master == NULL) { 2051*ca632f55SGrant Likely dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); 2052*ca632f55SGrant Likely status = -ENOMEM; 2053*ca632f55SGrant Likely goto err_no_master; 2054*ca632f55SGrant Likely } 2055*ca632f55SGrant Likely 2056*ca632f55SGrant Likely pl022 = spi_master_get_devdata(master); 2057*ca632f55SGrant Likely pl022->master = master; 2058*ca632f55SGrant Likely pl022->master_info = platform_info; 2059*ca632f55SGrant Likely pl022->adev = adev; 2060*ca632f55SGrant Likely pl022->vendor = id->data; 2061*ca632f55SGrant Likely 2062*ca632f55SGrant Likely /* 2063*ca632f55SGrant Likely * Bus Number Which has been Assigned to this SSP controller 2064*ca632f55SGrant Likely * on this board 2065*ca632f55SGrant Likely */ 2066*ca632f55SGrant Likely master->bus_num = platform_info->bus_id; 2067*ca632f55SGrant Likely master->num_chipselect = 
platform_info->num_chipselect; 2068*ca632f55SGrant Likely master->cleanup = pl022_cleanup; 2069*ca632f55SGrant Likely master->setup = pl022_setup; 2070*ca632f55SGrant Likely master->transfer = pl022_transfer; 2071*ca632f55SGrant Likely 2072*ca632f55SGrant Likely /* 2073*ca632f55SGrant Likely * Supports mode 0-3, loopback, and active low CS. Transfers are 2074*ca632f55SGrant Likely * always MS bit first on the original pl022. 2075*ca632f55SGrant Likely */ 2076*ca632f55SGrant Likely master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 2077*ca632f55SGrant Likely if (pl022->vendor->extended_cr) 2078*ca632f55SGrant Likely master->mode_bits |= SPI_LSB_FIRST; 2079*ca632f55SGrant Likely 2080*ca632f55SGrant Likely dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); 2081*ca632f55SGrant Likely 2082*ca632f55SGrant Likely status = amba_request_regions(adev, NULL); 2083*ca632f55SGrant Likely if (status) 2084*ca632f55SGrant Likely goto err_no_ioregion; 2085*ca632f55SGrant Likely 2086*ca632f55SGrant Likely pl022->phybase = adev->res.start; 2087*ca632f55SGrant Likely pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); 2088*ca632f55SGrant Likely if (pl022->virtbase == NULL) { 2089*ca632f55SGrant Likely status = -ENOMEM; 2090*ca632f55SGrant Likely goto err_no_ioremap; 2091*ca632f55SGrant Likely } 2092*ca632f55SGrant Likely printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", 2093*ca632f55SGrant Likely adev->res.start, pl022->virtbase); 2094*ca632f55SGrant Likely 2095*ca632f55SGrant Likely pl022->clk = clk_get(&adev->dev, NULL); 2096*ca632f55SGrant Likely if (IS_ERR(pl022->clk)) { 2097*ca632f55SGrant Likely status = PTR_ERR(pl022->clk); 2098*ca632f55SGrant Likely dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); 2099*ca632f55SGrant Likely goto err_no_clk; 2100*ca632f55SGrant Likely } 2101*ca632f55SGrant Likely 2102*ca632f55SGrant Likely /* Disable SSP */ 2103*ca632f55SGrant Likely writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 2104*ca632f55SGrant Likely SSP_CR1(pl022->virtbase)); 2105*ca632f55SGrant Likely load_ssp_default_config(pl022); 2106*ca632f55SGrant Likely 2107*ca632f55SGrant Likely status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", 2108*ca632f55SGrant Likely pl022); 2109*ca632f55SGrant Likely if (status < 0) { 2110*ca632f55SGrant Likely dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); 2111*ca632f55SGrant Likely goto err_no_irq; 2112*ca632f55SGrant Likely } 2113*ca632f55SGrant Likely 2114*ca632f55SGrant Likely /* Get DMA channels */ 2115*ca632f55SGrant Likely if (platform_info->enable_dma) { 2116*ca632f55SGrant Likely status = pl022_dma_probe(pl022); 2117*ca632f55SGrant Likely if (status != 0) 2118*ca632f55SGrant Likely platform_info->enable_dma = 0; 2119*ca632f55SGrant Likely } 2120*ca632f55SGrant Likely 2121*ca632f55SGrant Likely /* Initialize and start queue */ 2122*ca632f55SGrant Likely status = init_queue(pl022); 2123*ca632f55SGrant Likely if (status != 0) { 2124*ca632f55SGrant Likely dev_err(&adev->dev, "probe - problem initializing queue\n"); 2125*ca632f55SGrant Likely goto err_init_queue; 2126*ca632f55SGrant Likely } 2127*ca632f55SGrant Likely status = start_queue(pl022); 2128*ca632f55SGrant Likely if (status != 0) { 2129*ca632f55SGrant Likely dev_err(&adev->dev, "probe - problem starting queue\n"); 2130*ca632f55SGrant Likely goto err_start_queue; 2131*ca632f55SGrant Likely } 2132*ca632f55SGrant Likely /* Register with the SPI framework */ 2133*ca632f55SGrant Likely 

        /* Initialize and start queue */
        status = init_queue(pl022);
        if (status != 0) {
                dev_err(&adev->dev, "probe - problem initializing queue\n");
                goto err_init_queue;
        }
        status = start_queue(pl022);
        if (status != 0) {
                dev_err(&adev->dev, "probe - problem starting queue\n");
                goto err_start_queue;
        }

        /* Register with the SPI framework */
        amba_set_drvdata(adev, pl022);
        status = spi_register_master(master);
        if (status != 0) {
                dev_err(&adev->dev,
                        "probe - problem registering spi master\n");
                goto err_spi_register;
        }
        dev_dbg(dev, "probe succeeded\n");
        /*
         * Disable the silicon block pclk and any voltage domain and just
         * power it up and clock it when it's needed
         */
        amba_pclk_disable(adev);
        amba_vcore_disable(adev);
        return 0;

 err_spi_register:
 err_start_queue:
 err_init_queue:
        destroy_queue(pl022);
        pl022_dma_remove(pl022);
        free_irq(adev->irq[0], pl022);
 err_no_irq:
        clk_put(pl022->clk);
 err_no_clk:
        iounmap(pl022->virtbase);
 err_no_ioremap:
        amba_release_regions(adev);
 err_no_ioregion:
        spi_master_put(master);
 err_no_master:
 err_no_pdata:
        return status;
}

static int __devexit
pl022_remove(struct amba_device *adev)
{
        struct pl022 *pl022 = amba_get_drvdata(adev);
        int status = 0;

        if (!pl022)
                return 0;

        /* Remove the queue */
        status = destroy_queue(pl022);
        if (status != 0) {
                dev_err(&adev->dev,
                        "queue remove failed (%d)\n", status);
                return status;
        }
        load_ssp_default_config(pl022);
        pl022_dma_remove(pl022);
        free_irq(adev->irq[0], pl022);
        clk_disable(pl022->clk);
        clk_put(pl022->clk);
        iounmap(pl022->virtbase);
        amba_release_regions(adev);
        tasklet_disable(&pl022->pump_transfers);
        spi_unregister_master(pl022->master);
        spi_master_put(pl022->master);
        amba_set_drvdata(adev, NULL);
        dev_dbg(&adev->dev, "remove succeeded\n");
        return 0;
}
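
/*
 * The legacy AMBA bus suspend/resume hooks below operate on the message
 * queue rather than on individual transfers: suspend stops the queue
 * first, then briefly re-enables the core voltage and pclk so that the
 * block can be put back into its default (disabled) configuration before
 * power is dropped again. Resume only needs to restart the queue, since
 * the block is reconfigured again as transfers are set up.
 */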
#ifdef CONFIG_PM
static int pl022_suspend(struct amba_device *adev, pm_message_t state)
{
        struct pl022 *pl022 = amba_get_drvdata(adev);
        int status = 0;

        status = stop_queue(pl022);
        if (status) {
                dev_warn(&adev->dev, "suspend cannot stop queue\n");
                return status;
        }

        amba_vcore_enable(adev);
        amba_pclk_enable(adev);
        load_ssp_default_config(pl022);
        amba_pclk_disable(adev);
        amba_vcore_disable(adev);
        dev_dbg(&adev->dev, "suspended\n");
        return 0;
}

static int pl022_resume(struct amba_device *adev)
{
        struct pl022 *pl022 = amba_get_drvdata(adev);
        int status = 0;

        /* Start the queue running */
        status = start_queue(pl022);
        if (status)
                dev_err(&adev->dev, "problem starting queue (%d)\n", status);
        else
                dev_dbg(&adev->dev, "resumed\n");

        return status;
}
#else
#define pl022_suspend NULL
#define pl022_resume NULL
#endif /* CONFIG_PM */

static struct vendor_data vendor_arm = {
        .fifodepth = 8,
        .max_bpw = 16,
        .unidir = false,
        .extended_cr = false,
        .pl023 = false,
        .loopback = true,
};

static struct vendor_data vendor_st = {
        .fifodepth = 32,
        .max_bpw = 32,
        .unidir = false,
        .extended_cr = true,
        .pl023 = false,
        .loopback = true,
};

static struct vendor_data vendor_st_pl023 = {
        .fifodepth = 32,
        .max_bpw = 32,
        .unidir = false,
        .extended_cr = true,
        .pl023 = true,
        .loopback = false,
};

static struct vendor_data vendor_db5500_pl023 = {
        .fifodepth = 32,
        .max_bpw = 32,
        .unidir = false,
        .extended_cr = true,
        .pl023 = true,
        .loopback = true,
};
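
/*
 * The vendor_data above captures the per-variant differences consumed at
 * probe time: FIFO depth, maximum bits per word, whether the ST extended
 * CR0/CR1 layout is present, whether the block is an SPI-only "PL023",
 * and whether loopback mode is usable. The AMBA bus core selects an
 * entry below when (periphid & mask) == id and hands its .data pointer
 * to the probe as id->data, where it becomes pl022->vendor; the ARM
 * entry masks off the revision/configuration fields of the peripheral
 * ID, while the ST variants are matched on the full 32-bit value.
 */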
static struct amba_id pl022_ids[] = {
        {
                /*
                 * ARM PL022 variant, this has a 16bit wide
                 * and 8 locations deep TX/RX FIFO
                 */
                .id = 0x00041022,
                .mask = 0x000fffff,
                .data = &vendor_arm,
        },
        {
                /*
                 * ST Micro derivative, this has 32bit wide
                 * and 32 locations deep TX/RX FIFO
                 */
                .id = 0x01080022,
                .mask = 0xffffffff,
                .data = &vendor_st,
        },
        {
                /*
                 * ST-Ericsson derivative "PL023" (this is not
                 * an official ARM number), this is a PL022 SSP block
                 * stripped to SPI mode only, it has 32bit wide
                 * and 32 locations deep TX/RX FIFO but no extended
                 * CR0/CR1 register
                 */
                .id = 0x00080023,
                .mask = 0xffffffff,
                .data = &vendor_st_pl023,
        },
        {
                .id = 0x10080023,
                .mask = 0xffffffff,
                .data = &vendor_db5500_pl023,
        },
        { 0, 0 },
};

static struct amba_driver pl022_driver = {
        .drv = {
                .name = "ssp-pl022",
        },
        .id_table = pl022_ids,
        .probe = pl022_probe,
        .remove = __devexit_p(pl022_remove),
        .suspend = pl022_suspend,
        .resume = pl022_resume,
};

static int __init pl022_init(void)
{
        return amba_driver_register(&pl022_driver);
}

subsys_initcall(pl022_init);

static void __exit pl022_exit(void)
{
        amba_driver_unregister(&pl022_driver);
}

module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");