xref: /openbmc/linux/drivers/spi/spi-pxa2xx.c (revision ca632f556697d45d67ed5cada7cedf3ddfe0db4b)
1*ca632f55SGrant Likely /*
2*ca632f55SGrant Likely  * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3*ca632f55SGrant Likely  *
4*ca632f55SGrant Likely  * This program is free software; you can redistribute it and/or modify
5*ca632f55SGrant Likely  * it under the terms of the GNU General Public License as published by
6*ca632f55SGrant Likely  * the Free Software Foundation; either version 2 of the License, or
7*ca632f55SGrant Likely  * (at your option) any later version.
8*ca632f55SGrant Likely  *
9*ca632f55SGrant Likely  * This program is distributed in the hope that it will be useful,
10*ca632f55SGrant Likely  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11*ca632f55SGrant Likely  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12*ca632f55SGrant Likely  * GNU General Public License for more details.
13*ca632f55SGrant Likely  *
14*ca632f55SGrant Likely  * You should have received a copy of the GNU General Public License
15*ca632f55SGrant Likely  * along with this program; if not, write to the Free Software
16*ca632f55SGrant Likely  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17*ca632f55SGrant Likely  */
18*ca632f55SGrant Likely 
19*ca632f55SGrant Likely #include <linux/init.h>
20*ca632f55SGrant Likely #include <linux/module.h>
21*ca632f55SGrant Likely #include <linux/device.h>
22*ca632f55SGrant Likely #include <linux/ioport.h>
23*ca632f55SGrant Likely #include <linux/errno.h>
24*ca632f55SGrant Likely #include <linux/interrupt.h>
25*ca632f55SGrant Likely #include <linux/platform_device.h>
26*ca632f55SGrant Likely #include <linux/spi/pxa2xx_spi.h>
27*ca632f55SGrant Likely #include <linux/dma-mapping.h>
28*ca632f55SGrant Likely #include <linux/spi/spi.h>
29*ca632f55SGrant Likely #include <linux/workqueue.h>
30*ca632f55SGrant Likely #include <linux/delay.h>
31*ca632f55SGrant Likely #include <linux/gpio.h>
32*ca632f55SGrant Likely #include <linux/slab.h>
33*ca632f55SGrant Likely 
34*ca632f55SGrant Likely #include <asm/io.h>
35*ca632f55SGrant Likely #include <asm/irq.h>
36*ca632f55SGrant Likely #include <asm/delay.h>
37*ca632f55SGrant Likely 
38*ca632f55SGrant Likely 
39*ca632f55SGrant Likely MODULE_AUTHOR("Stephen Street");
40*ca632f55SGrant Likely MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
41*ca632f55SGrant Likely MODULE_LICENSE("GPL");
42*ca632f55SGrant Likely MODULE_ALIAS("platform:pxa2xx-spi");
43*ca632f55SGrant Likely 
44*ca632f55SGrant Likely #define MAX_BUSES 3
45*ca632f55SGrant Likely 
46*ca632f55SGrant Likely #define TIMOUT_DFLT		1000
47*ca632f55SGrant Likely 
48*ca632f55SGrant Likely #define DMA_INT_MASK		(DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
49*ca632f55SGrant Likely #define RESET_DMA_CHANNEL	(DCSR_NODESC | DMA_INT_MASK)
50*ca632f55SGrant Likely #define IS_DMA_ALIGNED(x)	((((u32)(x)) & 0x07) == 0)
51*ca632f55SGrant Likely #define MAX_DMA_LEN		8191
52*ca632f55SGrant Likely #define DMA_ALIGNMENT		8
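/*
 * The pxa DMA engine is only used for buffers that satisfy IS_DMA_ALIGNED
 * (8-byte alignment), and MAX_DMA_LEN corresponds to the largest length a
 * single DMA descriptor can carry; longer transfers fall back to PIO in
 * pump_transfers().
 */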
53*ca632f55SGrant Likely 
54*ca632f55SGrant Likely /*
55*ca632f55SGrant Likely  * Mask for testing SSCR1 changes that require an SSP restart: basically
56*ca632f55SGrant Likely  * everything except the service and interrupt enables.  The PXA270
57*ca632f55SGrant Likely  * developer manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need
58*ca632f55SGrant Likely  * to be in this list, but the PXA255 developer manual says all bits
59*ca632f55SGrant Likely  * (without really meaning the service and interrupt enables)
60*ca632f55SGrant Likely  */
61*ca632f55SGrant Likely #define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
62*ca632f55SGrant Likely 				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
63*ca632f55SGrant Likely 				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
64*ca632f55SGrant Likely 				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
65*ca632f55SGrant Likely 				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
66*ca632f55SGrant Likely 				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
67*ca632f55SGrant Likely 
68*ca632f55SGrant Likely #define DEFINE_SSP_REG(reg, off) \
69*ca632f55SGrant Likely static inline u32 read_##reg(void const __iomem *p) \
70*ca632f55SGrant Likely { return __raw_readl(p + (off)); } \
71*ca632f55SGrant Likely \
72*ca632f55SGrant Likely static inline void write_##reg(u32 v, void __iomem *p) \
73*ca632f55SGrant Likely { __raw_writel(v, p + (off)); }
74*ca632f55SGrant Likely 
75*ca632f55SGrant Likely DEFINE_SSP_REG(SSCR0, 0x00)
76*ca632f55SGrant Likely DEFINE_SSP_REG(SSCR1, 0x04)
77*ca632f55SGrant Likely DEFINE_SSP_REG(SSSR, 0x08)
78*ca632f55SGrant Likely DEFINE_SSP_REG(SSITR, 0x0c)
79*ca632f55SGrant Likely DEFINE_SSP_REG(SSDR, 0x10)
80*ca632f55SGrant Likely DEFINE_SSP_REG(SSTO, 0x28)
81*ca632f55SGrant Likely DEFINE_SSP_REG(SSPSP, 0x2c)
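/*
 * For reference, each DEFINE_SSP_REG() invocation above generates a pair of
 * register accessors; DEFINE_SSP_REG(SSCR0, 0x00) expands to the equivalent of:
 *
 *	static inline u32 read_SSCR0(void const __iomem *p)
 *	{ return __raw_readl(p + 0x00); }
 *
 *	static inline void write_SSCR0(u32 v, void __iomem *p)
 *	{ __raw_writel(v, p + 0x00); }
 */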
82*ca632f55SGrant Likely 
83*ca632f55SGrant Likely #define START_STATE ((void*)0)
84*ca632f55SGrant Likely #define RUNNING_STATE ((void*)1)
85*ca632f55SGrant Likely #define DONE_STATE ((void*)2)
86*ca632f55SGrant Likely #define ERROR_STATE ((void*)-1)
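/*
 * Message states are stored directly in spi_message->state as these token
 * pointers rather than as a separate enum.
 */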
87*ca632f55SGrant Likely 
88*ca632f55SGrant Likely #define QUEUE_RUNNING 0
89*ca632f55SGrant Likely #define QUEUE_STOPPED 1
90*ca632f55SGrant Likely 
91*ca632f55SGrant Likely struct driver_data {
92*ca632f55SGrant Likely 	/* Driver model hookup */
93*ca632f55SGrant Likely 	struct platform_device *pdev;
94*ca632f55SGrant Likely 
95*ca632f55SGrant Likely 	/* SSP Info */
96*ca632f55SGrant Likely 	struct ssp_device *ssp;
97*ca632f55SGrant Likely 
98*ca632f55SGrant Likely 	/* SPI framework hookup */
99*ca632f55SGrant Likely 	enum pxa_ssp_type ssp_type;
100*ca632f55SGrant Likely 	struct spi_master *master;
101*ca632f55SGrant Likely 
102*ca632f55SGrant Likely 	/* PXA hookup */
103*ca632f55SGrant Likely 	struct pxa2xx_spi_master *master_info;
104*ca632f55SGrant Likely 
105*ca632f55SGrant Likely 	/* DMA setup stuff */
106*ca632f55SGrant Likely 	int rx_channel;
107*ca632f55SGrant Likely 	int tx_channel;
108*ca632f55SGrant Likely 	u32 *null_dma_buf;
109*ca632f55SGrant Likely 
110*ca632f55SGrant Likely 	/* SSP register addresses */
111*ca632f55SGrant Likely 	void __iomem *ioaddr;
112*ca632f55SGrant Likely 	u32 ssdr_physical;
113*ca632f55SGrant Likely 
114*ca632f55SGrant Likely 	/* SSP masks*/
115*ca632f55SGrant Likely 	u32 dma_cr1;
116*ca632f55SGrant Likely 	u32 int_cr1;
117*ca632f55SGrant Likely 	u32 clear_sr;
118*ca632f55SGrant Likely 	u32 mask_sr;
119*ca632f55SGrant Likely 
120*ca632f55SGrant Likely 	/* Driver message queue */
121*ca632f55SGrant Likely 	struct workqueue_struct	*workqueue;
122*ca632f55SGrant Likely 	struct work_struct pump_messages;
123*ca632f55SGrant Likely 	spinlock_t lock;
124*ca632f55SGrant Likely 	struct list_head queue;
125*ca632f55SGrant Likely 	int busy;
126*ca632f55SGrant Likely 	int run;
127*ca632f55SGrant Likely 
128*ca632f55SGrant Likely 	/* Message Transfer pump */
129*ca632f55SGrant Likely 	struct tasklet_struct pump_transfers;
130*ca632f55SGrant Likely 
131*ca632f55SGrant Likely 	/* Current message transfer state info */
132*ca632f55SGrant Likely 	struct spi_message* cur_msg;
133*ca632f55SGrant Likely 	struct spi_transfer* cur_transfer;
134*ca632f55SGrant Likely 	struct chip_data *cur_chip;
135*ca632f55SGrant Likely 	size_t len;
136*ca632f55SGrant Likely 	void *tx;
137*ca632f55SGrant Likely 	void *tx_end;
138*ca632f55SGrant Likely 	void *rx;
139*ca632f55SGrant Likely 	void *rx_end;
140*ca632f55SGrant Likely 	int dma_mapped;
141*ca632f55SGrant Likely 	dma_addr_t rx_dma;
142*ca632f55SGrant Likely 	dma_addr_t tx_dma;
143*ca632f55SGrant Likely 	size_t rx_map_len;
144*ca632f55SGrant Likely 	size_t tx_map_len;
145*ca632f55SGrant Likely 	u8 n_bytes;
146*ca632f55SGrant Likely 	u32 dma_width;
147*ca632f55SGrant Likely 	int (*write)(struct driver_data *drv_data);
148*ca632f55SGrant Likely 	int (*read)(struct driver_data *drv_data);
149*ca632f55SGrant Likely 	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
150*ca632f55SGrant Likely 	void (*cs_control)(u32 command);
151*ca632f55SGrant Likely };
152*ca632f55SGrant Likely 
153*ca632f55SGrant Likely struct chip_data {
154*ca632f55SGrant Likely 	u32 cr0;
155*ca632f55SGrant Likely 	u32 cr1;
156*ca632f55SGrant Likely 	u32 psp;
157*ca632f55SGrant Likely 	u32 timeout;
158*ca632f55SGrant Likely 	u8 n_bytes;
159*ca632f55SGrant Likely 	u32 dma_width;
160*ca632f55SGrant Likely 	u32 dma_burst_size;
161*ca632f55SGrant Likely 	u32 threshold;
162*ca632f55SGrant Likely 	u32 dma_threshold;
163*ca632f55SGrant Likely 	u8 enable_dma;
164*ca632f55SGrant Likely 	u8 bits_per_word;
165*ca632f55SGrant Likely 	u32 speed_hz;
166*ca632f55SGrant Likely 	union {
167*ca632f55SGrant Likely 		int gpio_cs;
168*ca632f55SGrant Likely 		unsigned int frm;
169*ca632f55SGrant Likely 	};
170*ca632f55SGrant Likely 	int gpio_cs_inverted;
171*ca632f55SGrant Likely 	int (*write)(struct driver_data *drv_data);
172*ca632f55SGrant Likely 	int (*read)(struct driver_data *drv_data);
173*ca632f55SGrant Likely 	void (*cs_control)(u32 command);
174*ca632f55SGrant Likely };
175*ca632f55SGrant Likely 
176*ca632f55SGrant Likely static void pump_messages(struct work_struct *work);
177*ca632f55SGrant Likely 
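/*
 * Chip select handling: on CE4100 the frame is selected through the SSSR
 * frame-select field, otherwise a board-supplied cs_control() callback is
 * used when present, with a plain GPIO chip select as the fallback.
 */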
178*ca632f55SGrant Likely static void cs_assert(struct driver_data *drv_data)
179*ca632f55SGrant Likely {
180*ca632f55SGrant Likely 	struct chip_data *chip = drv_data->cur_chip;
181*ca632f55SGrant Likely 
182*ca632f55SGrant Likely 	if (drv_data->ssp_type == CE4100_SSP) {
183*ca632f55SGrant Likely 		write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
184*ca632f55SGrant Likely 		return;
185*ca632f55SGrant Likely 	}
186*ca632f55SGrant Likely 
187*ca632f55SGrant Likely 	if (chip->cs_control) {
188*ca632f55SGrant Likely 		chip->cs_control(PXA2XX_CS_ASSERT);
189*ca632f55SGrant Likely 		return;
190*ca632f55SGrant Likely 	}
191*ca632f55SGrant Likely 
192*ca632f55SGrant Likely 	if (gpio_is_valid(chip->gpio_cs))
193*ca632f55SGrant Likely 		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
194*ca632f55SGrant Likely }
195*ca632f55SGrant Likely 
196*ca632f55SGrant Likely static void cs_deassert(struct driver_data *drv_data)
197*ca632f55SGrant Likely {
198*ca632f55SGrant Likely 	struct chip_data *chip = drv_data->cur_chip;
199*ca632f55SGrant Likely 
200*ca632f55SGrant Likely 	if (drv_data->ssp_type == CE4100_SSP)
201*ca632f55SGrant Likely 		return;
202*ca632f55SGrant Likely 
203*ca632f55SGrant Likely 	if (chip->cs_control) {
204*ca632f55SGrant Likely 		chip->cs_control(PXA2XX_CS_DEASSERT);
205*ca632f55SGrant Likely 		return;
206*ca632f55SGrant Likely 	}
207*ca632f55SGrant Likely 
208*ca632f55SGrant Likely 	if (gpio_is_valid(chip->gpio_cs))
209*ca632f55SGrant Likely 		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
210*ca632f55SGrant Likely }
211*ca632f55SGrant Likely 
212*ca632f55SGrant Likely static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
213*ca632f55SGrant Likely {
214*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
215*ca632f55SGrant Likely 
216*ca632f55SGrant Likely 	if (drv_data->ssp_type == CE4100_SSP)
217*ca632f55SGrant Likely 		val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
218*ca632f55SGrant Likely 
219*ca632f55SGrant Likely 	write_SSSR(val, reg);
220*ca632f55SGrant Likely }
221*ca632f55SGrant Likely 
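/*
 * PXA25x and CE4100 SSPs are grouped together here; callers use this test
 * to skip functionality those variants do not provide, such as writing the
 * SSTO timeout register elsewhere in this driver.
 */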
222*ca632f55SGrant Likely static int pxa25x_ssp_comp(struct driver_data *drv_data)
223*ca632f55SGrant Likely {
224*ca632f55SGrant Likely 	if (drv_data->ssp_type == PXA25x_SSP)
225*ca632f55SGrant Likely 		return 1;
226*ca632f55SGrant Likely 	if (drv_data->ssp_type == CE4100_SSP)
227*ca632f55SGrant Likely 		return 1;
228*ca632f55SGrant Likely 	return 0;
229*ca632f55SGrant Likely }
230*ca632f55SGrant Likely 
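/*
 * Drain the receive fifo and wait for the port to go idle.  The return
 * value is the remaining spin budget, so 0 means the SSP never cleared
 * its busy bit and the flush effectively timed out.
 */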
231*ca632f55SGrant Likely static int flush(struct driver_data *drv_data)
232*ca632f55SGrant Likely {
233*ca632f55SGrant Likely 	unsigned long limit = loops_per_jiffy << 1;
234*ca632f55SGrant Likely 
235*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
236*ca632f55SGrant Likely 
237*ca632f55SGrant Likely 	do {
238*ca632f55SGrant Likely 		while (read_SSSR(reg) & SSSR_RNE) {
239*ca632f55SGrant Likely 			read_SSDR(reg);
240*ca632f55SGrant Likely 		}
241*ca632f55SGrant Likely 	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
242*ca632f55SGrant Likely 	write_SSSR_CS(drv_data, SSSR_ROR);
243*ca632f55SGrant Likely 
244*ca632f55SGrant Likely 	return limit;
245*ca632f55SGrant Likely }
246*ca632f55SGrant Likely 
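/*
 * PIO helpers: the *_writer functions push one word into the tx fifo and
 * return 1, or return 0 when the fifo is full or the tx buffer is done;
 * the *_reader functions drain the rx fifo and return nonzero once the
 * rx buffer has been completely filled.
 */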
247*ca632f55SGrant Likely static int null_writer(struct driver_data *drv_data)
248*ca632f55SGrant Likely {
249*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
250*ca632f55SGrant Likely 	u8 n_bytes = drv_data->n_bytes;
251*ca632f55SGrant Likely 
252*ca632f55SGrant Likely 	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
253*ca632f55SGrant Likely 		|| (drv_data->tx == drv_data->tx_end))
254*ca632f55SGrant Likely 		return 0;
255*ca632f55SGrant Likely 
256*ca632f55SGrant Likely 	write_SSDR(0, reg);
257*ca632f55SGrant Likely 	drv_data->tx += n_bytes;
258*ca632f55SGrant Likely 
259*ca632f55SGrant Likely 	return 1;
260*ca632f55SGrant Likely }
261*ca632f55SGrant Likely 
262*ca632f55SGrant Likely static int null_reader(struct driver_data *drv_data)
263*ca632f55SGrant Likely {
264*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
265*ca632f55SGrant Likely 	u8 n_bytes = drv_data->n_bytes;
266*ca632f55SGrant Likely 
267*ca632f55SGrant Likely 	while ((read_SSSR(reg) & SSSR_RNE)
268*ca632f55SGrant Likely 		&& (drv_data->rx < drv_data->rx_end)) {
269*ca632f55SGrant Likely 		read_SSDR(reg);
270*ca632f55SGrant Likely 		drv_data->rx += n_bytes;
271*ca632f55SGrant Likely 	}
272*ca632f55SGrant Likely 
273*ca632f55SGrant Likely 	return drv_data->rx == drv_data->rx_end;
274*ca632f55SGrant Likely }
275*ca632f55SGrant Likely 
276*ca632f55SGrant Likely static int u8_writer(struct driver_data *drv_data)
277*ca632f55SGrant Likely {
278*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
279*ca632f55SGrant Likely 
280*ca632f55SGrant Likely 	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
281*ca632f55SGrant Likely 		|| (drv_data->tx == drv_data->tx_end))
282*ca632f55SGrant Likely 		return 0;
283*ca632f55SGrant Likely 
284*ca632f55SGrant Likely 	write_SSDR(*(u8 *)(drv_data->tx), reg);
285*ca632f55SGrant Likely 	++drv_data->tx;
286*ca632f55SGrant Likely 
287*ca632f55SGrant Likely 	return 1;
288*ca632f55SGrant Likely }
289*ca632f55SGrant Likely 
290*ca632f55SGrant Likely static int u8_reader(struct driver_data *drv_data)
291*ca632f55SGrant Likely {
292*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
293*ca632f55SGrant Likely 
294*ca632f55SGrant Likely 	while ((read_SSSR(reg) & SSSR_RNE)
295*ca632f55SGrant Likely 		&& (drv_data->rx < drv_data->rx_end)) {
296*ca632f55SGrant Likely 		*(u8 *)(drv_data->rx) = read_SSDR(reg);
297*ca632f55SGrant Likely 		++drv_data->rx;
298*ca632f55SGrant Likely 	}
299*ca632f55SGrant Likely 
300*ca632f55SGrant Likely 	return drv_data->rx == drv_data->rx_end;
301*ca632f55SGrant Likely }
302*ca632f55SGrant Likely 
303*ca632f55SGrant Likely static int u16_writer(struct driver_data *drv_data)
304*ca632f55SGrant Likely {
305*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
306*ca632f55SGrant Likely 
307*ca632f55SGrant Likely 	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
308*ca632f55SGrant Likely 		|| (drv_data->tx == drv_data->tx_end))
309*ca632f55SGrant Likely 		return 0;
310*ca632f55SGrant Likely 
311*ca632f55SGrant Likely 	write_SSDR(*(u16 *)(drv_data->tx), reg);
312*ca632f55SGrant Likely 	drv_data->tx += 2;
313*ca632f55SGrant Likely 
314*ca632f55SGrant Likely 	return 1;
315*ca632f55SGrant Likely }
316*ca632f55SGrant Likely 
317*ca632f55SGrant Likely static int u16_reader(struct driver_data *drv_data)
318*ca632f55SGrant Likely {
319*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
320*ca632f55SGrant Likely 
321*ca632f55SGrant Likely 	while ((read_SSSR(reg) & SSSR_RNE)
322*ca632f55SGrant Likely 		&& (drv_data->rx < drv_data->rx_end)) {
323*ca632f55SGrant Likely 		*(u16 *)(drv_data->rx) = read_SSDR(reg);
324*ca632f55SGrant Likely 		drv_data->rx += 2;
325*ca632f55SGrant Likely 	}
326*ca632f55SGrant Likely 
327*ca632f55SGrant Likely 	return drv_data->rx == drv_data->rx_end;
328*ca632f55SGrant Likely }
329*ca632f55SGrant Likely 
330*ca632f55SGrant Likely static int u32_writer(struct driver_data *drv_data)
331*ca632f55SGrant Likely {
332*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
333*ca632f55SGrant Likely 
334*ca632f55SGrant Likely 	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
335*ca632f55SGrant Likely 		|| (drv_data->tx == drv_data->tx_end))
336*ca632f55SGrant Likely 		return 0;
337*ca632f55SGrant Likely 
338*ca632f55SGrant Likely 	write_SSDR(*(u32 *)(drv_data->tx), reg);
339*ca632f55SGrant Likely 	drv_data->tx += 4;
340*ca632f55SGrant Likely 
341*ca632f55SGrant Likely 	return 1;
342*ca632f55SGrant Likely }
343*ca632f55SGrant Likely 
344*ca632f55SGrant Likely static int u32_reader(struct driver_data *drv_data)
345*ca632f55SGrant Likely {
346*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
347*ca632f55SGrant Likely 
348*ca632f55SGrant Likely 	while ((read_SSSR(reg) & SSSR_RNE)
349*ca632f55SGrant Likely 		&& (drv_data->rx < drv_data->rx_end)) {
350*ca632f55SGrant Likely 		*(u32 *)(drv_data->rx) = read_SSDR(reg);
351*ca632f55SGrant Likely 		drv_data->rx += 4;
352*ca632f55SGrant Likely 	}
353*ca632f55SGrant Likely 
354*ca632f55SGrant Likely 	return drv_data->rx == drv_data->rx_end;
355*ca632f55SGrant Likely }
356*ca632f55SGrant Likely 
357*ca632f55SGrant Likely static void *next_transfer(struct driver_data *drv_data)
358*ca632f55SGrant Likely {
359*ca632f55SGrant Likely 	struct spi_message *msg = drv_data->cur_msg;
360*ca632f55SGrant Likely 	struct spi_transfer *trans = drv_data->cur_transfer;
361*ca632f55SGrant Likely 
362*ca632f55SGrant Likely 	/* Move to next transfer */
363*ca632f55SGrant Likely 	if (trans->transfer_list.next != &msg->transfers) {
364*ca632f55SGrant Likely 		drv_data->cur_transfer =
365*ca632f55SGrant Likely 			list_entry(trans->transfer_list.next,
366*ca632f55SGrant Likely 					struct spi_transfer,
367*ca632f55SGrant Likely 					transfer_list);
368*ca632f55SGrant Likely 		return RUNNING_STATE;
369*ca632f55SGrant Likely 	} else
370*ca632f55SGrant Likely 		return DONE_STATE;
371*ca632f55SGrant Likely }
372*ca632f55SGrant Likely 
373*ca632f55SGrant Likely static int map_dma_buffers(struct driver_data *drv_data)
374*ca632f55SGrant Likely {
375*ca632f55SGrant Likely 	struct spi_message *msg = drv_data->cur_msg;
376*ca632f55SGrant Likely 	struct device *dev = &msg->spi->dev;
377*ca632f55SGrant Likely 
378*ca632f55SGrant Likely 	if (!drv_data->cur_chip->enable_dma)
379*ca632f55SGrant Likely 		return 0;
380*ca632f55SGrant Likely 
381*ca632f55SGrant Likely 	if (msg->is_dma_mapped)
382*ca632f55SGrant Likely 		return  drv_data->rx_dma && drv_data->tx_dma;
383*ca632f55SGrant Likely 
384*ca632f55SGrant Likely 	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
385*ca632f55SGrant Likely 		return 0;
386*ca632f55SGrant Likely 
387*ca632f55SGrant Likely 	/* Modify setup if rx buffer is null */
388*ca632f55SGrant Likely 	if (drv_data->rx == NULL) {
389*ca632f55SGrant Likely 		*drv_data->null_dma_buf = 0;
390*ca632f55SGrant Likely 		drv_data->rx = drv_data->null_dma_buf;
391*ca632f55SGrant Likely 		drv_data->rx_map_len = 4;
392*ca632f55SGrant Likely 	} else
393*ca632f55SGrant Likely 		drv_data->rx_map_len = drv_data->len;
394*ca632f55SGrant Likely 
395*ca632f55SGrant Likely 
396*ca632f55SGrant Likely 	/* Modify setup if tx buffer is null */
397*ca632f55SGrant Likely 	if (drv_data->tx == NULL) {
398*ca632f55SGrant Likely 		*drv_data->null_dma_buf = 0;
399*ca632f55SGrant Likely 		drv_data->tx = drv_data->null_dma_buf;
400*ca632f55SGrant Likely 		drv_data->tx_map_len = 4;
401*ca632f55SGrant Likely 	} else
402*ca632f55SGrant Likely 		drv_data->tx_map_len = drv_data->len;
403*ca632f55SGrant Likely 
404*ca632f55SGrant Likely 	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
405*ca632f55SGrant Likely 	 * so we flush the cache *before* invalidating it, in case
406*ca632f55SGrant Likely 	 * the tx and rx buffers overlap.
407*ca632f55SGrant Likely 	 */
408*ca632f55SGrant Likely 	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
409*ca632f55SGrant Likely 					drv_data->tx_map_len, DMA_TO_DEVICE);
410*ca632f55SGrant Likely 	if (dma_mapping_error(dev, drv_data->tx_dma))
411*ca632f55SGrant Likely 		return 0;
412*ca632f55SGrant Likely 
413*ca632f55SGrant Likely 	/* Stream map the rx buffer */
414*ca632f55SGrant Likely 	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
415*ca632f55SGrant Likely 					drv_data->rx_map_len, DMA_FROM_DEVICE);
416*ca632f55SGrant Likely 	if (dma_mapping_error(dev, drv_data->rx_dma)) {
417*ca632f55SGrant Likely 		dma_unmap_single(dev, drv_data->tx_dma,
418*ca632f55SGrant Likely 					drv_data->tx_map_len, DMA_TO_DEVICE);
419*ca632f55SGrant Likely 		return 0;
420*ca632f55SGrant Likely 	}
421*ca632f55SGrant Likely 
422*ca632f55SGrant Likely 	return 1;
423*ca632f55SGrant Likely }
424*ca632f55SGrant Likely 
425*ca632f55SGrant Likely static void unmap_dma_buffers(struct driver_data *drv_data)
426*ca632f55SGrant Likely {
427*ca632f55SGrant Likely 	struct device *dev;
428*ca632f55SGrant Likely 
429*ca632f55SGrant Likely 	if (!drv_data->dma_mapped)
430*ca632f55SGrant Likely 		return;
431*ca632f55SGrant Likely 
432*ca632f55SGrant Likely 	if (!drv_data->cur_msg->is_dma_mapped) {
433*ca632f55SGrant Likely 		dev = &drv_data->cur_msg->spi->dev;
434*ca632f55SGrant Likely 		dma_unmap_single(dev, drv_data->rx_dma,
435*ca632f55SGrant Likely 					drv_data->rx_map_len, DMA_FROM_DEVICE);
436*ca632f55SGrant Likely 		dma_unmap_single(dev, drv_data->tx_dma,
437*ca632f55SGrant Likely 					drv_data->tx_map_len, DMA_TO_DEVICE);
438*ca632f55SGrant Likely 	}
439*ca632f55SGrant Likely 
440*ca632f55SGrant Likely 	drv_data->dma_mapped = 0;
441*ca632f55SGrant Likely }
442*ca632f55SGrant Likely 
443*ca632f55SGrant Likely /* caller already set message->status; dma and pio irqs are blocked */
444*ca632f55SGrant Likely static void giveback(struct driver_data *drv_data)
445*ca632f55SGrant Likely {
446*ca632f55SGrant Likely 	struct spi_transfer* last_transfer;
447*ca632f55SGrant Likely 	unsigned long flags;
448*ca632f55SGrant Likely 	struct spi_message *msg;
449*ca632f55SGrant Likely 
450*ca632f55SGrant Likely 	spin_lock_irqsave(&drv_data->lock, flags);
451*ca632f55SGrant Likely 	msg = drv_data->cur_msg;
452*ca632f55SGrant Likely 	drv_data->cur_msg = NULL;
453*ca632f55SGrant Likely 	drv_data->cur_transfer = NULL;
454*ca632f55SGrant Likely 	queue_work(drv_data->workqueue, &drv_data->pump_messages);
455*ca632f55SGrant Likely 	spin_unlock_irqrestore(&drv_data->lock, flags);
456*ca632f55SGrant Likely 
457*ca632f55SGrant Likely 	last_transfer = list_entry(msg->transfers.prev,
458*ca632f55SGrant Likely 					struct spi_transfer,
459*ca632f55SGrant Likely 					transfer_list);
460*ca632f55SGrant Likely 
461*ca632f55SGrant Likely 	/* Delay if requested before any change in chip select */
462*ca632f55SGrant Likely 	if (last_transfer->delay_usecs)
463*ca632f55SGrant Likely 		udelay(last_transfer->delay_usecs);
464*ca632f55SGrant Likely 
465*ca632f55SGrant Likely 	/* Drop chip select UNLESS cs_change is true or we are returning
466*ca632f55SGrant Likely 	 * a message with an error, or next message is for another chip
467*ca632f55SGrant Likely 	 */
468*ca632f55SGrant Likely 	if (!last_transfer->cs_change)
469*ca632f55SGrant Likely 		cs_deassert(drv_data);
470*ca632f55SGrant Likely 	else {
471*ca632f55SGrant Likely 		struct spi_message *next_msg;
472*ca632f55SGrant Likely 
473*ca632f55SGrant Likely 		/* Holding of cs was hinted, but we need to make sure
474*ca632f55SGrant Likely 		 * the next message is for the same chip.  Don't waste
475*ca632f55SGrant Likely 		 * time with the following tests unless this was hinted.
476*ca632f55SGrant Likely 		 *
477*ca632f55SGrant Likely 		 * We cannot postpone this until pump_messages, because
478*ca632f55SGrant Likely 		 * after calling msg->complete (below) the driver that
479*ca632f55SGrant Likely 		 * sent the current message could be unloaded, which
480*ca632f55SGrant Likely 		 * could invalidate the cs_control() callback...
481*ca632f55SGrant Likely 		 */
482*ca632f55SGrant Likely 
483*ca632f55SGrant Likely 		/* get a pointer to the next message, if any */
484*ca632f55SGrant Likely 		spin_lock_irqsave(&drv_data->lock, flags);
485*ca632f55SGrant Likely 		if (list_empty(&drv_data->queue))
486*ca632f55SGrant Likely 			next_msg = NULL;
487*ca632f55SGrant Likely 		else
488*ca632f55SGrant Likely 			next_msg = list_entry(drv_data->queue.next,
489*ca632f55SGrant Likely 					struct spi_message, queue);
490*ca632f55SGrant Likely 		spin_unlock_irqrestore(&drv_data->lock, flags);
491*ca632f55SGrant Likely 
492*ca632f55SGrant Likely 		/* see if the next and current messages point
493*ca632f55SGrant Likely 		 * to the same chip
494*ca632f55SGrant Likely 		 */
495*ca632f55SGrant Likely 		if (next_msg && next_msg->spi != msg->spi)
496*ca632f55SGrant Likely 			next_msg = NULL;
497*ca632f55SGrant Likely 		if (!next_msg || msg->state == ERROR_STATE)
498*ca632f55SGrant Likely 			cs_deassert(drv_data);
499*ca632f55SGrant Likely 	}
500*ca632f55SGrant Likely 
501*ca632f55SGrant Likely 	msg->state = NULL;
502*ca632f55SGrant Likely 	if (msg->complete)
503*ca632f55SGrant Likely 		msg->complete(msg->context);
504*ca632f55SGrant Likely 
505*ca632f55SGrant Likely 	drv_data->cur_chip = NULL;
506*ca632f55SGrant Likely }
507*ca632f55SGrant Likely 
508*ca632f55SGrant Likely static int wait_ssp_rx_stall(void const __iomem *ioaddr)
509*ca632f55SGrant Likely {
510*ca632f55SGrant Likely 	unsigned long limit = loops_per_jiffy << 1;
511*ca632f55SGrant Likely 
512*ca632f55SGrant Likely 	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
513*ca632f55SGrant Likely 		cpu_relax();
514*ca632f55SGrant Likely 
515*ca632f55SGrant Likely 	return limit;
516*ca632f55SGrant Likely }
517*ca632f55SGrant Likely 
518*ca632f55SGrant Likely static int wait_dma_channel_stop(int channel)
519*ca632f55SGrant Likely {
520*ca632f55SGrant Likely 	unsigned long limit = loops_per_jiffy << 1;
521*ca632f55SGrant Likely 
522*ca632f55SGrant Likely 	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
523*ca632f55SGrant Likely 		cpu_relax();
524*ca632f55SGrant Likely 
525*ca632f55SGrant Likely 	return limit;
526*ca632f55SGrant Likely }
527*ca632f55SGrant Likely 
528*ca632f55SGrant Likely static void dma_error_stop(struct driver_data *drv_data, const char *msg)
529*ca632f55SGrant Likely {
530*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
531*ca632f55SGrant Likely 
532*ca632f55SGrant Likely 	/* Stop and reset */
533*ca632f55SGrant Likely 	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
534*ca632f55SGrant Likely 	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
535*ca632f55SGrant Likely 	write_SSSR_CS(drv_data, drv_data->clear_sr);
536*ca632f55SGrant Likely 	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
537*ca632f55SGrant Likely 	if (!pxa25x_ssp_comp(drv_data))
538*ca632f55SGrant Likely 		write_SSTO(0, reg);
539*ca632f55SGrant Likely 	flush(drv_data);
540*ca632f55SGrant Likely 	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
541*ca632f55SGrant Likely 
542*ca632f55SGrant Likely 	unmap_dma_buffers(drv_data);
543*ca632f55SGrant Likely 
544*ca632f55SGrant Likely 	dev_err(&drv_data->pdev->dev, "%s\n", msg);
545*ca632f55SGrant Likely 
546*ca632f55SGrant Likely 	drv_data->cur_msg->state = ERROR_STATE;
547*ca632f55SGrant Likely 	tasklet_schedule(&drv_data->pump_transfers);
548*ca632f55SGrant Likely }
549*ca632f55SGrant Likely 
550*ca632f55SGrant Likely static void dma_transfer_complete(struct driver_data *drv_data)
551*ca632f55SGrant Likely {
552*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
553*ca632f55SGrant Likely 	struct spi_message *msg = drv_data->cur_msg;
554*ca632f55SGrant Likely 
555*ca632f55SGrant Likely 	/* Clear and disable interrupts on SSP and DMA channels*/
556*ca632f55SGrant Likely 	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
557*ca632f55SGrant Likely 	write_SSSR_CS(drv_data, drv_data->clear_sr);
558*ca632f55SGrant Likely 	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
559*ca632f55SGrant Likely 	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
560*ca632f55SGrant Likely 
561*ca632f55SGrant Likely 	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
562*ca632f55SGrant Likely 		dev_err(&drv_data->pdev->dev,
563*ca632f55SGrant Likely 			"dma_handler: dma rx channel stop failed\n");
564*ca632f55SGrant Likely 
565*ca632f55SGrant Likely 	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
566*ca632f55SGrant Likely 		dev_err(&drv_data->pdev->dev,
567*ca632f55SGrant Likely 			"dma_transfer: ssp rx stall failed\n");
568*ca632f55SGrant Likely 
569*ca632f55SGrant Likely 	unmap_dma_buffers(drv_data);
570*ca632f55SGrant Likely 
571*ca632f55SGrant Likely 	/* update the buffer pointer for the amount completed in dma */
572*ca632f55SGrant Likely 	drv_data->rx += drv_data->len -
573*ca632f55SGrant Likely 			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);
574*ca632f55SGrant Likely 
575*ca632f55SGrant Likely 	/* Read trailing data from the fifo; it does not matter how many
576*ca632f55SGrant Likely 	 * bytes are in the fifo, just read until the buffer is full
577*ca632f55SGrant Likely 	 * or the fifo is empty, whichever occurs first. */
578*ca632f55SGrant Likely 	drv_data->read(drv_data);
579*ca632f55SGrant Likely 
580*ca632f55SGrant Likely 	/* return count of what was actually read */
581*ca632f55SGrant Likely 	msg->actual_length += drv_data->len -
582*ca632f55SGrant Likely 				(drv_data->rx_end - drv_data->rx);
583*ca632f55SGrant Likely 
584*ca632f55SGrant Likely 	/* Transfer delays and chip select release are
585*ca632f55SGrant Likely 	 * handled in pump_transfers or giveback
586*ca632f55SGrant Likely 	 */
587*ca632f55SGrant Likely 
588*ca632f55SGrant Likely 	/* Move to next transfer */
589*ca632f55SGrant Likely 	msg->state = next_transfer(drv_data);
590*ca632f55SGrant Likely 
591*ca632f55SGrant Likely 	/* Schedule transfer tasklet */
592*ca632f55SGrant Likely 	tasklet_schedule(&drv_data->pump_transfers);
593*ca632f55SGrant Likely }
594*ca632f55SGrant Likely 
595*ca632f55SGrant Likely static void dma_handler(int channel, void *data)
596*ca632f55SGrant Likely {
597*ca632f55SGrant Likely 	struct driver_data *drv_data = data;
598*ca632f55SGrant Likely 	u32 irq_status = DCSR(channel) & DMA_INT_MASK;
599*ca632f55SGrant Likely 
600*ca632f55SGrant Likely 	if (irq_status & DCSR_BUSERR) {
601*ca632f55SGrant Likely 
602*ca632f55SGrant Likely 		if (channel == drv_data->tx_channel)
603*ca632f55SGrant Likely 			dma_error_stop(drv_data,
604*ca632f55SGrant Likely 					"dma_handler: "
605*ca632f55SGrant Likely 					"bad bus address on tx channel");
606*ca632f55SGrant Likely 		else
607*ca632f55SGrant Likely 			dma_error_stop(drv_data,
608*ca632f55SGrant Likely 					"dma_handler: "
609*ca632f55SGrant Likely 					"bad bus address on rx channel");
610*ca632f55SGrant Likely 		return;
611*ca632f55SGrant Likely 	}
612*ca632f55SGrant Likely 
613*ca632f55SGrant Likely 	/* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
614*ca632f55SGrant Likely 	if ((channel == drv_data->tx_channel)
615*ca632f55SGrant Likely 		&& (irq_status & DCSR_ENDINTR)
616*ca632f55SGrant Likely 		&& (drv_data->ssp_type == PXA25x_SSP)) {
617*ca632f55SGrant Likely 
618*ca632f55SGrant Likely 		/* Wait for rx to stall */
619*ca632f55SGrant Likely 		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
620*ca632f55SGrant Likely 			dev_err(&drv_data->pdev->dev,
621*ca632f55SGrant Likely 				"dma_handler: ssp rx stall failed\n");
622*ca632f55SGrant Likely 
623*ca632f55SGrant Likely 		/* finish this transfer, start the next */
624*ca632f55SGrant Likely 		dma_transfer_complete(drv_data);
625*ca632f55SGrant Likely 	}
626*ca632f55SGrant Likely }
627*ca632f55SGrant Likely 
628*ca632f55SGrant Likely static irqreturn_t dma_transfer(struct driver_data *drv_data)
629*ca632f55SGrant Likely {
630*ca632f55SGrant Likely 	u32 irq_status;
631*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
632*ca632f55SGrant Likely 
633*ca632f55SGrant Likely 	irq_status = read_SSSR(reg) & drv_data->mask_sr;
634*ca632f55SGrant Likely 	if (irq_status & SSSR_ROR) {
635*ca632f55SGrant Likely 		dma_error_stop(drv_data, "dma_transfer: fifo overrun");
636*ca632f55SGrant Likely 		return IRQ_HANDLED;
637*ca632f55SGrant Likely 	}
638*ca632f55SGrant Likely 
639*ca632f55SGrant Likely 	/* Check for false positive timeout */
640*ca632f55SGrant Likely 	if ((irq_status & SSSR_TINT)
641*ca632f55SGrant Likely 		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
642*ca632f55SGrant Likely 		write_SSSR(SSSR_TINT, reg);
643*ca632f55SGrant Likely 		return IRQ_HANDLED;
644*ca632f55SGrant Likely 	}
645*ca632f55SGrant Likely 
646*ca632f55SGrant Likely 	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
647*ca632f55SGrant Likely 
648*ca632f55SGrant Likely 		/* Clear and disable timeout interrupt, do the rest in
649*ca632f55SGrant Likely 		 * dma_transfer_complete */
650*ca632f55SGrant Likely 		if (!pxa25x_ssp_comp(drv_data))
651*ca632f55SGrant Likely 			write_SSTO(0, reg);
652*ca632f55SGrant Likely 
653*ca632f55SGrant Likely 		/* finish this transfer, start the next */
654*ca632f55SGrant Likely 		dma_transfer_complete(drv_data);
655*ca632f55SGrant Likely 
656*ca632f55SGrant Likely 		return IRQ_HANDLED;
657*ca632f55SGrant Likely 	}
658*ca632f55SGrant Likely 
659*ca632f55SGrant Likely 	/* Oops, problem detected */
660*ca632f55SGrant Likely 	return IRQ_NONE;
661*ca632f55SGrant Likely }
662*ca632f55SGrant Likely 
663*ca632f55SGrant Likely static void reset_sccr1(struct driver_data *drv_data)
664*ca632f55SGrant Likely {
665*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
666*ca632f55SGrant Likely 	struct chip_data *chip = drv_data->cur_chip;
667*ca632f55SGrant Likely 	u32 sccr1_reg;
668*ca632f55SGrant Likely 
669*ca632f55SGrant Likely 	sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
670*ca632f55SGrant Likely 	sccr1_reg &= ~SSCR1_RFT;
671*ca632f55SGrant Likely 	sccr1_reg |= chip->threshold;
672*ca632f55SGrant Likely 	write_SSCR1(sccr1_reg, reg);
673*ca632f55SGrant Likely }
674*ca632f55SGrant Likely 
675*ca632f55SGrant Likely static void int_error_stop(struct driver_data *drv_data, const char* msg)
676*ca632f55SGrant Likely {
677*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
678*ca632f55SGrant Likely 
679*ca632f55SGrant Likely 	/* Stop and reset SSP */
680*ca632f55SGrant Likely 	write_SSSR_CS(drv_data, drv_data->clear_sr);
681*ca632f55SGrant Likely 	reset_sccr1(drv_data);
682*ca632f55SGrant Likely 	if (!pxa25x_ssp_comp(drv_data))
683*ca632f55SGrant Likely 		write_SSTO(0, reg);
684*ca632f55SGrant Likely 	flush(drv_data);
685*ca632f55SGrant Likely 	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
686*ca632f55SGrant Likely 
687*ca632f55SGrant Likely 	dev_err(&drv_data->pdev->dev, "%s\n", msg);
688*ca632f55SGrant Likely 
689*ca632f55SGrant Likely 	drv_data->cur_msg->state = ERROR_STATE;
690*ca632f55SGrant Likely 	tasklet_schedule(&drv_data->pump_transfers);
691*ca632f55SGrant Likely }
692*ca632f55SGrant Likely 
693*ca632f55SGrant Likely static void int_transfer_complete(struct driver_data *drv_data)
694*ca632f55SGrant Likely {
695*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
696*ca632f55SGrant Likely 
697*ca632f55SGrant Likely 	/* Stop SSP */
698*ca632f55SGrant Likely 	write_SSSR_CS(drv_data, drv_data->clear_sr);
699*ca632f55SGrant Likely 	reset_sccr1(drv_data);
700*ca632f55SGrant Likely 	if (!pxa25x_ssp_comp(drv_data))
701*ca632f55SGrant Likely 		write_SSTO(0, reg);
702*ca632f55SGrant Likely 
703*ca632f55SGrant Likely 	/* Update total bytes transferred; the count reflects bytes actually read */
704*ca632f55SGrant Likely 	drv_data->cur_msg->actual_length += drv_data->len -
705*ca632f55SGrant Likely 				(drv_data->rx_end - drv_data->rx);
706*ca632f55SGrant Likely 
707*ca632f55SGrant Likely 	/* Transfer delays and chip select release are
708*ca632f55SGrant Likely 	 * handled in pump_transfers or giveback
709*ca632f55SGrant Likely 	 */
710*ca632f55SGrant Likely 
711*ca632f55SGrant Likely 	/* Move to next transfer */
712*ca632f55SGrant Likely 	drv_data->cur_msg->state = next_transfer(drv_data);
713*ca632f55SGrant Likely 
714*ca632f55SGrant Likely 	/* Schedule transfer tasklet */
715*ca632f55SGrant Likely 	tasklet_schedule(&drv_data->pump_transfers);
716*ca632f55SGrant Likely }
717*ca632f55SGrant Likely 
718*ca632f55SGrant Likely static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
719*ca632f55SGrant Likely {
720*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
721*ca632f55SGrant Likely 
722*ca632f55SGrant Likely 	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
723*ca632f55SGrant Likely 			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
724*ca632f55SGrant Likely 
725*ca632f55SGrant Likely 	u32 irq_status = read_SSSR(reg) & irq_mask;
726*ca632f55SGrant Likely 
727*ca632f55SGrant Likely 	if (irq_status & SSSR_ROR) {
728*ca632f55SGrant Likely 		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
729*ca632f55SGrant Likely 		return IRQ_HANDLED;
730*ca632f55SGrant Likely 	}
731*ca632f55SGrant Likely 
732*ca632f55SGrant Likely 	if (irq_status & SSSR_TINT) {
733*ca632f55SGrant Likely 		write_SSSR(SSSR_TINT, reg);
734*ca632f55SGrant Likely 		if (drv_data->read(drv_data)) {
735*ca632f55SGrant Likely 			int_transfer_complete(drv_data);
736*ca632f55SGrant Likely 			return IRQ_HANDLED;
737*ca632f55SGrant Likely 		}
738*ca632f55SGrant Likely 	}
739*ca632f55SGrant Likely 
740*ca632f55SGrant Likely 	/* Drain the rx fifo, fill the tx fifo and prevent overruns */
741*ca632f55SGrant Likely 	do {
742*ca632f55SGrant Likely 		if (drv_data->read(drv_data)) {
743*ca632f55SGrant Likely 			int_transfer_complete(drv_data);
744*ca632f55SGrant Likely 			return IRQ_HANDLED;
745*ca632f55SGrant Likely 		}
746*ca632f55SGrant Likely 	} while (drv_data->write(drv_data));
747*ca632f55SGrant Likely 
748*ca632f55SGrant Likely 	if (drv_data->read(drv_data)) {
749*ca632f55SGrant Likely 		int_transfer_complete(drv_data);
750*ca632f55SGrant Likely 		return IRQ_HANDLED;
751*ca632f55SGrant Likely 	}
752*ca632f55SGrant Likely 
753*ca632f55SGrant Likely 	if (drv_data->tx == drv_data->tx_end) {
754*ca632f55SGrant Likely 		u32 bytes_left;
755*ca632f55SGrant Likely 		u32 sccr1_reg;
756*ca632f55SGrant Likely 
757*ca632f55SGrant Likely 		sccr1_reg = read_SSCR1(reg);
758*ca632f55SGrant Likely 		sccr1_reg &= ~SSCR1_TIE;
759*ca632f55SGrant Likely 
760*ca632f55SGrant Likely 		/*
761*ca632f55SGrant Likely 		 * PXA25x_SSP has no timeout, set up the rx threshold for the
762*ca632f55SGrant Likely 		 * remaining RX bytes.
763*ca632f55SGrant Likely 		 */
764*ca632f55SGrant Likely 		if (pxa25x_ssp_comp(drv_data)) {
765*ca632f55SGrant Likely 
766*ca632f55SGrant Likely 			sccr1_reg &= ~SSCR1_RFT;
767*ca632f55SGrant Likely 
768*ca632f55SGrant Likely 			bytes_left = drv_data->rx_end - drv_data->rx;
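			/* Intentional fall-through below: shift the byte
			 * count down to a word count for 2- and 4-byte
			 * word sizes. */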
769*ca632f55SGrant Likely 			switch (drv_data->n_bytes) {
770*ca632f55SGrant Likely 			case 4:
771*ca632f55SGrant Likely 				bytes_left >>= 1;
772*ca632f55SGrant Likely 			case 2:
773*ca632f55SGrant Likely 				bytes_left >>= 1;
774*ca632f55SGrant Likely 			}
775*ca632f55SGrant Likely 
776*ca632f55SGrant Likely 			if (bytes_left > RX_THRESH_DFLT)
777*ca632f55SGrant Likely 				bytes_left = RX_THRESH_DFLT;
778*ca632f55SGrant Likely 
779*ca632f55SGrant Likely 			sccr1_reg |= SSCR1_RxTresh(bytes_left);
780*ca632f55SGrant Likely 		}
781*ca632f55SGrant Likely 		write_SSCR1(sccr1_reg, reg);
782*ca632f55SGrant Likely 	}
783*ca632f55SGrant Likely 
784*ca632f55SGrant Likely 	/* We did something */
785*ca632f55SGrant Likely 	return IRQ_HANDLED;
786*ca632f55SGrant Likely }
787*ca632f55SGrant Likely 
788*ca632f55SGrant Likely static irqreturn_t ssp_int(int irq, void *dev_id)
789*ca632f55SGrant Likely {
790*ca632f55SGrant Likely 	struct driver_data *drv_data = dev_id;
791*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
792*ca632f55SGrant Likely 	u32 sccr1_reg = read_SSCR1(reg);
793*ca632f55SGrant Likely 	u32 mask = drv_data->mask_sr;
794*ca632f55SGrant Likely 	u32 status;
795*ca632f55SGrant Likely 
796*ca632f55SGrant Likely 	status = read_SSSR(reg);
797*ca632f55SGrant Likely 
798*ca632f55SGrant Likely 	/* Ignore possible writes if we don't need to write */
799*ca632f55SGrant Likely 	if (!(sccr1_reg & SSCR1_TIE))
800*ca632f55SGrant Likely 		mask &= ~SSSR_TFS;
801*ca632f55SGrant Likely 
802*ca632f55SGrant Likely 	if (!(status & mask))
803*ca632f55SGrant Likely 		return IRQ_NONE;
804*ca632f55SGrant Likely 
805*ca632f55SGrant Likely 	if (!drv_data->cur_msg) {
806*ca632f55SGrant Likely 
807*ca632f55SGrant Likely 		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
808*ca632f55SGrant Likely 		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
809*ca632f55SGrant Likely 		if (!pxa25x_ssp_comp(drv_data))
810*ca632f55SGrant Likely 			write_SSTO(0, reg);
811*ca632f55SGrant Likely 		write_SSSR_CS(drv_data, drv_data->clear_sr);
812*ca632f55SGrant Likely 
813*ca632f55SGrant Likely 		dev_err(&drv_data->pdev->dev, "bad message state "
814*ca632f55SGrant Likely 			"in interrupt handler\n");
815*ca632f55SGrant Likely 
816*ca632f55SGrant Likely 		/* Never fail */
817*ca632f55SGrant Likely 		return IRQ_HANDLED;
818*ca632f55SGrant Likely 	}
819*ca632f55SGrant Likely 
820*ca632f55SGrant Likely 	return drv_data->transfer_handler(drv_data);
821*ca632f55SGrant Likely }
822*ca632f55SGrant Likely 
823*ca632f55SGrant Likely static int set_dma_burst_and_threshold(struct chip_data *chip,
824*ca632f55SGrant Likely 				struct spi_device *spi,
825*ca632f55SGrant Likely 				u8 bits_per_word, u32 *burst_code,
826*ca632f55SGrant Likely 				u32 *threshold)
827*ca632f55SGrant Likely {
828*ca632f55SGrant Likely 	struct pxa2xx_spi_chip *chip_info =
829*ca632f55SGrant Likely 			(struct pxa2xx_spi_chip *)spi->controller_data;
830*ca632f55SGrant Likely 	int bytes_per_word;
831*ca632f55SGrant Likely 	int burst_bytes;
832*ca632f55SGrant Likely 	int thresh_words;
833*ca632f55SGrant Likely 	int req_burst_size;
834*ca632f55SGrant Likely 	int retval = 0;
835*ca632f55SGrant Likely 
836*ca632f55SGrant Likely 	/* Set the threshold (in registers) to represent the same amount of
837*ca632f55SGrant Likely 	 * data as the burst size (in bytes).  The computation below is
838*ca632f55SGrant Likely 	 * (burst_size rounded up to the nearest 8 bytes, words or long words)
839*ca632f55SGrant Likely 	 * divided by (bytes/register).  The tx threshold is the inverse of
840*ca632f55SGrant Likely 	 * the rx threshold, so that there is always enough data in the rx
841*ca632f55SGrant Likely 	 * fifo to satisfy a burst and always enough space in the tx fifo to
842*ca632f55SGrant Likely 	 * accept a burst (a tx burst will overwrite the fifo if there is not
843*ca632f55SGrant Likely 	 * enough space); there must always remain enough empty space in the
844*ca632f55SGrant Likely 	 * rx fifo for any data loaded into the tx fifo.
845*ca632f55SGrant Likely 	 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
846*ca632f55SGrant Likely 	 * will be 8, or half the fifo.
847*ca632f55SGrant Likely 	 * The threshold can only be set to 2, 4 or 8, but not 16, because to
848*ca632f55SGrant Likely 	 * burst 16 into the tx fifo, the fifo would have to be empty; however,
849*ca632f55SGrant Likely 	 * the minimum fifo trigger level is 1, and the tx will request
850*ca632f55SGrant Likely 	 * service when the fifo is at this level, with only 15 spaces left.
851*ca632f55SGrant Likely 	 */
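	/*
	 * Worked example (illustrative): with bits_per_word = 16 the code
	 * below picks bytes_per_word = 2; a requested burst of 16 bytes
	 * keeps DCMD_BURST16, so thresh_words = 16 / 2 = 8.  The resulting
	 * rx threshold is 8 words and the tx threshold 16 - 8 = 8 words,
	 * i.e. both sit at half of the 16-entry fifo.
	 */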
852*ca632f55SGrant Likely 
853*ca632f55SGrant Likely 	/* find bytes/word */
854*ca632f55SGrant Likely 	if (bits_per_word <= 8)
855*ca632f55SGrant Likely 		bytes_per_word = 1;
856*ca632f55SGrant Likely 	else if (bits_per_word <= 16)
857*ca632f55SGrant Likely 		bytes_per_word = 2;
858*ca632f55SGrant Likely 	else
859*ca632f55SGrant Likely 		bytes_per_word = 4;
860*ca632f55SGrant Likely 
861*ca632f55SGrant Likely 	/* use struct pxa2xx_spi_chip->dma_burst_size if available */
862*ca632f55SGrant Likely 	if (chip_info)
863*ca632f55SGrant Likely 		req_burst_size = chip_info->dma_burst_size;
864*ca632f55SGrant Likely 	else {
865*ca632f55SGrant Likely 		switch (chip->dma_burst_size) {
866*ca632f55SGrant Likely 		default:
867*ca632f55SGrant Likely 			/* if the default burst size is not set,
868*ca632f55SGrant Likely 			 * do it now */
869*ca632f55SGrant Likely 			chip->dma_burst_size = DCMD_BURST8;
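			/* fall through to the DCMD_BURST8 case just set */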
870*ca632f55SGrant Likely 		case DCMD_BURST8:
871*ca632f55SGrant Likely 			req_burst_size = 8;
872*ca632f55SGrant Likely 			break;
873*ca632f55SGrant Likely 		case DCMD_BURST16:
874*ca632f55SGrant Likely 			req_burst_size = 16;
875*ca632f55SGrant Likely 			break;
876*ca632f55SGrant Likely 		case DCMD_BURST32:
877*ca632f55SGrant Likely 			req_burst_size = 32;
878*ca632f55SGrant Likely 			break;
879*ca632f55SGrant Likely 		}
880*ca632f55SGrant Likely 	}
881*ca632f55SGrant Likely 	if (req_burst_size <= 8) {
882*ca632f55SGrant Likely 		*burst_code = DCMD_BURST8;
883*ca632f55SGrant Likely 		burst_bytes = 8;
884*ca632f55SGrant Likely 	} else if (req_burst_size <= 16) {
885*ca632f55SGrant Likely 		if (bytes_per_word == 1) {
886*ca632f55SGrant Likely 			/* don't burst more than 1/2 the fifo */
887*ca632f55SGrant Likely 			*burst_code = DCMD_BURST8;
888*ca632f55SGrant Likely 			burst_bytes = 8;
889*ca632f55SGrant Likely 			retval = 1;
890*ca632f55SGrant Likely 		} else {
891*ca632f55SGrant Likely 			*burst_code = DCMD_BURST16;
892*ca632f55SGrant Likely 			burst_bytes = 16;
893*ca632f55SGrant Likely 		}
894*ca632f55SGrant Likely 	} else {
895*ca632f55SGrant Likely 		if (bytes_per_word == 1) {
896*ca632f55SGrant Likely 			/* don't burst more than 1/2 the fifo */
897*ca632f55SGrant Likely 			*burst_code = DCMD_BURST8;
898*ca632f55SGrant Likely 			burst_bytes = 8;
899*ca632f55SGrant Likely 			retval = 1;
900*ca632f55SGrant Likely 		} else if (bytes_per_word == 2) {
901*ca632f55SGrant Likely 			/* don't burst more than 1/2 the fifo */
902*ca632f55SGrant Likely 			*burst_code = DCMD_BURST16;
903*ca632f55SGrant Likely 			burst_bytes = 16;
904*ca632f55SGrant Likely 			retval = 1;
905*ca632f55SGrant Likely 		} else {
906*ca632f55SGrant Likely 			*burst_code = DCMD_BURST32;
907*ca632f55SGrant Likely 			burst_bytes = 32;
908*ca632f55SGrant Likely 		}
909*ca632f55SGrant Likely 	}
910*ca632f55SGrant Likely 
911*ca632f55SGrant Likely 	thresh_words = burst_bytes / bytes_per_word;
912*ca632f55SGrant Likely 
913*ca632f55SGrant Likely 	/* thresh_words will be between 2 and 8 */
914*ca632f55SGrant Likely 	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
915*ca632f55SGrant Likely 			| (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
916*ca632f55SGrant Likely 
917*ca632f55SGrant Likely 	return retval;
918*ca632f55SGrant Likely }
919*ca632f55SGrant Likely 
920*ca632f55SGrant Likely static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
921*ca632f55SGrant Likely {
922*ca632f55SGrant Likely 	unsigned long ssp_clk = clk_get_rate(ssp->clk);
923*ca632f55SGrant Likely 
924*ca632f55SGrant Likely 	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
925*ca632f55SGrant Likely 		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
926*ca632f55SGrant Likely 	else
927*ca632f55SGrant Likely 		return ((ssp_clk / rate - 1) & 0xfff) << 8;
928*ca632f55SGrant Likely }
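/*
 * Illustrative example (numbers assumed, not taken from a datasheet): on a
 * PXA25x-type port with a 3.6864 MHz SSP clock, a requested rate of
 * 115200 Hz gives ssp_clk / (2 * rate) - 1 = 15, and 15 << 8 is the value
 * that ends up in the SSCR0 serial clock rate field.
 */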
929*ca632f55SGrant Likely 
930*ca632f55SGrant Likely static void pump_transfers(unsigned long data)
931*ca632f55SGrant Likely {
932*ca632f55SGrant Likely 	struct driver_data *drv_data = (struct driver_data *)data;
933*ca632f55SGrant Likely 	struct spi_message *message = NULL;
934*ca632f55SGrant Likely 	struct spi_transfer *transfer = NULL;
935*ca632f55SGrant Likely 	struct spi_transfer *previous = NULL;
936*ca632f55SGrant Likely 	struct chip_data *chip = NULL;
937*ca632f55SGrant Likely 	struct ssp_device *ssp = drv_data->ssp;
938*ca632f55SGrant Likely 	void __iomem *reg = drv_data->ioaddr;
939*ca632f55SGrant Likely 	u32 clk_div = 0;
940*ca632f55SGrant Likely 	u8 bits = 0;
941*ca632f55SGrant Likely 	u32 speed = 0;
942*ca632f55SGrant Likely 	u32 cr0;
943*ca632f55SGrant Likely 	u32 cr1;
944*ca632f55SGrant Likely 	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
945*ca632f55SGrant Likely 	u32 dma_burst = drv_data->cur_chip->dma_burst_size;
946*ca632f55SGrant Likely 
947*ca632f55SGrant Likely 	/* Get current state information */
948*ca632f55SGrant Likely 	message = drv_data->cur_msg;
949*ca632f55SGrant Likely 	transfer = drv_data->cur_transfer;
950*ca632f55SGrant Likely 	chip = drv_data->cur_chip;
951*ca632f55SGrant Likely 
952*ca632f55SGrant Likely 	/* Handle message abort */
953*ca632f55SGrant Likely 	if (message->state == ERROR_STATE) {
954*ca632f55SGrant Likely 		message->status = -EIO;
955*ca632f55SGrant Likely 		giveback(drv_data);
956*ca632f55SGrant Likely 		return;
957*ca632f55SGrant Likely 	}
958*ca632f55SGrant Likely 
959*ca632f55SGrant Likely 	/* Handle end of message */
960*ca632f55SGrant Likely 	if (message->state == DONE_STATE) {
961*ca632f55SGrant Likely 		message->status = 0;
962*ca632f55SGrant Likely 		giveback(drv_data);
963*ca632f55SGrant Likely 		return;
964*ca632f55SGrant Likely 	}
965*ca632f55SGrant Likely 
966*ca632f55SGrant Likely 	/* Delay if requested at end of transfer before CS change */
967*ca632f55SGrant Likely 	if (message->state == RUNNING_STATE) {
968*ca632f55SGrant Likely 		previous = list_entry(transfer->transfer_list.prev,
969*ca632f55SGrant Likely 					struct spi_transfer,
970*ca632f55SGrant Likely 					transfer_list);
971*ca632f55SGrant Likely 		if (previous->delay_usecs)
972*ca632f55SGrant Likely 			udelay(previous->delay_usecs);
973*ca632f55SGrant Likely 
974*ca632f55SGrant Likely 		/* Drop chip select only if cs_change is requested */
975*ca632f55SGrant Likely 		if (previous->cs_change)
976*ca632f55SGrant Likely 			cs_deassert(drv_data);
977*ca632f55SGrant Likely 	}
978*ca632f55SGrant Likely 
979*ca632f55SGrant Likely 	/* Check for transfers that need multiple DMA segments */
980*ca632f55SGrant Likely 	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
981*ca632f55SGrant Likely 
982*ca632f55SGrant Likely 		/* reject already-mapped transfers; PIO won't always work */
983*ca632f55SGrant Likely 		if (message->is_dma_mapped
984*ca632f55SGrant Likely 				|| transfer->rx_dma || transfer->tx_dma) {
985*ca632f55SGrant Likely 			dev_err(&drv_data->pdev->dev,
986*ca632f55SGrant Likely 				"pump_transfers: mapped transfer length "
987*ca632f55SGrant Likely 				"of %u is greater than %d\n",
988*ca632f55SGrant Likely 				transfer->len, MAX_DMA_LEN);
989*ca632f55SGrant Likely 			message->status = -EINVAL;
990*ca632f55SGrant Likely 			giveback(drv_data);
991*ca632f55SGrant Likely 			return;
992*ca632f55SGrant Likely 		}
993*ca632f55SGrant Likely 
994*ca632f55SGrant Likely 		/* warn ... we force this to PIO mode */
995*ca632f55SGrant Likely 		if (printk_ratelimit())
996*ca632f55SGrant Likely 			dev_warn(&message->spi->dev, "pump_transfers: "
997*ca632f55SGrant Likely 				"DMA disabled for transfer length %ld "
998*ca632f55SGrant Likely 				"greater than %d\n",
999*ca632f55SGrant Likely 				(long)drv_data->len, MAX_DMA_LEN);
1000*ca632f55SGrant Likely 	}
1001*ca632f55SGrant Likely 
1002*ca632f55SGrant Likely 	/* Setup the transfer state based on the type of transfer */
1003*ca632f55SGrant Likely 	if (flush(drv_data) == 0) {
1004*ca632f55SGrant Likely 		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
1005*ca632f55SGrant Likely 		message->status = -EIO;
1006*ca632f55SGrant Likely 		giveback(drv_data);
1007*ca632f55SGrant Likely 		return;
1008*ca632f55SGrant Likely 	}
1009*ca632f55SGrant Likely 	drv_data->n_bytes = chip->n_bytes;
1010*ca632f55SGrant Likely 	drv_data->dma_width = chip->dma_width;
1011*ca632f55SGrant Likely 	drv_data->tx = (void *)transfer->tx_buf;
1012*ca632f55SGrant Likely 	drv_data->tx_end = drv_data->tx + transfer->len;
1013*ca632f55SGrant Likely 	drv_data->rx = transfer->rx_buf;
1014*ca632f55SGrant Likely 	drv_data->rx_end = drv_data->rx + transfer->len;
1015*ca632f55SGrant Likely 	drv_data->rx_dma = transfer->rx_dma;
1016*ca632f55SGrant Likely 	drv_data->tx_dma = transfer->tx_dma;
1017*ca632f55SGrant Likely 	drv_data->len = transfer->len & DCMD_LENGTH;
1018*ca632f55SGrant Likely 	drv_data->write = drv_data->tx ? chip->write : null_writer;
1019*ca632f55SGrant Likely 	drv_data->read = drv_data->rx ? chip->read : null_reader;
1020*ca632f55SGrant Likely 
1021*ca632f55SGrant Likely 	/* Change speed and bits per word on a per-transfer basis */
1022*ca632f55SGrant Likely 	cr0 = chip->cr0;
1023*ca632f55SGrant Likely 	if (transfer->speed_hz || transfer->bits_per_word) {
1024*ca632f55SGrant Likely 
1025*ca632f55SGrant Likely 		bits = chip->bits_per_word;
1026*ca632f55SGrant Likely 		speed = chip->speed_hz;
1027*ca632f55SGrant Likely 
1028*ca632f55SGrant Likely 		if (transfer->speed_hz)
1029*ca632f55SGrant Likely 			speed = transfer->speed_hz;
1030*ca632f55SGrant Likely 
1031*ca632f55SGrant Likely 		if (transfer->bits_per_word)
1032*ca632f55SGrant Likely 			bits = transfer->bits_per_word;
1033*ca632f55SGrant Likely 
1034*ca632f55SGrant Likely 		clk_div = ssp_get_clk_div(ssp, speed);
1035*ca632f55SGrant Likely 
1036*ca632f55SGrant Likely 		if (bits <= 8) {
1037*ca632f55SGrant Likely 			drv_data->n_bytes = 1;
1038*ca632f55SGrant Likely 			drv_data->dma_width = DCMD_WIDTH1;
1039*ca632f55SGrant Likely 			drv_data->read = drv_data->read != null_reader ?
1040*ca632f55SGrant Likely 						u8_reader : null_reader;
1041*ca632f55SGrant Likely 			drv_data->write = drv_data->write != null_writer ?
1042*ca632f55SGrant Likely 						u8_writer : null_writer;
1043*ca632f55SGrant Likely 		} else if (bits <= 16) {
1044*ca632f55SGrant Likely 			drv_data->n_bytes = 2;
1045*ca632f55SGrant Likely 			drv_data->dma_width = DCMD_WIDTH2;
1046*ca632f55SGrant Likely 			drv_data->read = drv_data->read != null_reader ?
1047*ca632f55SGrant Likely 						u16_reader : null_reader;
1048*ca632f55SGrant Likely 			drv_data->write = drv_data->write != null_writer ?
1049*ca632f55SGrant Likely 						u16_writer : null_writer;
1050*ca632f55SGrant Likely 		} else if (bits <= 32) {
1051*ca632f55SGrant Likely 			drv_data->n_bytes = 4;
1052*ca632f55SGrant Likely 			drv_data->dma_width = DCMD_WIDTH4;
1053*ca632f55SGrant Likely 			drv_data->read = drv_data->read != null_reader ?
1054*ca632f55SGrant Likely 						u32_reader : null_reader;
1055*ca632f55SGrant Likely 			drv_data->write = drv_data->write != null_writer ?
1056*ca632f55SGrant Likely 						u32_writer : null_writer;
1057*ca632f55SGrant Likely 		}
1058*ca632f55SGrant Likely 		/* if bits/word is changed in dma mode, then the thresholds
1059*ca632f55SGrant Likely 		 * and burst size must also be rechecked */
1060*ca632f55SGrant Likely 		if (chip->enable_dma) {
1061*ca632f55SGrant Likely 			if (set_dma_burst_and_threshold(chip, message->spi,
1062*ca632f55SGrant Likely 							bits, &dma_burst,
1063*ca632f55SGrant Likely 							&dma_thresh))
1064*ca632f55SGrant Likely 				if (printk_ratelimit())
1065*ca632f55SGrant Likely 					dev_warn(&message->spi->dev,
1066*ca632f55SGrant Likely 						"pump_transfers: "
1067*ca632f55SGrant Likely 						"DMA burst size reduced to "
1068*ca632f55SGrant Likely 						"match bits_per_word\n");
1069*ca632f55SGrant Likely 		}
1070*ca632f55SGrant Likely 
1071*ca632f55SGrant Likely 		cr0 = clk_div
1072*ca632f55SGrant Likely 			| SSCR0_Motorola
1073*ca632f55SGrant Likely 			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
1074*ca632f55SGrant Likely 			| SSCR0_SSE
1075*ca632f55SGrant Likely 			| (bits > 16 ? SSCR0_EDSS : 0);
1076*ca632f55SGrant Likely 	}
1077*ca632f55SGrant Likely 
1078*ca632f55SGrant Likely 	message->state = RUNNING_STATE;
1079*ca632f55SGrant Likely 
1080*ca632f55SGrant Likely 	/* Try to map dma buffer and do a dma transfer if successful, but
1081*ca632f55SGrant Likely 	 * only if the length is non-zero and less than MAX_DMA_LEN.
1082*ca632f55SGrant Likely 	 *
1083*ca632f55SGrant Likely 	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
1084*ca632f55SGrant Likely 	 * of PIO instead.  Care is needed above because the transfer may
1085*ca632f55SGrant Likely 	 * have been passed with buffers that are already dma mapped.
1086*ca632f55SGrant Likely 	 * A zero-length transfer in PIO mode will not try to write/read
1087*ca632f55SGrant Likely 	 * to/from the buffers.
1088*ca632f55SGrant Likely 	 *
1089*ca632f55SGrant Likely 	 * REVISIT large transfers are exactly where we most want to be
1090*ca632f55SGrant Likely 	 * using DMA.  If this happens much, split those transfers into
1091*ca632f55SGrant Likely 	 * multiple DMA segments rather than forcing PIO.
1092*ca632f55SGrant Likely 	 */
1093*ca632f55SGrant Likely 	drv_data->dma_mapped = 0;
1094*ca632f55SGrant Likely 	if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
1095*ca632f55SGrant Likely 		drv_data->dma_mapped = map_dma_buffers(drv_data);
1096*ca632f55SGrant Likely 	if (drv_data->dma_mapped) {
1097*ca632f55SGrant Likely 
1098*ca632f55SGrant Likely 		/* Ensure we have the correct interrupt handler */
1099*ca632f55SGrant Likely 		drv_data->transfer_handler = dma_transfer;
1100*ca632f55SGrant Likely 
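		/*
		 * Program the two PXA DMA channels directly: the rx channel
		 * drains SSDR into the receive buffer with the SSP pacing the
		 * transfer as flow-control source, while the tx channel feeds
		 * the transmit buffer into SSDR with the SSP as flow-control
		 * target.  A transfer without an rx or tx buffer uses the
		 * shared null_dma_buf with address increment disabled on that
		 * side.
		 */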
1101*ca632f55SGrant Likely 		/* Setup rx DMA Channel */
1102*ca632f55SGrant Likely 		DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
1103*ca632f55SGrant Likely 		DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
1104*ca632f55SGrant Likely 		DTADR(drv_data->rx_channel) = drv_data->rx_dma;
1105*ca632f55SGrant Likely 		if (drv_data->rx == drv_data->null_dma_buf)
1106*ca632f55SGrant Likely 			/* No target address increment */
1107*ca632f55SGrant Likely 			DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
1108*ca632f55SGrant Likely 							| drv_data->dma_width
1109*ca632f55SGrant Likely 							| dma_burst
1110*ca632f55SGrant Likely 							| drv_data->len;
1111*ca632f55SGrant Likely 		else
1112*ca632f55SGrant Likely 			DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
1113*ca632f55SGrant Likely 							| DCMD_FLOWSRC
1114*ca632f55SGrant Likely 							| drv_data->dma_width
1115*ca632f55SGrant Likely 							| dma_burst
1116*ca632f55SGrant Likely 							| drv_data->len;
1117*ca632f55SGrant Likely 
1118*ca632f55SGrant Likely 		/* Setup tx DMA Channel */
1119*ca632f55SGrant Likely 		DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
1120*ca632f55SGrant Likely 		DSADR(drv_data->tx_channel) = drv_data->tx_dma;
1121*ca632f55SGrant Likely 		DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
1122*ca632f55SGrant Likely 		if (drv_data->tx == drv_data->null_dma_buf)
1123*ca632f55SGrant Likely 			/* No source address increment */
1124*ca632f55SGrant Likely 			DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
1125*ca632f55SGrant Likely 							| drv_data->dma_width
1126*ca632f55SGrant Likely 							| dma_burst
1127*ca632f55SGrant Likely 							| drv_data->len;
1128*ca632f55SGrant Likely 		else
1129*ca632f55SGrant Likely 			DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
1130*ca632f55SGrant Likely 							| DCMD_FLOWTRG
1131*ca632f55SGrant Likely 							| drv_data->dma_width
1132*ca632f55SGrant Likely 							| dma_burst
1133*ca632f55SGrant Likely 							| drv_data->len;
1134*ca632f55SGrant Likely 
1135*ca632f55SGrant Likely 		/* Enable dma end irqs on SSP to detect end of transfer */
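		/*
		 * (Only the PXA25x-type SSP needs this: it lacks the receiver
		 * timeout interrupt used on later SSPs, so completion is
		 * detected from the tx DMA end interrupt instead.)
		 */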
1136*ca632f55SGrant Likely 		if (drv_data->ssp_type == PXA25x_SSP)
1137*ca632f55SGrant Likely 			DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
1138*ca632f55SGrant Likely 
1139*ca632f55SGrant Likely 		/* Clear status and start DMA engine */
1140*ca632f55SGrant Likely 		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
1141*ca632f55SGrant Likely 		write_SSSR(drv_data->clear_sr, reg);
1142*ca632f55SGrant Likely 		DCSR(drv_data->rx_channel) |= DCSR_RUN;
1143*ca632f55SGrant Likely 		DCSR(drv_data->tx_channel) |= DCSR_RUN;
1144*ca632f55SGrant Likely 	} else {
1145*ca632f55SGrant Likely 		/* Ensure we have the correct interrupt handler */
1146*ca632f55SGrant Likely 		drv_data->transfer_handler = interrupt_transfer;
1147*ca632f55SGrant Likely 
1148*ca632f55SGrant Likely 		/* Clear status */
1149*ca632f55SGrant Likely 		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
1150*ca632f55SGrant Likely 		write_SSSR_CS(drv_data, drv_data->clear_sr);
1151*ca632f55SGrant Likely 	}
1152*ca632f55SGrant Likely 
1153*ca632f55SGrant Likely 	/* see if we need to reload the config registers */
1154*ca632f55SGrant Likely 	if ((read_SSCR0(reg) != cr0)
1155*ca632f55SGrant Likely 		|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
1156*ca632f55SGrant Likely 			(cr1 & SSCR1_CHANGE_MASK)) {
1157*ca632f55SGrant Likely 
1158*ca632f55SGrant Likely 		/* stop the SSP, and update the other bits */
1159*ca632f55SGrant Likely 		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
1160*ca632f55SGrant Likely 		if (!pxa25x_ssp_comp(drv_data))
1161*ca632f55SGrant Likely 			write_SSTO(chip->timeout, reg);
1162*ca632f55SGrant Likely 		/* first set CR1 without interrupt and service enables */
1163*ca632f55SGrant Likely 		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
1164*ca632f55SGrant Likely 		/* restart the SSP */
1165*ca632f55SGrant Likely 		write_SSCR0(cr0, reg);
1166*ca632f55SGrant Likely 
1167*ca632f55SGrant Likely 	} else {
1168*ca632f55SGrant Likely 		if (!pxa25x_ssp_comp(drv_data))
1169*ca632f55SGrant Likely 			write_SSTO(chip->timeout, reg);
1170*ca632f55SGrant Likely 	}
1171*ca632f55SGrant Likely 
1172*ca632f55SGrant Likely 	cs_assert(drv_data);
1173*ca632f55SGrant Likely 
1174*ca632f55SGrant Likely 	/* after chip select, release the data by enabling service
1175*ca632f55SGrant Likely 	 * requests and interrupts, without changing any mode bits */
1176*ca632f55SGrant Likely 	write_SSCR1(cr1, reg);
1177*ca632f55SGrant Likely }
1178*ca632f55SGrant Likely 
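/*
 * Workqueue handler: pull the next spi_message off the driver queue, set up
 * its initial state and kick the pump_transfers tasklet to start processing
 * it.  All the per-transfer work then happens in tasklet/interrupt context.
 */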
1179*ca632f55SGrant Likely static void pump_messages(struct work_struct *work)
1180*ca632f55SGrant Likely {
1181*ca632f55SGrant Likely 	struct driver_data *drv_data =
1182*ca632f55SGrant Likely 		container_of(work, struct driver_data, pump_messages);
1183*ca632f55SGrant Likely 	unsigned long flags;
1184*ca632f55SGrant Likely 
1185*ca632f55SGrant Likely 	/* Lock queue and check for queue work */
1186*ca632f55SGrant Likely 	spin_lock_irqsave(&drv_data->lock, flags);
1187*ca632f55SGrant Likely 	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
1188*ca632f55SGrant Likely 		drv_data->busy = 0;
1189*ca632f55SGrant Likely 		spin_unlock_irqrestore(&drv_data->lock, flags);
1190*ca632f55SGrant Likely 		return;
1191*ca632f55SGrant Likely 	}
1192*ca632f55SGrant Likely 
1193*ca632f55SGrant Likely 	/* Make sure we are not already running a message */
1194*ca632f55SGrant Likely 	if (drv_data->cur_msg) {
1195*ca632f55SGrant Likely 		spin_unlock_irqrestore(&drv_data->lock, flags);
1196*ca632f55SGrant Likely 		return;
1197*ca632f55SGrant Likely 	}
1198*ca632f55SGrant Likely 
1199*ca632f55SGrant Likely 	/* Extract head of queue */
1200*ca632f55SGrant Likely 	drv_data->cur_msg = list_entry(drv_data->queue.next,
1201*ca632f55SGrant Likely 					struct spi_message, queue);
1202*ca632f55SGrant Likely 	list_del_init(&drv_data->cur_msg->queue);
1203*ca632f55SGrant Likely 
1204*ca632f55SGrant Likely 	/* Initial message state */
1205*ca632f55SGrant Likely 	drv_data->cur_msg->state = START_STATE;
1206*ca632f55SGrant Likely 	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
1207*ca632f55SGrant Likely 						struct spi_transfer,
1208*ca632f55SGrant Likely 						transfer_list);
1209*ca632f55SGrant Likely 
1210*ca632f55SGrant Likely 	/* prepare to set up the SSP, in pump_transfers, using the
1211*ca632f55SGrant Likely 	 * per-chip configuration */
1212*ca632f55SGrant Likely 	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
1213*ca632f55SGrant Likely 
1214*ca632f55SGrant Likely 	/* Mark as busy and launch transfers */
1215*ca632f55SGrant Likely 	tasklet_schedule(&drv_data->pump_transfers);
1216*ca632f55SGrant Likely 
1217*ca632f55SGrant Likely 	drv_data->busy = 1;
1218*ca632f55SGrant Likely 	spin_unlock_irqrestore(&drv_data->lock, flags);
1219*ca632f55SGrant Likely }
1220*ca632f55SGrant Likely 
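/*
 * spi_master->transfer hook: queue a message and, if the queue is running
 * and the pump is idle, schedule pump_messages().  Only an irq-safe
 * spinlock is taken here, so callers may submit from atomic context.
 */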
1221*ca632f55SGrant Likely static int transfer(struct spi_device *spi, struct spi_message *msg)
1222*ca632f55SGrant Likely {
1223*ca632f55SGrant Likely 	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1224*ca632f55SGrant Likely 	unsigned long flags;
1225*ca632f55SGrant Likely 
1226*ca632f55SGrant Likely 	spin_lock_irqsave(&drv_data->lock, flags);
1227*ca632f55SGrant Likely 
1228*ca632f55SGrant Likely 	if (drv_data->run == QUEUE_STOPPED) {
1229*ca632f55SGrant Likely 		spin_unlock_irqrestore(&drv_data->lock, flags);
1230*ca632f55SGrant Likely 		return -ESHUTDOWN;
1231*ca632f55SGrant Likely 	}
1232*ca632f55SGrant Likely 
1233*ca632f55SGrant Likely 	msg->actual_length = 0;
1234*ca632f55SGrant Likely 	msg->status = -EINPROGRESS;
1235*ca632f55SGrant Likely 	msg->state = START_STATE;
1236*ca632f55SGrant Likely 
1237*ca632f55SGrant Likely 	list_add_tail(&msg->queue, &drv_data->queue);
1238*ca632f55SGrant Likely 
1239*ca632f55SGrant Likely 	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
1240*ca632f55SGrant Likely 		queue_work(drv_data->workqueue, &drv_data->pump_messages);
1241*ca632f55SGrant Likely 
1242*ca632f55SGrant Likely 	spin_unlock_irqrestore(&drv_data->lock, flags);
1243*ca632f55SGrant Likely 
1244*ca632f55SGrant Likely 	return 0;
1245*ca632f55SGrant Likely }
1246*ca632f55SGrant Likely 
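/*
 * Decide how chip select is driven for this device: a board-supplied
 * cs_control() callback takes precedence; otherwise an optional GPIO from
 * pxa2xx_spi_chip is requested and parked at its inactive level.
 */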
1247*ca632f55SGrant Likely static int setup_cs(struct spi_device *spi, struct chip_data *chip,
1248*ca632f55SGrant Likely 		    struct pxa2xx_spi_chip *chip_info)
1249*ca632f55SGrant Likely {
1250*ca632f55SGrant Likely 	int err = 0;
1251*ca632f55SGrant Likely 
1252*ca632f55SGrant Likely 	if (chip == NULL || chip_info == NULL)
1253*ca632f55SGrant Likely 		return 0;
1254*ca632f55SGrant Likely 
1255*ca632f55SGrant Likely 	/* NOTE: setup() can be called multiple times, possibly with
1256*ca632f55SGrant Likely 	 * different chip_info; release any previously requested GPIO
1257*ca632f55SGrant Likely 	 */
1258*ca632f55SGrant Likely 	if (gpio_is_valid(chip->gpio_cs))
1259*ca632f55SGrant Likely 		gpio_free(chip->gpio_cs);
1260*ca632f55SGrant Likely 
1261*ca632f55SGrant Likely 	/* If (*cs_control) is provided, ignore GPIO chip select */
1262*ca632f55SGrant Likely 	if (chip_info->cs_control) {
1263*ca632f55SGrant Likely 		chip->cs_control = chip_info->cs_control;
1264*ca632f55SGrant Likely 		return 0;
1265*ca632f55SGrant Likely 	}
1266*ca632f55SGrant Likely 
1267*ca632f55SGrant Likely 	if (gpio_is_valid(chip_info->gpio_cs)) {
1268*ca632f55SGrant Likely 		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
1269*ca632f55SGrant Likely 		if (err) {
1270*ca632f55SGrant Likely 			dev_err(&spi->dev, "failed to request chip select "
1271*ca632f55SGrant Likely 					"GPIO%d\n", chip_info->gpio_cs);
1272*ca632f55SGrant Likely 			return err;
1273*ca632f55SGrant Likely 		}
1274*ca632f55SGrant Likely 
1275*ca632f55SGrant Likely 		chip->gpio_cs = chip_info->gpio_cs;
1276*ca632f55SGrant Likely 		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
1277*ca632f55SGrant Likely 
1278*ca632f55SGrant Likely 		err = gpio_direction_output(chip->gpio_cs,
1279*ca632f55SGrant Likely 					!chip->gpio_cs_inverted);
1280*ca632f55SGrant Likely 	}
1281*ca632f55SGrant Likely 
1282*ca632f55SGrant Likely 	return err;
1283*ca632f55SGrant Likely }
1284*ca632f55SGrant Likely 
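/*
 * spi_master->setup hook: validate bits_per_word for the SSP type, allocate
 * per-device chip_data on first use, and precompute the SSCR0/SSCR1 values,
 * FIFO thresholds, transfer width helpers and DMA parameters used later by
 * pump_transfers().
 */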
1285*ca632f55SGrant Likely static int setup(struct spi_device *spi)
1286*ca632f55SGrant Likely {
1287*ca632f55SGrant Likely 	struct pxa2xx_spi_chip *chip_info = NULL;
1288*ca632f55SGrant Likely 	struct chip_data *chip;
1289*ca632f55SGrant Likely 	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1290*ca632f55SGrant Likely 	struct ssp_device *ssp = drv_data->ssp;
1291*ca632f55SGrant Likely 	unsigned int clk_div;
1292*ca632f55SGrant Likely 	uint tx_thres = TX_THRESH_DFLT;
1293*ca632f55SGrant Likely 	uint rx_thres = RX_THRESH_DFLT;
1294*ca632f55SGrant Likely 
1295*ca632f55SGrant Likely 	if (!pxa25x_ssp_comp(drv_data)
1296*ca632f55SGrant Likely 		&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
1297*ca632f55SGrant Likely 		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1298*ca632f55SGrant Likely 				"b/w not 4-32 for type non-PXA25x_SSP\n",
1299*ca632f55SGrant Likely 				drv_data->ssp_type, spi->bits_per_word);
1300*ca632f55SGrant Likely 		return -EINVAL;
1301*ca632f55SGrant Likely 	} else if (pxa25x_ssp_comp(drv_data)
1302*ca632f55SGrant Likely 			&& (spi->bits_per_word < 4
1303*ca632f55SGrant Likely 				|| spi->bits_per_word > 16)) {
1304*ca632f55SGrant Likely 		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1305*ca632f55SGrant Likely 				"b/w not 4-16 for type PXA25x_SSP\n",
1306*ca632f55SGrant Likely 				drv_data->ssp_type, spi->bits_per_word);
1307*ca632f55SGrant Likely 		return -EINVAL;
1308*ca632f55SGrant Likely 	}
1309*ca632f55SGrant Likely 
1310*ca632f55SGrant Likely 	/* Only alloc on first setup */
1311*ca632f55SGrant Likely 	chip = spi_get_ctldata(spi);
1312*ca632f55SGrant Likely 	if (!chip) {
1313*ca632f55SGrant Likely 		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1314*ca632f55SGrant Likely 		if (!chip) {
1315*ca632f55SGrant Likely 			dev_err(&spi->dev,
1316*ca632f55SGrant Likely 				"failed setup: can't allocate chip data\n");
1317*ca632f55SGrant Likely 			return -ENOMEM;
1318*ca632f55SGrant Likely 		}
1319*ca632f55SGrant Likely 
1320*ca632f55SGrant Likely 		if (drv_data->ssp_type == CE4100_SSP) {
1321*ca632f55SGrant Likely 			if (spi->chip_select > 4) {
1322*ca632f55SGrant Likely 				dev_err(&spi->dev, "failed setup: "
1323*ca632f55SGrant Likely 					"cs number must not be > 4.\n");
1324*ca632f55SGrant Likely 				kfree(chip);
1325*ca632f55SGrant Likely 				return -EINVAL;
1326*ca632f55SGrant Likely 			}
1327*ca632f55SGrant Likely 
1328*ca632f55SGrant Likely 			chip->frm = spi->chip_select;
1329*ca632f55SGrant Likely 		} else
1330*ca632f55SGrant Likely 			chip->gpio_cs = -1;
1331*ca632f55SGrant Likely 		chip->enable_dma = 0;
1332*ca632f55SGrant Likely 		chip->timeout = TIMOUT_DFLT;
1333*ca632f55SGrant Likely 		chip->dma_burst_size = drv_data->master_info->enable_dma ?
1334*ca632f55SGrant Likely 					DCMD_BURST8 : 0;
1335*ca632f55SGrant Likely 	}
1336*ca632f55SGrant Likely 
1337*ca632f55SGrant Likely 	/* protocol drivers may change the chip settings, so...
1338*ca632f55SGrant Likely 	 * if chip_info exists, use it */
1339*ca632f55SGrant Likely 	chip_info = spi->controller_data;
1340*ca632f55SGrant Likely 
1341*ca632f55SGrant Likely 	/* chip_info isn't always needed */
1342*ca632f55SGrant Likely 	chip->cr1 = 0;
1343*ca632f55SGrant Likely 	if (chip_info) {
1344*ca632f55SGrant Likely 		if (chip_info->timeout)
1345*ca632f55SGrant Likely 			chip->timeout = chip_info->timeout;
1346*ca632f55SGrant Likely 		if (chip_info->tx_threshold)
1347*ca632f55SGrant Likely 			tx_thres = chip_info->tx_threshold;
1348*ca632f55SGrant Likely 		if (chip_info->rx_threshold)
1349*ca632f55SGrant Likely 			rx_thres = chip_info->rx_threshold;
1350*ca632f55SGrant Likely 		chip->enable_dma = drv_data->master_info->enable_dma;
1351*ca632f55SGrant Likely 		chip->dma_threshold = 0;
1352*ca632f55SGrant Likely 		if (chip_info->enable_loopback)
1353*ca632f55SGrant Likely 			chip->cr1 = SSCR1_LBM;
1354*ca632f55SGrant Likely 	}
1355*ca632f55SGrant Likely 
1356*ca632f55SGrant Likely 	chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
1357*ca632f55SGrant Likely 			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
1358*ca632f55SGrant Likely 
1359*ca632f55SGrant Likely 	/* set dma burst and threshold outside of chip_info path so that if
1360*ca632f55SGrant Likely 	 * chip_info goes away after setting chip->enable_dma, the
1361*ca632f55SGrant Likely 	 * burst and threshold can still respond to changes in bits_per_word */
1362*ca632f55SGrant Likely 	if (chip->enable_dma) {
1363*ca632f55SGrant Likely 		/* set up legal burst and threshold for dma */
1364*ca632f55SGrant Likely 		if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
1365*ca632f55SGrant Likely 						&chip->dma_burst_size,
1366*ca632f55SGrant Likely 						&chip->dma_threshold)) {
1367*ca632f55SGrant Likely 			dev_warn(&spi->dev, "in setup: DMA burst size reduced "
1368*ca632f55SGrant Likely 					"to match bits_per_word\n");
1369*ca632f55SGrant Likely 		}
1370*ca632f55SGrant Likely 	}
1371*ca632f55SGrant Likely 
1372*ca632f55SGrant Likely 	clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz);
1373*ca632f55SGrant Likely 	chip->speed_hz = spi->max_speed_hz;
1374*ca632f55SGrant Likely 
1375*ca632f55SGrant Likely 	chip->cr0 = clk_div
1376*ca632f55SGrant Likely 			| SSCR0_Motorola
1377*ca632f55SGrant Likely 			| SSCR0_DataSize(spi->bits_per_word > 16 ?
1378*ca632f55SGrant Likely 				spi->bits_per_word - 16 : spi->bits_per_word)
1379*ca632f55SGrant Likely 			| SSCR0_SSE
1380*ca632f55SGrant Likely 			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
1381*ca632f55SGrant Likely 	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
1382*ca632f55SGrant Likely 	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
1383*ca632f55SGrant Likely 			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
1384*ca632f55SGrant Likely 
1385*ca632f55SGrant Likely 	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
1386*ca632f55SGrant Likely 	if (!pxa25x_ssp_comp(drv_data))
1387*ca632f55SGrant Likely 		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1388*ca632f55SGrant Likely 			clk_get_rate(ssp->clk)
1389*ca632f55SGrant Likely 				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
1390*ca632f55SGrant Likely 			chip->enable_dma ? "DMA" : "PIO");
1391*ca632f55SGrant Likely 	else
1392*ca632f55SGrant Likely 		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1393*ca632f55SGrant Likely 			clk_get_rate(ssp->clk) / 2
1394*ca632f55SGrant Likely 				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1395*ca632f55SGrant Likely 			chip->enable_dma ? "DMA" : "PIO");
1396*ca632f55SGrant Likely 
1397*ca632f55SGrant Likely 	if (spi->bits_per_word <= 8) {
1398*ca632f55SGrant Likely 		chip->n_bytes = 1;
1399*ca632f55SGrant Likely 		chip->dma_width = DCMD_WIDTH1;
1400*ca632f55SGrant Likely 		chip->read = u8_reader;
1401*ca632f55SGrant Likely 		chip->write = u8_writer;
1402*ca632f55SGrant Likely 	} else if (spi->bits_per_word <= 16) {
1403*ca632f55SGrant Likely 		chip->n_bytes = 2;
1404*ca632f55SGrant Likely 		chip->dma_width = DCMD_WIDTH2;
1405*ca632f55SGrant Likely 		chip->read = u16_reader;
1406*ca632f55SGrant Likely 		chip->write = u16_writer;
1407*ca632f55SGrant Likely 	} else if (spi->bits_per_word <= 32) {
1408*ca632f55SGrant Likely 		chip->cr0 |= SSCR0_EDSS;
1409*ca632f55SGrant Likely 		chip->n_bytes = 4;
1410*ca632f55SGrant Likely 		chip->dma_width = DCMD_WIDTH4;
1411*ca632f55SGrant Likely 		chip->read = u32_reader;
1412*ca632f55SGrant Likely 		chip->write = u32_writer;
1413*ca632f55SGrant Likely 	} else {
1414*ca632f55SGrant Likely 		dev_err(&spi->dev, "invalid wordsize\n");
1415*ca632f55SGrant Likely 		return -ENODEV;
1416*ca632f55SGrant Likely 	}
1417*ca632f55SGrant Likely 	chip->bits_per_word = spi->bits_per_word;
1418*ca632f55SGrant Likely 
1419*ca632f55SGrant Likely 	spi_set_ctldata(spi, chip);
1420*ca632f55SGrant Likely 
1421*ca632f55SGrant Likely 	if (drv_data->ssp_type == CE4100_SSP)
1422*ca632f55SGrant Likely 		return 0;
1423*ca632f55SGrant Likely 
1424*ca632f55SGrant Likely 	return setup_cs(spi, chip, chip_info);
1425*ca632f55SGrant Likely }
1426*ca632f55SGrant Likely 
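/*
 * spi_master->cleanup hook: undo setup(), releasing the chip select GPIO
 * (when one was requested) and freeing the per-device chip_data.
 */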
1427*ca632f55SGrant Likely static void cleanup(struct spi_device *spi)
1428*ca632f55SGrant Likely {
1429*ca632f55SGrant Likely 	struct chip_data *chip = spi_get_ctldata(spi);
1430*ca632f55SGrant Likely 	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1431*ca632f55SGrant Likely 
1432*ca632f55SGrant Likely 	if (!chip)
1433*ca632f55SGrant Likely 		return;
1434*ca632f55SGrant Likely 
1435*ca632f55SGrant Likely 	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
1436*ca632f55SGrant Likely 		gpio_free(chip->gpio_cs);
1437*ca632f55SGrant Likely 
1438*ca632f55SGrant Likely 	kfree(chip);
1439*ca632f55SGrant Likely }
1440*ca632f55SGrant Likely 
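/*
 * Message queue plumbing: a single-threaded workqueue runs pump_messages()
 * and a tasklet runs pump_transfers(); drv_data->lock guards the queue and
 * the busy/run state shared between them and transfer().
 */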
1441*ca632f55SGrant Likely static int __devinit init_queue(struct driver_data *drv_data)
1442*ca632f55SGrant Likely {
1443*ca632f55SGrant Likely 	INIT_LIST_HEAD(&drv_data->queue);
1444*ca632f55SGrant Likely 	spin_lock_init(&drv_data->lock);
1445*ca632f55SGrant Likely 
1446*ca632f55SGrant Likely 	drv_data->run = QUEUE_STOPPED;
1447*ca632f55SGrant Likely 	drv_data->busy = 0;
1448*ca632f55SGrant Likely 
1449*ca632f55SGrant Likely 	tasklet_init(&drv_data->pump_transfers,
1450*ca632f55SGrant Likely 			pump_transfers,	(unsigned long)drv_data);
1451*ca632f55SGrant Likely 
1452*ca632f55SGrant Likely 	INIT_WORK(&drv_data->pump_messages, pump_messages);
1453*ca632f55SGrant Likely 	drv_data->workqueue = create_singlethread_workqueue(
1454*ca632f55SGrant Likely 				dev_name(drv_data->master->dev.parent));
1455*ca632f55SGrant Likely 	if (drv_data->workqueue == NULL)
1456*ca632f55SGrant Likely 		return -EBUSY;
1457*ca632f55SGrant Likely 
1458*ca632f55SGrant Likely 	return 0;
1459*ca632f55SGrant Likely }
1460*ca632f55SGrant Likely 
1461*ca632f55SGrant Likely static int start_queue(struct driver_data *drv_data)
1462*ca632f55SGrant Likely {
1463*ca632f55SGrant Likely 	unsigned long flags;
1464*ca632f55SGrant Likely 
1465*ca632f55SGrant Likely 	spin_lock_irqsave(&drv_data->lock, flags);
1466*ca632f55SGrant Likely 
1467*ca632f55SGrant Likely 	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1468*ca632f55SGrant Likely 		spin_unlock_irqrestore(&drv_data->lock, flags);
1469*ca632f55SGrant Likely 		return -EBUSY;
1470*ca632f55SGrant Likely 	}
1471*ca632f55SGrant Likely 
1472*ca632f55SGrant Likely 	drv_data->run = QUEUE_RUNNING;
1473*ca632f55SGrant Likely 	drv_data->cur_msg = NULL;
1474*ca632f55SGrant Likely 	drv_data->cur_transfer = NULL;
1475*ca632f55SGrant Likely 	drv_data->cur_chip = NULL;
1476*ca632f55SGrant Likely 	spin_unlock_irqrestore(&drv_data->lock, flags);
1477*ca632f55SGrant Likely 
1478*ca632f55SGrant Likely 	queue_work(drv_data->workqueue, &drv_data->pump_messages);
1479*ca632f55SGrant Likely 
1480*ca632f55SGrant Likely 	return 0;
1481*ca632f55SGrant Likely }
1482*ca632f55SGrant Likely 
1483*ca632f55SGrant Likely static int stop_queue(struct driver_data *drv_data)
1484*ca632f55SGrant Likely {
1485*ca632f55SGrant Likely 	unsigned long flags;
1486*ca632f55SGrant Likely 	unsigned limit = 500;
1487*ca632f55SGrant Likely 	int status = 0;
1488*ca632f55SGrant Likely 
1489*ca632f55SGrant Likely 	spin_lock_irqsave(&drv_data->lock, flags);
1490*ca632f55SGrant Likely 
1491*ca632f55SGrant Likely 	/* This is a bit lame, but is optimized for the common execution path.
1492*ca632f55SGrant Likely 	 * A wait_queue on the drv_data->busy could be used, but then the common
1493*ca632f55SGrant Likely 	 * execution path (pump_messages) would be required to call wake_up or
1494*ca632f55SGrant Likely 	 * friends on every SPI message. Do this instead */
1495*ca632f55SGrant Likely 	drv_data->run = QUEUE_STOPPED;
1496*ca632f55SGrant Likely 	while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
1497*ca632f55SGrant Likely 		spin_unlock_irqrestore(&drv_data->lock, flags);
1498*ca632f55SGrant Likely 		msleep(10);
1499*ca632f55SGrant Likely 		spin_lock_irqsave(&drv_data->lock, flags);
1500*ca632f55SGrant Likely 	}
1501*ca632f55SGrant Likely 
1502*ca632f55SGrant Likely 	if (!list_empty(&drv_data->queue) || drv_data->busy)
1503*ca632f55SGrant Likely 		status = -EBUSY;
1504*ca632f55SGrant Likely 
1505*ca632f55SGrant Likely 	spin_unlock_irqrestore(&drv_data->lock, flags);
1506*ca632f55SGrant Likely 
1507*ca632f55SGrant Likely 	return status;
1508*ca632f55SGrant Likely }
1509*ca632f55SGrant Likely 
1510*ca632f55SGrant Likely static int destroy_queue(struct driver_data *drv_data)
1511*ca632f55SGrant Likely {
1512*ca632f55SGrant Likely 	int status;
1513*ca632f55SGrant Likely 
1514*ca632f55SGrant Likely 	status = stop_queue(drv_data);
1515*ca632f55SGrant Likely 	/* we are unloading the module or failing to load (only two calls
1516*ca632f55SGrant Likely 	 * to this routine), and neither call can handle a return value.
1517*ca632f55SGrant Likely 	 * However, destroy_workqueue calls flush_workqueue, and that will
1518*ca632f55SGrant Likely 	 * block until all work is done.  If the reason that stop_queue
1519*ca632f55SGrant Likely 	 * timed out is that the work will never finish, then it does no
1520*ca632f55SGrant Likely 	 * good to call destroy_workqueue, so return anyway. */
1521*ca632f55SGrant Likely 	if (status != 0)
1522*ca632f55SGrant Likely 		return status;
1523*ca632f55SGrant Likely 
1524*ca632f55SGrant Likely 	destroy_workqueue(drv_data->workqueue);
1525*ca632f55SGrant Likely 
1526*ca632f55SGrant Likely 	return 0;
1527*ca632f55SGrant Likely }
1528*ca632f55SGrant Likely 
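/*
 * Probe: claim the SSP port, allocate an spi_master with room for
 * driver_data plus a small DMA scratch buffer, request the IRQ and (when
 * enabled) the two DMA channels, load a default SSP configuration and start
 * the message queue before registering with the SPI core.
 */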
1529*ca632f55SGrant Likely static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
1530*ca632f55SGrant Likely {
1531*ca632f55SGrant Likely 	struct device *dev = &pdev->dev;
1532*ca632f55SGrant Likely 	struct pxa2xx_spi_master *platform_info;
1533*ca632f55SGrant Likely 	struct spi_master *master;
1534*ca632f55SGrant Likely 	struct driver_data *drv_data;
1535*ca632f55SGrant Likely 	struct ssp_device *ssp;
1536*ca632f55SGrant Likely 	int status;
1537*ca632f55SGrant Likely 
1538*ca632f55SGrant Likely 	platform_info = dev->platform_data;
1539*ca632f55SGrant Likely 
1540*ca632f55SGrant Likely 	ssp = pxa_ssp_request(pdev->id, pdev->name);
1541*ca632f55SGrant Likely 	if (ssp == NULL) {
1542*ca632f55SGrant Likely 		dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
1543*ca632f55SGrant Likely 		return -ENODEV;
1544*ca632f55SGrant Likely 	}
1545*ca632f55SGrant Likely 
1546*ca632f55SGrant Likely 	/* Allocate master with space for drv_data and null dma buffer */
1547*ca632f55SGrant Likely 	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1548*ca632f55SGrant Likely 	if (!master) {
1549*ca632f55SGrant Likely 		dev_err(&pdev->dev, "cannot alloc spi_master\n");
1550*ca632f55SGrant Likely 		pxa_ssp_free(ssp);
1551*ca632f55SGrant Likely 		return -ENOMEM;
1552*ca632f55SGrant Likely 	}
1553*ca632f55SGrant Likely 	drv_data = spi_master_get_devdata(master);
1554*ca632f55SGrant Likely 	drv_data->master = master;
1555*ca632f55SGrant Likely 	drv_data->master_info = platform_info;
1556*ca632f55SGrant Likely 	drv_data->pdev = pdev;
1557*ca632f55SGrant Likely 	drv_data->ssp = ssp;
1558*ca632f55SGrant Likely 
1559*ca632f55SGrant Likely 	master->dev.parent = &pdev->dev;
1560*ca632f55SGrant Likely 	master->dev.of_node = pdev->dev.of_node;
1561*ca632f55SGrant Likely 	/* the spi->mode bits understood by this driver: */
1562*ca632f55SGrant Likely 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1563*ca632f55SGrant Likely 
1564*ca632f55SGrant Likely 	master->bus_num = pdev->id;
1565*ca632f55SGrant Likely 	master->num_chipselect = platform_info->num_chipselect;
1566*ca632f55SGrant Likely 	master->dma_alignment = DMA_ALIGNMENT;
1567*ca632f55SGrant Likely 	master->cleanup = cleanup;
1568*ca632f55SGrant Likely 	master->setup = setup;
1569*ca632f55SGrant Likely 	master->transfer = transfer;
1570*ca632f55SGrant Likely 
1571*ca632f55SGrant Likely 	drv_data->ssp_type = ssp->type;
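	/*
	 * null_dma_buf is intended to point at the 16 bytes of scratch space
	 * allocated together with the master above; it stands in for a
	 * missing rx or tx buffer when a transfer is DMA mapped.
	 */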
1572*ca632f55SGrant Likely 	drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data +
1573*ca632f55SGrant Likely 						sizeof(struct driver_data)), 8);
1574*ca632f55SGrant Likely 
1575*ca632f55SGrant Likely 	drv_data->ioaddr = ssp->mmio_base;
1576*ca632f55SGrant Likely 	drv_data->ssdr_physical = ssp->phys_base + SSDR;
1577*ca632f55SGrant Likely 	if (pxa25x_ssp_comp(drv_data)) {
1578*ca632f55SGrant Likely 		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1579*ca632f55SGrant Likely 		drv_data->dma_cr1 = 0;
1580*ca632f55SGrant Likely 		drv_data->clear_sr = SSSR_ROR;
1581*ca632f55SGrant Likely 		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1582*ca632f55SGrant Likely 	} else {
1583*ca632f55SGrant Likely 		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1584*ca632f55SGrant Likely 		drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
1585*ca632f55SGrant Likely 		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
1586*ca632f55SGrant Likely 		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1587*ca632f55SGrant Likely 	}
1588*ca632f55SGrant Likely 
1589*ca632f55SGrant Likely 	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
1590*ca632f55SGrant Likely 			drv_data);
1591*ca632f55SGrant Likely 	if (status < 0) {
1592*ca632f55SGrant Likely 		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
1593*ca632f55SGrant Likely 		goto out_error_master_alloc;
1594*ca632f55SGrant Likely 	}
1595*ca632f55SGrant Likely 
1596*ca632f55SGrant Likely 	/* Setup DMA if requested */
1597*ca632f55SGrant Likely 	drv_data->tx_channel = -1;
1598*ca632f55SGrant Likely 	drv_data->rx_channel = -1;
1599*ca632f55SGrant Likely 	if (platform_info->enable_dma) {
1600*ca632f55SGrant Likely 
1601*ca632f55SGrant Likely 		/* Get two DMA channels	(rx and tx) */
1602*ca632f55SGrant Likely 		drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
1603*ca632f55SGrant Likely 							DMA_PRIO_HIGH,
1604*ca632f55SGrant Likely 							dma_handler,
1605*ca632f55SGrant Likely 							drv_data);
1606*ca632f55SGrant Likely 		if (drv_data->rx_channel < 0) {
1607*ca632f55SGrant Likely 			dev_err(dev, "problem (%d) requesting rx channel\n",
1608*ca632f55SGrant Likely 				drv_data->rx_channel);
1609*ca632f55SGrant Likely 			status = -ENODEV;
1610*ca632f55SGrant Likely 			goto out_error_irq_alloc;
1611*ca632f55SGrant Likely 		}
1612*ca632f55SGrant Likely 		drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
1613*ca632f55SGrant Likely 							DMA_PRIO_MEDIUM,
1614*ca632f55SGrant Likely 							dma_handler,
1615*ca632f55SGrant Likely 							drv_data);
1616*ca632f55SGrant Likely 		if (drv_data->tx_channel < 0) {
1617*ca632f55SGrant Likely 			dev_err(dev, "problem (%d) requesting tx channel\n",
1618*ca632f55SGrant Likely 				drv_data->tx_channel);
1619*ca632f55SGrant Likely 			status = -ENODEV;
1620*ca632f55SGrant Likely 			goto out_error_dma_alloc;
1621*ca632f55SGrant Likely 		}
1622*ca632f55SGrant Likely 
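		/* Route the SSP rx/tx DMA request lines to the channels above */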
1623*ca632f55SGrant Likely 		DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
1624*ca632f55SGrant Likely 		DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
1625*ca632f55SGrant Likely 	}
1626*ca632f55SGrant Likely 
1627*ca632f55SGrant Likely 	/* Enable SOC clock */
1628*ca632f55SGrant Likely 	clk_enable(ssp->clk);
1629*ca632f55SGrant Likely 
1630*ca632f55SGrant Likely 	/* Load default SSP configuration */
1631*ca632f55SGrant Likely 	write_SSCR0(0, drv_data->ioaddr);
1632*ca632f55SGrant Likely 	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
1633*ca632f55SGrant Likely 				SSCR1_TxTresh(TX_THRESH_DFLT),
1634*ca632f55SGrant Likely 				drv_data->ioaddr);
1635*ca632f55SGrant Likely 	write_SSCR0(SSCR0_SCR(2)
1636*ca632f55SGrant Likely 			| SSCR0_Motorola
1637*ca632f55SGrant Likely 			| SSCR0_DataSize(8),
1638*ca632f55SGrant Likely 			drv_data->ioaddr);
1639*ca632f55SGrant Likely 	if (!pxa25x_ssp_comp(drv_data))
1640*ca632f55SGrant Likely 		write_SSTO(0, drv_data->ioaddr);
1641*ca632f55SGrant Likely 	write_SSPSP(0, drv_data->ioaddr);
1642*ca632f55SGrant Likely 
1643*ca632f55SGrant Likely 	/* Initialize and start the message queue */
1644*ca632f55SGrant Likely 	status = init_queue(drv_data);
1645*ca632f55SGrant Likely 	if (status != 0) {
1646*ca632f55SGrant Likely 		dev_err(&pdev->dev, "problem initializing queue\n");
1647*ca632f55SGrant Likely 		goto out_error_clock_enabled;
1648*ca632f55SGrant Likely 	}
1649*ca632f55SGrant Likely 	status = start_queue(drv_data);
1650*ca632f55SGrant Likely 	if (status != 0) {
1651*ca632f55SGrant Likely 		dev_err(&pdev->dev, "problem starting queue\n");
1652*ca632f55SGrant Likely 		goto out_error_clock_enabled;
1653*ca632f55SGrant Likely 	}
1654*ca632f55SGrant Likely 
1655*ca632f55SGrant Likely 	/* Register with the SPI framework */
1656*ca632f55SGrant Likely 	platform_set_drvdata(pdev, drv_data);
1657*ca632f55SGrant Likely 	status = spi_register_master(master);
1658*ca632f55SGrant Likely 	if (status != 0) {
1659*ca632f55SGrant Likely 		dev_err(&pdev->dev, "problem registering spi master\n");
1660*ca632f55SGrant Likely 		goto out_error_queue_alloc;
1661*ca632f55SGrant Likely 	}
1662*ca632f55SGrant Likely 
1663*ca632f55SGrant Likely 	return status;
1664*ca632f55SGrant Likely 
1665*ca632f55SGrant Likely out_error_queue_alloc:
1666*ca632f55SGrant Likely 	destroy_queue(drv_data);
1667*ca632f55SGrant Likely 
1668*ca632f55SGrant Likely out_error_clock_enabled:
1669*ca632f55SGrant Likely 	clk_disable(ssp->clk);
1670*ca632f55SGrant Likely 
1671*ca632f55SGrant Likely out_error_dma_alloc:
1672*ca632f55SGrant Likely 	if (drv_data->tx_channel != -1)
1673*ca632f55SGrant Likely 		pxa_free_dma(drv_data->tx_channel);
1674*ca632f55SGrant Likely 	if (drv_data->rx_channel != -1)
1675*ca632f55SGrant Likely 		pxa_free_dma(drv_data->rx_channel);
1676*ca632f55SGrant Likely 
1677*ca632f55SGrant Likely out_error_irq_alloc:
1678*ca632f55SGrant Likely 	free_irq(ssp->irq, drv_data);
1679*ca632f55SGrant Likely 
1680*ca632f55SGrant Likely out_error_master_alloc:
1681*ca632f55SGrant Likely 	spi_master_put(master);
1682*ca632f55SGrant Likely 	pxa_ssp_free(ssp);
1683*ca632f55SGrant Likely 	return status;
1684*ca632f55SGrant Likely }
1685*ca632f55SGrant Likely 
1686*ca632f55SGrant Likely static int pxa2xx_spi_remove(struct platform_device *pdev)
1687*ca632f55SGrant Likely {
1688*ca632f55SGrant Likely 	struct driver_data *drv_data = platform_get_drvdata(pdev);
1689*ca632f55SGrant Likely 	struct ssp_device *ssp;
1690*ca632f55SGrant Likely 	int status = 0;
1691*ca632f55SGrant Likely 
1692*ca632f55SGrant Likely 	if (!drv_data)
1693*ca632f55SGrant Likely 		return 0;
1694*ca632f55SGrant Likely 	ssp = drv_data->ssp;
1695*ca632f55SGrant Likely 
1696*ca632f55SGrant Likely 	/* Remove the queue */
1697*ca632f55SGrant Likely 	status = destroy_queue(drv_data);
1698*ca632f55SGrant Likely 	if (status != 0)
1699*ca632f55SGrant Likely 		/* the kernel does not check the return status of this
1700*ca632f55SGrant Likely 		 * routine (mod->exit, within the kernel).  Therefore
1701*ca632f55SGrant Likely 		 * nothing is gained by returning from here, the module is
1702*ca632f55SGrant Likely 		 * going away regardless, and we should not leave any more
1703*ca632f55SGrant Likely 		 * resources allocated than necessary.  We cannot free the
1704*ca632f55SGrant Likely 		 * message memory in drv_data->queue, but we can release the
1705*ca632f55SGrant Likely 		 * resources below.  I think the kernel should honor -EBUSY
1706*ca632f55SGrant Likely 		 * returns but... */
1707*ca632f55SGrant Likely 		dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
1708*ca632f55SGrant Likely 			"complete, message memory not freed\n");
1709*ca632f55SGrant Likely 
1710*ca632f55SGrant Likely 	/* Disable the SSP at the peripheral and SOC level */
1711*ca632f55SGrant Likely 	write_SSCR0(0, drv_data->ioaddr);
1712*ca632f55SGrant Likely 	clk_disable(ssp->clk);
1713*ca632f55SGrant Likely 
1714*ca632f55SGrant Likely 	/* Release DMA */
1715*ca632f55SGrant Likely 	if (drv_data->master_info->enable_dma) {
1716*ca632f55SGrant Likely 		DRCMR(ssp->drcmr_rx) = 0;
1717*ca632f55SGrant Likely 		DRCMR(ssp->drcmr_tx) = 0;
1718*ca632f55SGrant Likely 		pxa_free_dma(drv_data->tx_channel);
1719*ca632f55SGrant Likely 		pxa_free_dma(drv_data->rx_channel);
1720*ca632f55SGrant Likely 	}
1721*ca632f55SGrant Likely 
1722*ca632f55SGrant Likely 	/* Release IRQ */
1723*ca632f55SGrant Likely 	free_irq(ssp->irq, drv_data);
1724*ca632f55SGrant Likely 
1725*ca632f55SGrant Likely 	/* Release SSP */
1726*ca632f55SGrant Likely 	pxa_ssp_free(ssp);
1727*ca632f55SGrant Likely 
1728*ca632f55SGrant Likely 	/* Disconnect from the SPI framework */
1729*ca632f55SGrant Likely 	spi_unregister_master(drv_data->master);
1730*ca632f55SGrant Likely 
1731*ca632f55SGrant Likely 	/* Prevent double remove */
1732*ca632f55SGrant Likely 	platform_set_drvdata(pdev, NULL);
1733*ca632f55SGrant Likely 
1734*ca632f55SGrant Likely 	return 0;
1735*ca632f55SGrant Likely }
1736*ca632f55SGrant Likely 
1737*ca632f55SGrant Likely static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1738*ca632f55SGrant Likely {
1739*ca632f55SGrant Likely 	int status = 0;
1740*ca632f55SGrant Likely 
1741*ca632f55SGrant Likely 	if ((status = pxa2xx_spi_remove(pdev)) != 0)
1742*ca632f55SGrant Likely 		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
1743*ca632f55SGrant Likely }
1744*ca632f55SGrant Likely 
1745*ca632f55SGrant Likely #ifdef CONFIG_PM
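/*
 * System sleep support: suspend drains the message queue, disables the SSP
 * and gates its clock; resume restores the DMA request mapping, re-enables
 * the clock and restarts the queue.
 */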
1746*ca632f55SGrant Likely static int pxa2xx_spi_suspend(struct device *dev)
1747*ca632f55SGrant Likely {
1748*ca632f55SGrant Likely 	struct driver_data *drv_data = dev_get_drvdata(dev);
1749*ca632f55SGrant Likely 	struct ssp_device *ssp = drv_data->ssp;
1750*ca632f55SGrant Likely 	int status = 0;
1751*ca632f55SGrant Likely 
1752*ca632f55SGrant Likely 	status = stop_queue(drv_data);
1753*ca632f55SGrant Likely 	if (status != 0)
1754*ca632f55SGrant Likely 		return status;
1755*ca632f55SGrant Likely 	write_SSCR0(0, drv_data->ioaddr);
1756*ca632f55SGrant Likely 	clk_disable(ssp->clk);
1757*ca632f55SGrant Likely 
1758*ca632f55SGrant Likely 	return 0;
1759*ca632f55SGrant Likely }
1760*ca632f55SGrant Likely 
1761*ca632f55SGrant Likely static int pxa2xx_spi_resume(struct device *dev)
1762*ca632f55SGrant Likely {
1763*ca632f55SGrant Likely 	struct driver_data *drv_data = dev_get_drvdata(dev);
1764*ca632f55SGrant Likely 	struct ssp_device *ssp = drv_data->ssp;
1765*ca632f55SGrant Likely 	int status = 0;
1766*ca632f55SGrant Likely 
1767*ca632f55SGrant Likely 	if (drv_data->rx_channel != -1)
1768*ca632f55SGrant Likely 		DRCMR(drv_data->ssp->drcmr_rx) =
1769*ca632f55SGrant Likely 			DRCMR_MAPVLD | drv_data->rx_channel;
1770*ca632f55SGrant Likely 	if (drv_data->tx_channel != -1)
1771*ca632f55SGrant Likely 		DRCMR(drv_data->ssp->drcmr_tx) =
1772*ca632f55SGrant Likely 			DRCMR_MAPVLD | drv_data->tx_channel;
1773*ca632f55SGrant Likely 
1774*ca632f55SGrant Likely 	/* Enable the SSP clock */
1775*ca632f55SGrant Likely 	clk_enable(ssp->clk);
1776*ca632f55SGrant Likely 
1777*ca632f55SGrant Likely 	/* Start the queue running */
1778*ca632f55SGrant Likely 	status = start_queue(drv_data);
1779*ca632f55SGrant Likely 	if (status != 0) {
1780*ca632f55SGrant Likely 		dev_err(dev, "problem starting queue (%d)\n", status);
1781*ca632f55SGrant Likely 		return status;
1782*ca632f55SGrant Likely 	}
1783*ca632f55SGrant Likely 
1784*ca632f55SGrant Likely 	return 0;
1785*ca632f55SGrant Likely }
1786*ca632f55SGrant Likely 
1787*ca632f55SGrant Likely static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
1788*ca632f55SGrant Likely 	.suspend	= pxa2xx_spi_suspend,
1789*ca632f55SGrant Likely 	.resume		= pxa2xx_spi_resume,
1790*ca632f55SGrant Likely };
1791*ca632f55SGrant Likely #endif
1792*ca632f55SGrant Likely 
1793*ca632f55SGrant Likely static struct platform_driver driver = {
1794*ca632f55SGrant Likely 	.driver = {
1795*ca632f55SGrant Likely 		.name	= "pxa2xx-spi",
1796*ca632f55SGrant Likely 		.owner	= THIS_MODULE,
1797*ca632f55SGrant Likely #ifdef CONFIG_PM
1798*ca632f55SGrant Likely 		.pm	= &pxa2xx_spi_pm_ops,
1799*ca632f55SGrant Likely #endif
1800*ca632f55SGrant Likely 	},
1801*ca632f55SGrant Likely 	.probe = pxa2xx_spi_probe,
1802*ca632f55SGrant Likely 	.remove = pxa2xx_spi_remove,
1803*ca632f55SGrant Likely 	.shutdown = pxa2xx_spi_shutdown,
1804*ca632f55SGrant Likely };
1805*ca632f55SGrant Likely 
1806*ca632f55SGrant Likely static int __init pxa2xx_spi_init(void)
1807*ca632f55SGrant Likely {
1808*ca632f55SGrant Likely 	return platform_driver_register(&driver);
1809*ca632f55SGrant Likely }
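/*
 * Registered at subsys_initcall time so the SSP/SPI bus is available before
 * ordinary driver initcalls that may depend on SPI-attached devices.
 */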
1810*ca632f55SGrant Likely subsys_initcall(pxa2xx_spi_init);
1811*ca632f55SGrant Likely 
1812*ca632f55SGrant Likely static void __exit pxa2xx_spi_exit(void)
1813*ca632f55SGrant Likely {
1814*ca632f55SGrant Likely 	platform_driver_unregister(&driver);
1815*ca632f55SGrant Likely }
1816*ca632f55SGrant Likely module_exit(pxa2xx_spi_exit);
1817