1 /*
2  * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3  * Copyright (C) 2013, Intel Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/device.h>
19 #include <linux/ioport.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/interrupt.h>
23 #include <linux/kernel.h>
24 #include <linux/platform_device.h>
25 #include <linux/spi/pxa2xx_spi.h>
26 #include <linux/spi/spi.h>
27 #include <linux/delay.h>
28 #include <linux/gpio.h>
29 #include <linux/slab.h>
30 #include <linux/clk.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/acpi.h>
33 
34 #include "spi-pxa2xx.h"
35 
36 MODULE_AUTHOR("Stephen Street");
37 MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
38 MODULE_LICENSE("GPL");
39 MODULE_ALIAS("platform:pxa2xx-spi");
40 
41 #define TIMOUT_DFLT		1000
42 
43 /*
44  * For testing SSCR1 changes that require an SSP restart: basically
45  * everything except the service and interrupt enables. The PXA270
46  * developer manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to
47  * be in this list, but the PXA255 developer manual says all bits (without
48  * really meaning the service and interrupt enables).
49  */
50 #define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
51 				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
52 				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
53 				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
54 				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
55 				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
56 
57 #define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF	\
58 				| QUARK_X1000_SSCR1_EFWR	\
59 				| QUARK_X1000_SSCR1_RFT		\
60 				| QUARK_X1000_SSCR1_TFT		\
61 				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
62 
63 #define LPSS_RX_THRESH_DFLT	64
64 #define LPSS_TX_LOTHRESH_DFLT	160
65 #define LPSS_TX_HITHRESH_DFLT	224
66 
67 /* Offset from drv_data->lpss_base */
68 #define GENERAL_REG		0x08
69 #define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
70 #define SSP_REG			0x0c
71 #define SPI_CS_CONTROL		0x18
72 #define SPI_CS_CONTROL_SW_MODE	BIT(0)
73 #define SPI_CS_CONTROL_CS_HIGH	BIT(1)
74 
75 static bool is_lpss_ssp(const struct driver_data *drv_data)
76 {
77 	return drv_data->ssp_type == LPSS_SSP;
78 }
79 
80 static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
81 {
82 	return drv_data->ssp_type == QUARK_X1000_SSP;
83 }
84 
85 static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
86 {
87 	switch (drv_data->ssp_type) {
88 	case QUARK_X1000_SSP:
89 		return QUARK_X1000_SSCR1_CHANGE_MASK;
90 	default:
91 		return SSCR1_CHANGE_MASK;
92 	}
93 }
94 
95 static u32
96 pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
97 {
98 	switch (drv_data->ssp_type) {
99 	case QUARK_X1000_SSP:
100 		return RX_THRESH_QUARK_X1000_DFLT;
101 	default:
102 		return RX_THRESH_DFLT;
103 	}
104 }
105 
106 static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
107 {
108 	u32 mask;
109 
110 	switch (drv_data->ssp_type) {
111 	case QUARK_X1000_SSP:
112 		mask = QUARK_X1000_SSSR_TFL_MASK;
113 		break;
114 	default:
115 		mask = SSSR_TFL_MASK;
116 		break;
117 	}
118 
119 	return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
120 }
121 
122 static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
123 				     u32 *sccr1_reg)
124 {
125 	u32 mask;
126 
127 	switch (drv_data->ssp_type) {
128 	case QUARK_X1000_SSP:
129 		mask = QUARK_X1000_SSCR1_RFT;
130 		break;
131 	default:
132 		mask = SSCR1_RFT;
133 		break;
134 	}
135 	*sccr1_reg &= ~mask;
136 }
137 
138 static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
139 				   u32 *sccr1_reg, u32 threshold)
140 {
141 	switch (drv_data->ssp_type) {
142 	case QUARK_X1000_SSP:
143 		*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
144 		break;
145 	default:
146 		*sccr1_reg |= SSCR1_RxTresh(threshold);
147 		break;
148 	}
149 }
150 
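/*
 * Build the SSCR0 value for the given clock divider and word size. On
 * non-Quark ports the DSS field covers 4..16 bit words; for 17..32 bits
 * SSCR0_EDSS selects the extended range and DSS holds the size minus 16.
 */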
151 static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
152 				  u32 clk_div, u8 bits)
153 {
154 	switch (drv_data->ssp_type) {
155 	case QUARK_X1000_SSP:
156 		return clk_div
157 			| QUARK_X1000_SSCR0_Motorola
158 			| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
159 			| SSCR0_SSE;
160 	default:
161 		return clk_div
162 			| SSCR0_Motorola
163 			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
164 			| SSCR0_SSE
165 			| (bits > 16 ? SSCR0_EDSS : 0);
166 	}
167 }
168 
169 /*
170  * Read and write LPSS SSP private registers. Callers must first check
171  * that is_lpss_ssp() returns true before using these helpers.
172  */
173 static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
174 {
175 	WARN_ON(!drv_data->lpss_base);
176 	return readl(drv_data->lpss_base + offset);
177 }
178 
179 static void __lpss_ssp_write_priv(struct driver_data *drv_data,
180 				  unsigned offset, u32 value)
181 {
182 	WARN_ON(!drv_data->lpss_base);
183 	writel(value, drv_data->lpss_base + offset);
184 }
185 
186 /*
187  * lpss_ssp_setup - perform LPSS SSP specific setup
188  * @drv_data: pointer to the driver private data
189  *
190  * Perform LPSS SSP specific setup. This function must be called before
191  * any of the LPSS SSP private registers are accessed.
192  */
193 static void lpss_ssp_setup(struct driver_data *drv_data)
194 {
195 	unsigned offset = 0x400;
196 	u32 value, orig;
197 
198 	/*
199 	 * Perform auto-detection of the LPSS SSP private registers. They
200 	 * can be either at 1k or 2k offset from the base address.
201 	 */
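	/*
	 * The probe below toggles SPI_CS_CONTROL_SW_MODE at the 1k offset;
	 * if either write fails to read back, assume the private registers
	 * live at the 2k offset instead.
	 */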
202 	orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
203 
204 	/* Test SPI_CS_CONTROL_SW_MODE bit enabling */
205 	value = orig | SPI_CS_CONTROL_SW_MODE;
206 	writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
207 	value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
208 	if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
209 		offset = 0x800;
210 		goto detection_done;
211 	}
212 
213 	orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
214 
215 	/* Test SPI_CS_CONTROL_SW_MODE bit disabling */
216 	value = orig & ~SPI_CS_CONTROL_SW_MODE;
217 	writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
218 	value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
219 	if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
220 		offset = 0x800;
221 		goto detection_done;
222 	}
223 
224 detection_done:
225 	/* Now set the LPSS base */
226 	drv_data->lpss_base = drv_data->ioaddr + offset;
227 
228 	/* Enable software chip select control */
229 	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
230 	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
231 
232 	/* Enable multiblock DMA transfers */
233 	if (drv_data->master_info->enable_dma) {
234 		__lpss_ssp_write_priv(drv_data, SSP_REG, 1);
235 
236 		value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
237 		value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
238 		__lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
239 	}
240 }
241 
242 static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
243 {
244 	u32 value;
245 
246 	value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
247 	if (enable)
248 		value &= ~SPI_CS_CONTROL_CS_HIGH;
249 	else
250 		value |= SPI_CS_CONTROL_CS_HIGH;
251 	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
252 }
253 
254 static void cs_assert(struct driver_data *drv_data)
255 {
256 	struct chip_data *chip = drv_data->cur_chip;
257 
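	/* On CE4100 the chip select is chosen by writing the frame number
	 * to SSSR; no GPIO or cs_control() callback is involved */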
258 	if (drv_data->ssp_type == CE4100_SSP) {
259 		pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
260 		return;
261 	}
262 
263 	if (chip->cs_control) {
264 		chip->cs_control(PXA2XX_CS_ASSERT);
265 		return;
266 	}
267 
268 	if (gpio_is_valid(chip->gpio_cs)) {
269 		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
270 		return;
271 	}
272 
273 	if (is_lpss_ssp(drv_data))
274 		lpss_ssp_cs_control(drv_data, true);
275 }
276 
277 static void cs_deassert(struct driver_data *drv_data)
278 {
279 	struct chip_data *chip = drv_data->cur_chip;
280 
281 	if (drv_data->ssp_type == CE4100_SSP)
282 		return;
283 
284 	if (chip->cs_control) {
285 		chip->cs_control(PXA2XX_CS_DEASSERT);
286 		return;
287 	}
288 
289 	if (gpio_is_valid(chip->gpio_cs)) {
290 		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
291 		return;
292 	}
293 
294 	if (is_lpss_ssp(drv_data))
295 		lpss_ssp_cs_control(drv_data, false);
296 }
297 
298 int pxa2xx_spi_flush(struct driver_data *drv_data)
299 {
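	/* Bound the busy-wait to roughly two jiffies' worth of loop iterations */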
300 	unsigned long limit = loops_per_jiffy << 1;
301 
302 	do {
303 		while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
304 			pxa2xx_spi_read(drv_data, SSDR);
305 	} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
306 	write_SSSR_CS(drv_data, SSSR_ROR);
307 
308 	return limit;
309 }
310 
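/*
 * The null_* handlers are used when a transfer has no TX or RX buffer:
 * null_writer clocks out zero frames and null_reader discards received
 * data, while both still advance the bookkeeping pointers.
 */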
311 static int null_writer(struct driver_data *drv_data)
312 {
313 	u8 n_bytes = drv_data->n_bytes;
314 
315 	if (pxa2xx_spi_txfifo_full(drv_data)
316 		|| (drv_data->tx == drv_data->tx_end))
317 		return 0;
318 
319 	pxa2xx_spi_write(drv_data, SSDR, 0);
320 	drv_data->tx += n_bytes;
321 
322 	return 1;
323 }
324 
325 static int null_reader(struct driver_data *drv_data)
326 {
327 	u8 n_bytes = drv_data->n_bytes;
328 
329 	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
330 	       && (drv_data->rx < drv_data->rx_end)) {
331 		pxa2xx_spi_read(drv_data, SSDR);
332 		drv_data->rx += n_bytes;
333 	}
334 
335 	return drv_data->rx == drv_data->rx_end;
336 }
337 
338 static int u8_writer(struct driver_data *drv_data)
339 {
340 	if (pxa2xx_spi_txfifo_full(drv_data)
341 		|| (drv_data->tx == drv_data->tx_end))
342 		return 0;
343 
344 	pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
345 	++drv_data->tx;
346 
347 	return 1;
348 }
349 
350 static int u8_reader(struct driver_data *drv_data)
351 {
352 	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
353 	       && (drv_data->rx < drv_data->rx_end)) {
354 		*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
355 		++drv_data->rx;
356 	}
357 
358 	return drv_data->rx == drv_data->rx_end;
359 }
360 
361 static int u16_writer(struct driver_data *drv_data)
362 {
363 	if (pxa2xx_spi_txfifo_full(drv_data)
364 		|| (drv_data->tx == drv_data->tx_end))
365 		return 0;
366 
367 	pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
368 	drv_data->tx += 2;
369 
370 	return 1;
371 }
372 
373 static int u16_reader(struct driver_data *drv_data)
374 {
375 	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
376 	       && (drv_data->rx < drv_data->rx_end)) {
377 		*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
378 		drv_data->rx += 2;
379 	}
380 
381 	return drv_data->rx == drv_data->rx_end;
382 }
383 
384 static int u32_writer(struct driver_data *drv_data)
385 {
386 	if (pxa2xx_spi_txfifo_full(drv_data)
387 		|| (drv_data->tx == drv_data->tx_end))
388 		return 0;
389 
390 	pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
391 	drv_data->tx += 4;
392 
393 	return 1;
394 }
395 
396 static int u32_reader(struct driver_data *drv_data)
397 {
398 	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
399 	       && (drv_data->rx < drv_data->rx_end)) {
400 		*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
401 		drv_data->rx += 4;
402 	}
403 
404 	return drv_data->rx == drv_data->rx_end;
405 }
406 
407 void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
408 {
409 	struct spi_message *msg = drv_data->cur_msg;
410 	struct spi_transfer *trans = drv_data->cur_transfer;
411 
412 	/* Move to next transfer */
413 	if (trans->transfer_list.next != &msg->transfers) {
414 		drv_data->cur_transfer =
415 			list_entry(trans->transfer_list.next,
416 					struct spi_transfer,
417 					transfer_list);
418 		return RUNNING_STATE;
419 	} else
420 		return DONE_STATE;
421 }
422 
423 /* caller already set message->status; dma and pio irqs are blocked */
424 static void giveback(struct driver_data *drv_data)
425 {
426 	struct spi_transfer* last_transfer;
427 	struct spi_message *msg;
428 
429 	msg = drv_data->cur_msg;
430 	drv_data->cur_msg = NULL;
431 	drv_data->cur_transfer = NULL;
432 
433 	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
434 					transfer_list);
435 
436 	/* Delay if requested before any change in chip select */
437 	if (last_transfer->delay_usecs)
438 		udelay(last_transfer->delay_usecs);
439 
440 	/* Drop chip select UNLESS cs_change is true. Even then, drop it on an
441 	 * error, or when there is no next message or it is for another chip.
442 	 */
443 	if (!last_transfer->cs_change)
444 		cs_deassert(drv_data);
445 	else {
446 		struct spi_message *next_msg;
447 
448 		/* Holding of cs was hinted, but we need to make sure
449 		 * the next message is for the same chip.  Don't waste
450 		 * time with the following tests unless this was hinted.
451 		 *
452 		 * We cannot postpone this until the next message is started,
453 		 * because once the current message is finalized (below) the
454 		 * driver that sent it could be unloaded, which could
455 		 * invalidate the cs_control() callback...
456 		 */
457 
458 		/* get a pointer to the next message, if any */
459 		next_msg = spi_get_next_queued_message(drv_data->master);
460 
461 		/* see if the next and current messages point
462 		 * to the same chip
463 		 */
464 		if (next_msg && next_msg->spi != msg->spi)
465 			next_msg = NULL;
466 		if (!next_msg || msg->state == ERROR_STATE)
467 			cs_deassert(drv_data);
468 	}
469 
470 	drv_data->cur_chip = NULL;
471 	spi_finalize_current_message(drv_data->master);
472 }
473 
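/* Disable the transfer interrupts and restore the chip's FIFO thresholds in SSCR1 */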
474 static void reset_sccr1(struct driver_data *drv_data)
475 {
476 	struct chip_data *chip = drv_data->cur_chip;
477 	u32 sccr1_reg;
478 
479 	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
480 	sccr1_reg &= ~SSCR1_RFT;
481 	sccr1_reg |= chip->threshold;
482 	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
483 }
484 
485 static void int_error_stop(struct driver_data *drv_data, const char* msg)
486 {
487 	/* Stop and reset SSP */
488 	write_SSSR_CS(drv_data, drv_data->clear_sr);
489 	reset_sccr1(drv_data);
490 	if (!pxa25x_ssp_comp(drv_data))
491 		pxa2xx_spi_write(drv_data, SSTO, 0);
492 	pxa2xx_spi_flush(drv_data);
493 	pxa2xx_spi_write(drv_data, SSCR0,
494 			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
495 
496 	dev_err(&drv_data->pdev->dev, "%s\n", msg);
497 
498 	drv_data->cur_msg->state = ERROR_STATE;
499 	tasklet_schedule(&drv_data->pump_transfers);
500 }
501 
502 static void int_transfer_complete(struct driver_data *drv_data)
503 {
504 	/* Stop SSP */
505 	write_SSSR_CS(drv_data, drv_data->clear_sr);
506 	reset_sccr1(drv_data);
507 	if (!pxa25x_ssp_comp(drv_data))
508 		pxa2xx_spi_write(drv_data, SSTO, 0);
509 
510 	/* Update total bytes transferred; only bytes actually read are counted */
511 	drv_data->cur_msg->actual_length += drv_data->len -
512 				(drv_data->rx_end - drv_data->rx);
513 
514 	/* Transfer delays and chip select release are
515 	 * handled in pump_transfers or giveback
516 	 */
517 
518 	/* Move to next transfer */
519 	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
520 
521 	/* Schedule transfer tasklet */
522 	tasklet_schedule(&drv_data->pump_transfers);
523 }
524 
525 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
526 {
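	/* Only consider TX FIFO service requests when the TX interrupt is enabled */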
527 	u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
528 		       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
529 
530 	u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
531 
532 	if (irq_status & SSSR_ROR) {
533 		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
534 		return IRQ_HANDLED;
535 	}
536 
537 	if (irq_status & SSSR_TINT) {
538 		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
539 		if (drv_data->read(drv_data)) {
540 			int_transfer_complete(drv_data);
541 			return IRQ_HANDLED;
542 		}
543 	}
544 
545 	/* Drain the RX FIFO, fill the TX FIFO and prevent overruns */
546 	do {
547 		if (drv_data->read(drv_data)) {
548 			int_transfer_complete(drv_data);
549 			return IRQ_HANDLED;
550 		}
551 	} while (drv_data->write(drv_data));
552 
553 	if (drv_data->read(drv_data)) {
554 		int_transfer_complete(drv_data);
555 		return IRQ_HANDLED;
556 	}
557 
558 	if (drv_data->tx == drv_data->tx_end) {
559 		u32 bytes_left;
560 		u32 sccr1_reg;
561 
562 		sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
563 		sccr1_reg &= ~SSCR1_TIE;
564 
565 		/*
566 		 * PXA25x_SSP has no timeout, so set up the RX threshold for
567 		 * the remaining RX bytes.
568 		 */
569 		if (pxa25x_ssp_comp(drv_data)) {
570 			u32 rx_thre;
571 
572 			pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);
573 
574 			bytes_left = drv_data->rx_end - drv_data->rx;
575 			switch (drv_data->n_bytes) {
576 			case 4:
577 				bytes_left >>= 1;
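				/* Fall through: a 4-byte frame needs one more halving */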
578 			case 2:
579 				bytes_left >>= 1;
580 			}
581 
582 			rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
583 			if (rx_thre > bytes_left)
584 				rx_thre = bytes_left;
585 
586 			pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
587 		}
588 		pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
589 	}
590 
591 	/* We did something */
592 	return IRQ_HANDLED;
593 }
594 
595 static irqreturn_t ssp_int(int irq, void *dev_id)
596 {
597 	struct driver_data *drv_data = dev_id;
598 	u32 sccr1_reg;
599 	u32 mask = drv_data->mask_sr;
600 	u32 status;
601 
602 	/*
603 	 * The IRQ might be shared with other peripherals, so we must first
604 	 * check whether we are runtime suspended. If we are, assume the IRQ
605 	 * was not for us (we shouldn't be RPM suspended while the interrupt
606 	 * is enabled).
607 	 */
608 	if (pm_runtime_suspended(&drv_data->pdev->dev))
609 		return IRQ_NONE;
610 
611 	/*
612 	 * If the device is not yet runtime suspended and we get an interrupt
613 	 * that is meant for another device, check whether the status bits
614 	 * read back as all ones. If so, the device is already powered off
615 	 * and the interrupt cannot be ours.
616 	 */
617 	status = pxa2xx_spi_read(drv_data, SSSR);
618 	if (status == ~0)
619 		return IRQ_NONE;
620 
621 	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
622 
623 	/* Ignore TX FIFO service requests if the TX interrupt is not enabled */
624 	if (!(sccr1_reg & SSCR1_TIE))
625 		mask &= ~SSSR_TFS;
626 
627 	if (!(status & mask))
628 		return IRQ_NONE;
629 
630 	if (!drv_data->cur_msg) {
631 
632 		pxa2xx_spi_write(drv_data, SSCR0,
633 				 pxa2xx_spi_read(drv_data, SSCR0)
634 				 & ~SSCR0_SSE);
635 		pxa2xx_spi_write(drv_data, SSCR1,
636 				 pxa2xx_spi_read(drv_data, SSCR1)
637 				 & ~drv_data->int_cr1);
638 		if (!pxa25x_ssp_comp(drv_data))
639 			pxa2xx_spi_write(drv_data, SSTO, 0);
640 		write_SSSR_CS(drv_data, drv_data->clear_sr);
641 
642 		dev_err(&drv_data->pdev->dev,
643 			"bad message state in interrupt handler\n");
644 
645 		/* Never fail */
646 		return IRQ_HANDLED;
647 	}
648 
649 	return drv_data->transfer_handler(drv_data);
650 }
651 
652 /*
653  * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
654  * input frequency by fractions of 2^24. It also has a divider by 5.
655  *
656  * There are formulas to get baud rate value for given input frequency and
657  * divider parameters, such as DDS_CLK_RATE and SCR:
658  *
659  * Fsys = 200MHz
660  *
661  * Fssp = Fsys * DDS_CLK_RATE / 2^24			(1)
662  * Baud rate = Fsclk = Fssp / (2 * (SCR + 1))		(2)
663  *
664  * DDS_CLK_RATE is either 2^n or 2^n / 5.
665  * SCR is in range 0 .. 255
666  *
667  * Divisor = 5^i * 2^j * 2 * k
668  *       i = [0, 1]      i = 1 iff j = 0 or j > 3
669  *       j = [0, 23]     j = 0 iff i = 1
670  *       k = [1, 256]
671  * Special case: j = 0, i = 1: Divisor = 2 / 5
672  *
673  * According to the specification, the recommended values for DDS_CLK_RATE
674  * are:
675  *	Case 1:		2^n, n = [0, 23]
676  *	Case 2:		2^24 * 2 / 5 (0x666666)
677  *	Case 3:		less than or equal to 2^24 / 5 / 16 (0x33333)
678  *
679  * In all cases the lowest possible value is better.
680  *
681  * The function calculates parameters for all cases and chooses the one closest
682  * to the requested baud rate.
683  */
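/*
 * Illustrative example (not from the datasheet): for a requested rate of
 * 5 MHz, DDS_CLK_RATE = 0x400000 (2^22) and SCR = 4 give, per (1) and (2),
 * Fssp = 200 MHz * 2^22 / 2^24 = 50 MHz and Fsclk = 50 MHz / (2 * (4 + 1))
 * = 5 MHz, i.e. an exact match; this is the case 1 result chosen below.
 */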
684 static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
685 {
686 	unsigned long xtal = 200000000;
687 	unsigned long fref = xtal / 2;		/* mandatory division by 2,
688 						   see (2); fref is also
689 						   used as-is for case 3 */
690 	unsigned long fref1 = fref / 2;		/* case 1 */
691 	unsigned long fref2 = fref * 2 / 5;	/* case 2 */
692 	unsigned long scale;
693 	unsigned long q, q1, q2;
694 	long r, r1, r2;
695 	u32 mul;
696 
697 	/* Case 1 */
698 
699 	/* Set initial value for DDS_CLK_RATE */
700 	mul = (1 << 24) >> 1;
701 
702 	/* Calculate initial quot */
703 	q1 = DIV_ROUND_CLOSEST(fref1, rate);
704 
705 	/* Scale q1 if it's too big */
706 	if (q1 > 256) {
707 		/* Scale q1 to range [1, 512] */
708 		scale = fls_long(q1 - 1);
709 		if (scale > 9) {
710 			q1 >>= scale - 9;
711 			mul >>= scale - 9;
712 		}
713 
714 		/* Round q1 up to the next even value if it is odd */
715 		q1 += q1 & 1;
716 	}
717 
718 	/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
719 	scale = __ffs(q1);
720 	q1 >>= scale;
721 	mul >>= scale;
722 
723 	/* Get the remainder */
724 	r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);
725 
726 	/* Case 2 */
727 
728 	q2 = DIV_ROUND_CLOSEST(fref2, rate);
729 	r2 = abs(fref2 / q2 - rate);
730 
731 	/*
732 	 * Choose the better of the two: the smaller the remainder, the better.
733 	 * We can't use case 2 if q2 is greater than 256, since the SCR register
734 	 * can hold only values 0 .. 255.
735 	 */
736 	if (r2 >= r1 || q2 > 256) {
737 		/* case 1 is better */
738 		r = r1;
739 		q = q1;
740 	} else {
741 		/* case 2 is better */
742 		r = r2;
743 		q = q2;
744 		mul = (1 << 24) * 2 / 5;
745 	}
746 
747 	/* Check case 3 only if the divisor is big enough */
748 	if (fref / rate >= 80) {
749 		u64 fssp;
750 		u32 m;
751 
752 		/* Calculate initial quot */
753 		q1 = DIV_ROUND_CLOSEST(fref, rate);
754 		m = (1 << 24) / q1;
755 
756 		/* Get the remainder */
757 		fssp = (u64)fref * m;
758 		do_div(fssp, 1 << 24);
759 		r1 = abs(fssp - rate);
760 
761 		/* Choose this one if it suits better */
762 		if (r1 < r) {
763 			/* case 3 is better */
764 			q = 1;
765 			mul = m;
766 		}
767 	}
768 
769 	*dds = mul;
770 	return q - 1;
771 }
772 
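/*
 * Classic SSP clock divider: the SCR value programmed into SSCR0 yields
 * ssp_clk / (2 * (SCR + 1)) on PXA25x/CE4100 and ssp_clk / (SCR + 1) on
 * later SSP ports.
 */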
773 static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
774 {
775 	unsigned long ssp_clk = drv_data->max_clk_rate;
776 	const struct ssp_device *ssp = drv_data->ssp;
777 
778 	rate = min_t(int, ssp_clk, rate);
779 
780 	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
781 		return (ssp_clk / (2 * rate) - 1) & 0xff;
782 	else
783 		return (ssp_clk / rate - 1) & 0xfff;
784 }
785 
786 static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
787 					   struct chip_data *chip, int rate)
788 {
789 	unsigned int clk_div;
790 
791 	switch (drv_data->ssp_type) {
792 	case QUARK_X1000_SSP:
793 		clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
794 		break;
795 	default:
796 		clk_div = ssp_get_clk_div(drv_data, rate);
797 		break;
798 	}
799 	return clk_div << 8;
800 }
801 
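/*
 * Tasklet that advances the current message one transfer at a time: it
 * completes or aborts the message when needed, applies per-transfer
 * speed/word-size overrides, chooses between PIO and DMA, reprograms the
 * SSP if its configuration changed and finally asserts chip select.
 */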
802 static void pump_transfers(unsigned long data)
803 {
804 	struct driver_data *drv_data = (struct driver_data *)data;
805 	struct spi_message *message = NULL;
806 	struct spi_transfer *transfer = NULL;
807 	struct spi_transfer *previous = NULL;
808 	struct chip_data *chip = NULL;
809 	u32 clk_div = 0;
810 	u8 bits = 0;
811 	u32 speed = 0;
812 	u32 cr0;
813 	u32 cr1;
814 	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
815 	u32 dma_burst = drv_data->cur_chip->dma_burst_size;
816 	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
817 
818 	/* Get current state information */
819 	message = drv_data->cur_msg;
820 	transfer = drv_data->cur_transfer;
821 	chip = drv_data->cur_chip;
822 
823 	/* Handle an aborted message */
824 	if (message->state == ERROR_STATE) {
825 		message->status = -EIO;
826 		giveback(drv_data);
827 		return;
828 	}
829 
830 	/* Handle end of message */
831 	if (message->state == DONE_STATE) {
832 		message->status = 0;
833 		giveback(drv_data);
834 		return;
835 	}
836 
837 	/* Delay if requested at end of transfer before CS change */
838 	if (message->state == RUNNING_STATE) {
839 		previous = list_entry(transfer->transfer_list.prev,
840 					struct spi_transfer,
841 					transfer_list);
842 		if (previous->delay_usecs)
843 			udelay(previous->delay_usecs);
844 
845 		/* Drop chip select only if cs_change is requested */
846 		if (previous->cs_change)
847 			cs_deassert(drv_data);
848 	}
849 
850 	/* DMA was requested but this transfer is too long for DMA */
851 	if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
852 
853 		/* reject already-mapped transfers; PIO won't always work */
854 		if (message->is_dma_mapped
855 				|| transfer->rx_dma || transfer->tx_dma) {
856 			dev_err(&drv_data->pdev->dev,
857 				"pump_transfers: mapped transfer length of "
858 				"%u is greater than %d\n",
859 				transfer->len, MAX_DMA_LEN);
860 			message->status = -EINVAL;
861 			giveback(drv_data);
862 			return;
863 		}
864 
865 		/* warn ... we force this to PIO mode */
866 		dev_warn_ratelimited(&message->spi->dev,
867 				     "pump_transfers: DMA disabled for transfer length %ld "
868 				     "greater than %d\n",
869 				     (long)drv_data->len, MAX_DMA_LEN);
870 	}
871 
872 	/* Setup the transfer state based on the type of transfer */
873 	if (pxa2xx_spi_flush(drv_data) == 0) {
874 		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
875 		message->status = -EIO;
876 		giveback(drv_data);
877 		return;
878 	}
879 	drv_data->n_bytes = chip->n_bytes;
880 	drv_data->tx = (void *)transfer->tx_buf;
881 	drv_data->tx_end = drv_data->tx + transfer->len;
882 	drv_data->rx = transfer->rx_buf;
883 	drv_data->rx_end = drv_data->rx + transfer->len;
884 	drv_data->rx_dma = transfer->rx_dma;
885 	drv_data->tx_dma = transfer->tx_dma;
886 	drv_data->len = transfer->len;
887 	drv_data->write = drv_data->tx ? chip->write : null_writer;
888 	drv_data->read = drv_data->rx ? chip->read : null_reader;
889 
890 	/* Change speed and bits per word on a per-transfer basis */
891 	cr0 = chip->cr0;
892 	if (transfer->speed_hz || transfer->bits_per_word) {
893 
894 		bits = chip->bits_per_word;
895 		speed = chip->speed_hz;
896 
897 		if (transfer->speed_hz)
898 			speed = transfer->speed_hz;
899 
900 		if (transfer->bits_per_word)
901 			bits = transfer->bits_per_word;
902 
903 		clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);
904 
905 		if (bits <= 8) {
906 			drv_data->n_bytes = 1;
907 			drv_data->read = drv_data->read != null_reader ?
908 						u8_reader : null_reader;
909 			drv_data->write = drv_data->write != null_writer ?
910 						u8_writer : null_writer;
911 		} else if (bits <= 16) {
912 			drv_data->n_bytes = 2;
913 			drv_data->read = drv_data->read != null_reader ?
914 						u16_reader : null_reader;
915 			drv_data->write = drv_data->write != null_writer ?
916 						u16_writer : null_writer;
917 		} else if (bits <= 32) {
918 			drv_data->n_bytes = 4;
919 			drv_data->read = drv_data->read != null_reader ?
920 						u32_reader : null_reader;
921 			drv_data->write = drv_data->write != null_writer ?
922 						u32_writer : null_writer;
923 		}
924 		/* If bits/word is changed in DMA mode, the thresholds and
925 		 * burst size must be re-checked as well */
926 		if (chip->enable_dma) {
927 			if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
928 							message->spi,
929 							bits, &dma_burst,
930 							&dma_thresh))
931 				dev_warn_ratelimited(&message->spi->dev,
932 						     "pump_transfers: DMA burst size reduced to match bits_per_word\n");
933 		}
934 
935 		cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
936 	}
937 
938 	message->state = RUNNING_STATE;
939 
940 	drv_data->dma_mapped = 0;
941 	if (pxa2xx_spi_dma_is_possible(drv_data->len))
942 		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
943 	if (drv_data->dma_mapped) {
944 
945 		/* Ensure we have the correct interrupt handler */
946 		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
947 
948 		pxa2xx_spi_dma_prepare(drv_data, dma_burst);
949 
950 		/* Clear status and start DMA engine */
951 		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
952 		pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
953 
954 		pxa2xx_spi_dma_start(drv_data);
955 	} else {
956 		/* Ensure we have the correct interrupt handler */
957 		drv_data->transfer_handler = interrupt_transfer;
958 
959 		/* Clear status  */
960 		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
961 		write_SSSR_CS(drv_data, drv_data->clear_sr);
962 	}
963 
964 	if (is_lpss_ssp(drv_data)) {
965 		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
966 		    != chip->lpss_rx_threshold)
967 			pxa2xx_spi_write(drv_data, SSIRF,
968 					 chip->lpss_rx_threshold);
969 		if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
970 		    != chip->lpss_tx_threshold)
971 			pxa2xx_spi_write(drv_data, SSITF,
972 					 chip->lpss_tx_threshold);
973 	}
974 
975 	if (is_quark_x1000_ssp(drv_data) &&
976 	    (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
977 		pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
978 
979 	/* see if we need to reload the config registers */
980 	if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
981 	    || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
982 	    != (cr1 & change_mask)) {
983 		/* stop the SSP, and update the other bits */
984 		pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
985 		if (!pxa25x_ssp_comp(drv_data))
986 			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
987 		/* first set CR1 without interrupt and service enables */
988 		pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
989 		/* restart the SSP */
990 		pxa2xx_spi_write(drv_data, SSCR0, cr0);
991 
992 	} else {
993 		if (!pxa25x_ssp_comp(drv_data))
994 			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
995 	}
996 
997 	cs_assert(drv_data);
998 
999 	/* After asserting chip select, start the transfer by enabling
1000 	 * service requests and interrupts, without changing any mode bits */
1001 	pxa2xx_spi_write(drv_data, SSCR1, cr1);
1002 }
1003 
1004 static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
1005 					   struct spi_message *msg)
1006 {
1007 	struct driver_data *drv_data = spi_master_get_devdata(master);
1008 
1009 	drv_data->cur_msg = msg;
1010 	/* Initial message state */
1011 	drv_data->cur_msg->state = START_STATE;
1012 	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
1013 						struct spi_transfer,
1014 						transfer_list);
1015 
1016 	/* Prepare to set up the SSP, in pump_transfers, using the
1017 	 * per-chip configuration */
1018 	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
1019 
1020 	/* Mark as busy and launch transfers */
1021 	tasklet_schedule(&drv_data->pump_transfers);
1022 	return 0;
1023 }
1024 
1025 static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
1026 {
1027 	struct driver_data *drv_data = spi_master_get_devdata(master);
1028 
1029 	/* Disable the SSP now */
1030 	pxa2xx_spi_write(drv_data, SSCR0,
1031 			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
1032 
1033 	return 0;
1034 }
1035 
1036 static int setup_cs(struct spi_device *spi, struct chip_data *chip,
1037 		    struct pxa2xx_spi_chip *chip_info)
1038 {
1039 	int err = 0;
1040 
1041 	if (chip == NULL || chip_info == NULL)
1042 		return 0;
1043 
1044 	/* NOTE: setup() can be called multiple times, possibly with a
1045 	 * different chip_info; release any previously requested GPIO.
1046 	 */
1047 	if (gpio_is_valid(chip->gpio_cs))
1048 		gpio_free(chip->gpio_cs);
1049 
1050 	/* If (*cs_control) is provided, ignore GPIO chip select */
1051 	if (chip_info->cs_control) {
1052 		chip->cs_control = chip_info->cs_control;
1053 		return 0;
1054 	}
1055 
1056 	if (gpio_is_valid(chip_info->gpio_cs)) {
1057 		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
1058 		if (err) {
1059 			dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
1060 				chip_info->gpio_cs);
1061 			return err;
1062 		}
1063 
1064 		chip->gpio_cs = chip_info->gpio_cs;
1065 		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
1066 
1067 		err = gpio_direction_output(chip->gpio_cs,
1068 					!chip->gpio_cs_inverted);
1069 	}
1070 
1071 	return err;
1072 }
1073 
1074 static int setup(struct spi_device *spi)
1075 {
1076 	struct pxa2xx_spi_chip *chip_info = NULL;
1077 	struct chip_data *chip;
1078 	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1079 	unsigned int clk_div;
1080 	uint tx_thres, tx_hi_thres, rx_thres;
1081 
1082 	switch (drv_data->ssp_type) {
1083 	case QUARK_X1000_SSP:
1084 		tx_thres = TX_THRESH_QUARK_X1000_DFLT;
1085 		tx_hi_thres = 0;
1086 		rx_thres = RX_THRESH_QUARK_X1000_DFLT;
1087 		break;
1088 	case LPSS_SSP:
1089 		tx_thres = LPSS_TX_LOTHRESH_DFLT;
1090 		tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
1091 		rx_thres = LPSS_RX_THRESH_DFLT;
1092 		break;
1093 	default:
1094 		tx_thres = TX_THRESH_DFLT;
1095 		tx_hi_thres = 0;
1096 		rx_thres = RX_THRESH_DFLT;
1097 		break;
1098 	}
1099 
1100 	/* Only alloc on first setup */
1101 	chip = spi_get_ctldata(spi);
1102 	if (!chip) {
1103 		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1104 		if (!chip)
1105 			return -ENOMEM;
1106 
1107 		if (drv_data->ssp_type == CE4100_SSP) {
1108 			if (spi->chip_select > 4) {
1109 				dev_err(&spi->dev,
1110 					"failed setup: cs number must not be > 4.\n");
1111 				kfree(chip);
1112 				return -EINVAL;
1113 			}
1114 
1115 			chip->frm = spi->chip_select;
1116 		} else
1117 			chip->gpio_cs = -1;
1118 		chip->enable_dma = 0;
1119 		chip->timeout = TIMOUT_DFLT;
1120 	}
1121 
1122 	/* protocol drivers may change the chip settings, so...
1123 	 * if chip_info exists, use it */
1124 	chip_info = spi->controller_data;
1125 
1126 	/* chip_info isn't always needed */
1127 	chip->cr1 = 0;
1128 	if (chip_info) {
1129 		if (chip_info->timeout)
1130 			chip->timeout = chip_info->timeout;
1131 		if (chip_info->tx_threshold)
1132 			tx_thres = chip_info->tx_threshold;
1133 		if (chip_info->tx_hi_threshold)
1134 			tx_hi_thres = chip_info->tx_hi_threshold;
1135 		if (chip_info->rx_threshold)
1136 			rx_thres = chip_info->rx_threshold;
1137 		chip->enable_dma = drv_data->master_info->enable_dma;
1138 		chip->dma_threshold = 0;
1139 		if (chip_info->enable_loopback)
1140 			chip->cr1 = SSCR1_LBM;
1141 	} else if (ACPI_HANDLE(&spi->dev)) {
1142 		/*
1143 		 * Slave devices enumerated from ACPI namespace don't
1144 		 * usually have chip_info but we still might want to use
1145 		 * DMA with them.
1146 		 */
1147 		chip->enable_dma = drv_data->master_info->enable_dma;
1148 	}
1149 
1150 	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
1151 	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
1152 				| SSITF_TxHiThresh(tx_hi_thres);
1153 
1154 	/* set dma burst and threshold outside of chip_info path so that if
1155 	 * chip_info goes away after setting chip->enable_dma, the
1156 	 * burst and threshold can still respond to changes in bits_per_word */
1157 	if (chip->enable_dma) {
1158 		/* set up legal burst and threshold for dma */
1159 		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
1160 						spi->bits_per_word,
1161 						&chip->dma_burst_size,
1162 						&chip->dma_threshold)) {
1163 			dev_warn(&spi->dev,
1164 				 "in setup: DMA burst size reduced to match bits_per_word\n");
1165 		}
1166 	}
1167 
1168 	clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
1169 	chip->speed_hz = spi->max_speed_hz;
1170 
1171 	chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
1172 					   spi->bits_per_word);
1173 	switch (drv_data->ssp_type) {
1174 	case QUARK_X1000_SSP:
1175 		chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
1176 				   & QUARK_X1000_SSCR1_RFT)
1177 				   | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
1178 				   & QUARK_X1000_SSCR1_TFT);
1179 		break;
1180 	default:
1181 		chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
1182 			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
1183 		break;
1184 	}
1185 
1186 	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
1187 	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
1188 			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
1189 
1190 	if (spi->mode & SPI_LOOP)
1191 		chip->cr1 |= SSCR1_LBM;
1192 
1193 	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
1194 	if (!pxa25x_ssp_comp(drv_data))
1195 		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1196 			drv_data->max_clk_rate
1197 				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
1198 			chip->enable_dma ? "DMA" : "PIO");
1199 	else
1200 		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1201 			drv_data->max_clk_rate / 2
1202 				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1203 			chip->enable_dma ? "DMA" : "PIO");
1204 
1205 	if (spi->bits_per_word <= 8) {
1206 		chip->n_bytes = 1;
1207 		chip->read = u8_reader;
1208 		chip->write = u8_writer;
1209 	} else if (spi->bits_per_word <= 16) {
1210 		chip->n_bytes = 2;
1211 		chip->read = u16_reader;
1212 		chip->write = u16_writer;
1213 	} else if (spi->bits_per_word <= 32) {
1214 		if (!is_quark_x1000_ssp(drv_data))
1215 			chip->cr0 |= SSCR0_EDSS;
1216 		chip->n_bytes = 4;
1217 		chip->read = u32_reader;
1218 		chip->write = u32_writer;
1219 	}
1220 	chip->bits_per_word = spi->bits_per_word;
1221 
1222 	spi_set_ctldata(spi, chip);
1223 
1224 	if (drv_data->ssp_type == CE4100_SSP)
1225 		return 0;
1226 
1227 	return setup_cs(spi, chip, chip_info);
1228 }
1229 
1230 static void cleanup(struct spi_device *spi)
1231 {
1232 	struct chip_data *chip = spi_get_ctldata(spi);
1233 	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1234 
1235 	if (!chip)
1236 		return;
1237 
1238 	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
1239 		gpio_free(chip->gpio_cs);
1240 
1241 	kfree(chip);
1242 }
1243 
1244 #ifdef CONFIG_ACPI
1245 static struct pxa2xx_spi_master *
1246 pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1247 {
1248 	struct pxa2xx_spi_master *pdata;
1249 	struct acpi_device *adev;
1250 	struct ssp_device *ssp;
1251 	struct resource *res;
1252 	int devid;
1253 
1254 	if (!ACPI_HANDLE(&pdev->dev) ||
1255 	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
1256 		return NULL;
1257 
1258 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1259 	if (!pdata)
1260 		return NULL;
1261 
1262 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1263 	if (!res)
1264 		return NULL;
1265 
1266 	ssp = &pdata->ssp;
1267 
1268 	ssp->phys_base = res->start;
1269 	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
1270 	if (IS_ERR(ssp->mmio_base))
1271 		return NULL;
1272 
1273 	ssp->clk = devm_clk_get(&pdev->dev, NULL);
1274 	ssp->irq = platform_get_irq(pdev, 0);
1275 	ssp->type = LPSS_SSP;
1276 	ssp->pdev = pdev;
1277 
1278 	ssp->port_id = -1;
1279 	if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
1280 		ssp->port_id = devid;
1281 
1282 	pdata->num_chipselect = 1;
1283 	pdata->enable_dma = true;
1284 
1285 	return pdata;
1286 }
1287 
1288 static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
1289 	{ "INT33C0", 0 },
1290 	{ "INT33C1", 0 },
1291 	{ "INT3430", 0 },
1292 	{ "INT3431", 0 },
1293 	{ "80860F0E", 0 },
1294 	{ "8086228E", 0 },
1295 	{ },
1296 };
1297 MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
1298 #else
1299 static inline struct pxa2xx_spi_master *
1300 pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1301 {
1302 	return NULL;
1303 }
1304 #endif
1305 
1306 static int pxa2xx_spi_probe(struct platform_device *pdev)
1307 {
1308 	struct device *dev = &pdev->dev;
1309 	struct pxa2xx_spi_master *platform_info;
1310 	struct spi_master *master;
1311 	struct driver_data *drv_data;
1312 	struct ssp_device *ssp;
1313 	int status;
1314 	u32 tmp;
1315 
1316 	platform_info = dev_get_platdata(dev);
1317 	if (!platform_info) {
1318 		platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
1319 		if (!platform_info) {
1320 			dev_err(&pdev->dev, "missing platform data\n");
1321 			return -ENODEV;
1322 		}
1323 	}
1324 
1325 	ssp = pxa_ssp_request(pdev->id, pdev->name);
1326 	if (!ssp)
1327 		ssp = &platform_info->ssp;
1328 
1329 	if (!ssp->mmio_base) {
1330 		dev_err(&pdev->dev, "failed to get ssp\n");
1331 		return -ENODEV;
1332 	}
1333 
1334 	/* Allocate master with space for drv_data and null dma buffer */
1335 	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1336 	if (!master) {
1337 		dev_err(&pdev->dev, "cannot alloc spi_master\n");
1338 		pxa_ssp_free(ssp);
1339 		return -ENOMEM;
1340 	}
1341 	drv_data = spi_master_get_devdata(master);
1342 	drv_data->master = master;
1343 	drv_data->master_info = platform_info;
1344 	drv_data->pdev = pdev;
1345 	drv_data->ssp = ssp;
1346 
1347 	master->dev.parent = &pdev->dev;
1348 	master->dev.of_node = pdev->dev.of_node;
1349 	/* the spi->mode bits understood by this driver: */
1350 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1351 
1352 	master->bus_num = ssp->port_id;
1353 	master->num_chipselect = platform_info->num_chipselect;
1354 	master->dma_alignment = DMA_ALIGNMENT;
1355 	master->cleanup = cleanup;
1356 	master->setup = setup;
1357 	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
1358 	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
1359 	master->auto_runtime_pm = true;
1360 
1361 	drv_data->ssp_type = ssp->type;
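	/* The extra 16 bytes allocated with the master hold a DMA-aligned
	 * dummy buffer, used when a transfer lacks an RX or TX buffer */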
1362 	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
1363 
1364 	drv_data->ioaddr = ssp->mmio_base;
1365 	drv_data->ssdr_physical = ssp->phys_base + SSDR;
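	/* PXA25x-compatible ports have no SSTO timeout/TINT support, so
	 * use a reduced interrupt enable and status set */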
1366 	if (pxa25x_ssp_comp(drv_data)) {
1367 		switch (drv_data->ssp_type) {
1368 		case QUARK_X1000_SSP:
1369 			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1370 			break;
1371 		default:
1372 			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
1373 			break;
1374 		}
1375 
1376 		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1377 		drv_data->dma_cr1 = 0;
1378 		drv_data->clear_sr = SSSR_ROR;
1379 		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1380 	} else {
1381 		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1382 		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1383 		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
1384 		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
1385 		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1386 	}
1387 
1388 	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
1389 			drv_data);
1390 	if (status < 0) {
1391 		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
1392 		goto out_error_master_alloc;
1393 	}
1394 
1395 	/* Setup DMA if requested */
1396 	drv_data->tx_channel = -1;
1397 	drv_data->rx_channel = -1;
1398 	if (platform_info->enable_dma) {
1399 		status = pxa2xx_spi_dma_setup(drv_data);
1400 		if (status) {
1401 			dev_dbg(dev, "no DMA channels available, using PIO\n");
1402 			platform_info->enable_dma = false;
1403 		}
1404 	}
1405 
1406 	/* Enable SOC clock */
1407 	clk_prepare_enable(ssp->clk);
1408 
1409 	drv_data->max_clk_rate = clk_get_rate(ssp->clk);
1410 
1411 	/* Load default SSP configuration */
1412 	pxa2xx_spi_write(drv_data, SSCR0, 0);
1413 	switch (drv_data->ssp_type) {
1414 	case QUARK_X1000_SSP:
1415 		tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
1416 		      | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
1417 		pxa2xx_spi_write(drv_data, SSCR1, tmp);
1418 
1419 		/* Use the Motorola SPI protocol and 8-bit frames */
1420 		pxa2xx_spi_write(drv_data, SSCR0,
1421 				 QUARK_X1000_SSCR0_Motorola
1422 				 | QUARK_X1000_SSCR0_DataSize(8));
1423 		break;
1424 	default:
1425 		tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
1426 		      SSCR1_TxTresh(TX_THRESH_DFLT);
1427 		pxa2xx_spi_write(drv_data, SSCR1, tmp);
1428 		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
1429 		pxa2xx_spi_write(drv_data, SSCR0, tmp);
1430 		break;
1431 	}
1432 
1433 	if (!pxa25x_ssp_comp(drv_data))
1434 		pxa2xx_spi_write(drv_data, SSTO, 0);
1435 
1436 	if (!is_quark_x1000_ssp(drv_data))
1437 		pxa2xx_spi_write(drv_data, SSPSP, 0);
1438 
1439 	if (is_lpss_ssp(drv_data))
1440 		lpss_ssp_setup(drv_data);
1441 
1442 	tasklet_init(&drv_data->pump_transfers, pump_transfers,
1443 		     (unsigned long)drv_data);
1444 
1445 	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1446 	pm_runtime_use_autosuspend(&pdev->dev);
1447 	pm_runtime_set_active(&pdev->dev);
1448 	pm_runtime_enable(&pdev->dev);
1449 
1450 	/* Register with the SPI framework */
1451 	platform_set_drvdata(pdev, drv_data);
1452 	status = devm_spi_register_master(&pdev->dev, master);
1453 	if (status != 0) {
1454 		dev_err(&pdev->dev, "problem registering spi master\n");
1455 		goto out_error_clock_enabled;
1456 	}
1457 
1458 	return status;
1459 
1460 out_error_clock_enabled:
1461 	clk_disable_unprepare(ssp->clk);
1462 	pxa2xx_spi_dma_release(drv_data);
1463 	free_irq(ssp->irq, drv_data);
1464 
1465 out_error_master_alloc:
1466 	spi_master_put(master);
1467 	pxa_ssp_free(ssp);
1468 	return status;
1469 }
1470 
1471 static int pxa2xx_spi_remove(struct platform_device *pdev)
1472 {
1473 	struct driver_data *drv_data = platform_get_drvdata(pdev);
1474 	struct ssp_device *ssp;
1475 
1476 	if (!drv_data)
1477 		return 0;
1478 	ssp = drv_data->ssp;
1479 
1480 	pm_runtime_get_sync(&pdev->dev);
1481 
1482 	/* Disable the SSP at the peripheral and SOC level */
1483 	pxa2xx_spi_write(drv_data, SSCR0, 0);
1484 	clk_disable_unprepare(ssp->clk);
1485 
1486 	/* Release DMA */
1487 	if (drv_data->master_info->enable_dma)
1488 		pxa2xx_spi_dma_release(drv_data);
1489 
1490 	pm_runtime_put_noidle(&pdev->dev);
1491 	pm_runtime_disable(&pdev->dev);
1492 
1493 	/* Release IRQ */
1494 	free_irq(ssp->irq, drv_data);
1495 
1496 	/* Release SSP */
1497 	pxa_ssp_free(ssp);
1498 
1499 	return 0;
1500 }
1501 
1502 static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1503 {
1504 	int status = 0;
1505 
1506 	if ((status = pxa2xx_spi_remove(pdev)) != 0)
1507 		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
1508 }
1509 
1510 #ifdef CONFIG_PM_SLEEP
1511 static int pxa2xx_spi_suspend(struct device *dev)
1512 {
1513 	struct driver_data *drv_data = dev_get_drvdata(dev);
1514 	struct ssp_device *ssp = drv_data->ssp;
1515 	int status = 0;
1516 
1517 	status = spi_master_suspend(drv_data->master);
1518 	if (status != 0)
1519 		return status;
1520 	pxa2xx_spi_write(drv_data, SSCR0, 0);
1521 
1522 	if (!pm_runtime_suspended(dev))
1523 		clk_disable_unprepare(ssp->clk);
1524 
1525 	return 0;
1526 }
1527 
1528 static int pxa2xx_spi_resume(struct device *dev)
1529 {
1530 	struct driver_data *drv_data = dev_get_drvdata(dev);
1531 	struct ssp_device *ssp = drv_data->ssp;
1532 	int status = 0;
1533 
1534 	pxa2xx_spi_dma_resume(drv_data);
1535 
1536 	/* Enable the SSP clock */
1537 	if (!pm_runtime_suspended(dev))
1538 		clk_prepare_enable(ssp->clk);
1539 
1540 	/* Restore LPSS private register bits */
1541 	if (is_lpss_ssp(drv_data))
1542 		lpss_ssp_setup(drv_data);
1543 
1544 	/* Start the queue running */
1545 	status = spi_master_resume(drv_data->master);
1546 	if (status != 0) {
1547 		dev_err(dev, "problem starting queue (%d)\n", status);
1548 		return status;
1549 	}
1550 
1551 	return 0;
1552 }
1553 #endif
1554 
1555 #ifdef CONFIG_PM
1556 static int pxa2xx_spi_runtime_suspend(struct device *dev)
1557 {
1558 	struct driver_data *drv_data = dev_get_drvdata(dev);
1559 
1560 	clk_disable_unprepare(drv_data->ssp->clk);
1561 	return 0;
1562 }
1563 
1564 static int pxa2xx_spi_runtime_resume(struct device *dev)
1565 {
1566 	struct driver_data *drv_data = dev_get_drvdata(dev);
1567 
1568 	clk_prepare_enable(drv_data->ssp->clk);
1569 	return 0;
1570 }
1571 #endif
1572 
1573 static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
1574 	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
1575 	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
1576 			   pxa2xx_spi_runtime_resume, NULL)
1577 };
1578 
1579 static struct platform_driver driver = {
1580 	.driver = {
1581 		.name	= "pxa2xx-spi",
1582 		.pm	= &pxa2xx_spi_pm_ops,
1583 		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
1584 	},
1585 	.probe = pxa2xx_spi_probe,
1586 	.remove = pxa2xx_spi_remove,
1587 	.shutdown = pxa2xx_spi_shutdown,
1588 };
1589 
1590 static int __init pxa2xx_spi_init(void)
1591 {
1592 	return platform_driver_register(&driver);
1593 }
1594 subsys_initcall(pxa2xx_spi_init);
1595 
1596 static void __exit pxa2xx_spi_exit(void)
1597 {
1598 	platform_driver_unregister(&driver);
1599 }
1600 module_exit(pxa2xx_spi_exit);
1601