// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI bus driver for the Topcliff PCH used by Intel SoCs
 *
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>

#include <linux/dmaengine.h>
#include <linux/pch_dma.h>

/* Register offsets */
#define PCH_SPCR		0x00	/* SPI control register */
#define PCH_SPBRR		0x04	/* SPI baud rate register */
#define PCH_SPSR		0x08	/* SPI status register */
#define PCH_SPDWR		0x0C	/* SPI write data register */
#define PCH_SPDRR		0x10	/* SPI read data register */
#define PCH_SSNXCR		0x18	/* SSN Expand Control Register */
#define PCH_SRST		0x1C	/* SPI reset register */
#define PCH_ADDRESS_SIZE	0x20

#define PCH_SPSR_TFD		0x000007C0
#define PCH_SPSR_RFD		0x0000F800

#define PCH_READABLE(x)		(((x) & PCH_SPSR_RFD) >> 11)
#define PCH_WRITABLE(x)		(((x) & PCH_SPSR_TFD) >> 6)

#define PCH_RX_THOLD		7
#define PCH_RX_THOLD_MAX	15

#define PCH_TX_THOLD		2

#define PCH_MAX_BAUDRATE	5000000
#define PCH_MAX_FIFO_DEPTH	16

#define STATUS_RUNNING		1
#define STATUS_EXITING		2
#define PCH_SLEEP_TIME		10

#define SSN_LOW			0x02U
#define SSN_HIGH		0x03U
#define SSN_NO_CONTROL		0x00U
#define PCH_MAX_CS		0xFF
#define PCI_DEVICE_ID_GE_SPI	0x8816

#define SPCR_SPE_BIT		(1 << 0)
#define SPCR_MSTR_BIT		(1 << 1)
#define SPCR_LSBF_BIT		(1 << 4)
#define SPCR_CPHA_BIT		(1 << 5)
#define SPCR_CPOL_BIT		(1 << 6)
#define SPCR_TFIE_BIT		(1 << 8)
#define SPCR_RFIE_BIT		(1 << 9)
#define SPCR_FIE_BIT		(1 << 10)
#define SPCR_ORIE_BIT		(1 << 11)
#define SPCR_MDFIE_BIT		(1 << 12)
#define SPCR_FICLR_BIT		(1 << 24)
#define SPSR_TFI_BIT		(1 << 0)
#define SPSR_RFI_BIT		(1 << 1)
#define SPSR_FI_BIT		(1 << 2)
#define SPSR_ORF_BIT		(1 << 3)
#define SPBRR_SIZE_BIT		(1 << 10)

#define PCH_ALL			(SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
				SPCR_ORIE_BIT|SPCR_MDFIE_BIT)

#define SPCR_RFIC_FIELD		20
#define SPCR_TFIC_FIELD		16

#define MASK_SPBRR_SPBR_BITS	((1 << 10) - 1)
#define MASK_RFIC_SPCR_BITS	(0xf << SPCR_RFIC_FIELD)
#define MASK_TFIC_SPCR_BITS	(0xf << SPCR_TFIC_FIELD)

#define PCH_CLOCK_HZ		50000000
#define PCH_MAX_SPBR		1023

/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
#define PCI_DEVICE_ID_ML7213_SPI	0x802c
#define PCI_DEVICE_ID_ML7223_SPI	0x800F
#define PCI_DEVICE_ID_ML7831_SPI	0x8816

/*
 * Maximum number of SPI channel instances:
 * Intel EG20T PCH :			1ch
 * LAPIS Semiconductor ML7213 IOH :	2ch
 * LAPIS Semiconductor ML7223 IOH :	1ch
 * LAPIS Semiconductor ML7831 IOH :	1ch
 */
#define PCH_SPI_MAX_DEV			2

#define PCH_BUF_SIZE		4096
#define PCH_DMA_TRANS_SIZE	12

static int use_dma = 1;

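/**
 * struct pch_spi_dma_ctrl - Holds the DMA details for one SPI channel
 * @dma_dev:		PCI device that provides the DMA engine
 * @desc_tx:		Transmit DMA descriptor
 * @desc_rx:		Receive DMA descriptor
 * @param_tx:		Slave parameters for the transmit channel
 * @param_rx:		Slave parameters for the receive channel
 * @chan_tx:		Transmit DMA channel
 * @chan_rx:		Receive DMA channel
 * @sg_tx_p:		Transmit scatterlist
 * @sg_rx_p:		Receive scatterlist
 * @sg_tx:		Single transmit scatterlist entry
 * @sg_rx:		Single receive scatterlist entry
 * @nent:		Number of scatterlist entries
 * @tx_buf_virt:	Virtual address of the transmit bounce buffer
 * @rx_buf_virt:	Virtual address of the receive bounce buffer
 * @tx_buf_dma:		DMA address of the transmit bounce buffer
 * @rx_buf_dma:		DMA address of the receive bounce buffer
 */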
struct pch_spi_dma_ctrl {
	struct pci_dev		*dma_dev;
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	struct pch_dma_slave		param_tx;
	struct pch_dma_slave		param_rx;
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct scatterlist		*sg_tx_p;
	struct scatterlist		*sg_rx_p;
	struct scatterlist		sg_tx;
	struct scatterlist		sg_rx;
	int				nent;
	void				*tx_buf_virt;
	void				*rx_buf_virt;
	dma_addr_t			tx_buf_dma;
	dma_addr_t			rx_buf_dma;
};

/**
 * struct pch_spi_data - Holds the SPI channel specific details
 * @io_remap_addr:		The remapped PCI base address
 * @io_base_addr:		Base address
 * @master:			Pointer to the SPI master structure
 * @work:			Reference to work queue handler
 * @wait:			Wait queue for waking up upon receiving an
 *				interrupt.
 * @transfer_complete:		Status of SPI Transfer
 * @bcurrent_msg_processing:	Status flag for message processing
 * @lock:			Lock for protecting this structure
 * @queue:			SPI Message queue
 * @status:			Status of the SPI driver
 * @bpw_len:			Length of data to be transferred in bits per
 *				word
 * @transfer_active:		Flag showing active transfer
 * @tx_index:			Transmit data count; for bookkeeping during
 *				transfer
 * @rx_index:			Receive data count; for bookkeeping during
 *				transfer
 * @pkt_tx_buff:		Buffer for data to be transmitted
 * @pkt_rx_buff:		Buffer for received data
 * @n_curnt_chip:		The chip number that this SPI driver currently
 *				operates on
 * @current_chip:		Reference to the current chip that this SPI
 *				driver currently operates on
 * @current_msg:		The current message that this SPI driver is
 *				handling
 * @cur_trans:			The current transfer that this SPI driver is
 *				handling
 * @board_dat:			Reference to the SPI device data structure
 * @plat_dev:			platform_device structure
 * @ch:				SPI channel number
 * @dma:			Local DMA information
 * @use_dma:			True if DMA is to be used
 * @irq_reg_sts:		Status of IRQ registration
 * @save_total_len:		Save length while data is being transferred
 */
struct pch_spi_data {
	void __iomem *io_remap_addr;
	unsigned long io_base_addr;
	struct spi_master *master;
	struct work_struct work;
	wait_queue_head_t wait;
	u8 transfer_complete;
	u8 bcurrent_msg_processing;
	spinlock_t lock;
	struct list_head queue;
	u8 status;
	u32 bpw_len;
	u8 transfer_active;
	u32 tx_index;
	u32 rx_index;
	u16 *pkt_tx_buff;
	u16 *pkt_rx_buff;
	u8 n_curnt_chip;
	struct spi_device *current_chip;
	struct spi_message *current_msg;
	struct spi_transfer *cur_trans;
	struct pch_spi_board_data *board_dat;
	struct platform_device	*plat_dev;
	int ch;
	struct pch_spi_dma_ctrl dma;
	int use_dma;
	u8 irq_reg_sts;
	int save_total_len;
};

/**
 * struct pch_spi_board_data - Holds the SPI device specific details
 * @pdev:		Pointer to the PCI device
 * @suspend_sts:	Status of suspend
 * @num:		The number of SPI device instance
 */
struct pch_spi_board_data {
	struct pci_dev *pdev;
	u8 suspend_sts;
	int num;
};

struct pch_pd_dev_save {
	int num;
	struct platform_device *pd_save[PCH_SPI_MAX_DEV];
	struct pch_spi_board_data *board_dat;
};

static const struct pci_device_id pch_spi_pcidev_id[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI),    1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
	{ }
};

/**
 * pch_spi_writereg() - Performs register writes
 * @master:	Pointer to struct spi_master.
 * @idx:	Register offset.
 * @val:	Value to be written to register.
 */
static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
{
	struct pch_spi_data *data = spi_master_get_devdata(master);
	iowrite32(val, (data->io_remap_addr + idx));
}

/**
 * pch_spi_readreg() - Performs register reads
 * @master:	Pointer to struct spi_master.
 * @idx:	Register offset.
 */
static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
{
	struct pch_spi_data *data = spi_master_get_devdata(master);
	return ioread32(data->io_remap_addr + idx);
}

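/**
 * pch_spi_setclr_reg() - Sets and clears bits in an SPI register
 * @master:	Pointer to struct spi_master.
 * @idx:	Register offset.
 * @set:	Bits to set in the register.
 * @clr:	Bits to clear in the register.
 */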
static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
				      u32 set, u32 clr)
{
	u32 tmp = pch_spi_readreg(master, idx);
	tmp = (tmp & ~clr) | set;
	pch_spi_writereg(master, idx, tmp);
}

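/**
 * pch_spi_set_master_mode() - Sets the MSTR bit in SPCR to select master mode
 * @master:	Pointer to struct spi_master.
 */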
static void pch_spi_set_master_mode(struct spi_master *master)
{
	pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
}

/**
 * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
 * @master:	Pointer to struct spi_master.
 */
static void pch_spi_clear_fifo(struct spi_master *master)
{
	pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
	pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
}

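/**
 * pch_spi_handler_sub() - Handles the data transfer part of the interrupt
 * @data:		Pointer to the SPI channel data.
 * @reg_spsr_val:	Snapshot of the SPI status register.
 * @io_remap_addr:	The remapped PCI base address.
 *
 * Drains the receive FIFO, refills the transmit FIFO, and wakes up
 * pch_spi_process_messages() once the transfer is complete.
 */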
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
				void __iomem *io_remap_addr)
{
	u32 n_read, tx_index, rx_index, bpw_len;
	u16 *pkt_rx_buffer, *pkt_tx_buff;
	int read_cnt;
	u32 reg_spcr_val;
	void __iomem *spsr;
	void __iomem *spdrr;
	void __iomem *spdwr;

	spsr = io_remap_addr + PCH_SPSR;
	iowrite32(reg_spsr_val, spsr);

	if (data->transfer_active) {
		rx_index = data->rx_index;
		tx_index = data->tx_index;
		bpw_len = data->bpw_len;
		pkt_rx_buffer = data->pkt_rx_buff;
		pkt_tx_buff = data->pkt_tx_buff;

		spdrr = io_remap_addr + PCH_SPDRR;
		spdwr = io_remap_addr + PCH_SPDWR;

		n_read = PCH_READABLE(reg_spsr_val);

		for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
			pkt_rx_buffer[rx_index++] = ioread32(spdrr);
			if (tx_index < bpw_len)
				iowrite32(pkt_tx_buff[tx_index++], spdwr);
		}

		/* disable RFI if not needed */
		if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
			reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
			reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */

			/* reset rx threshold */
			reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
			reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);

			iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
		}

		/* update counts */
		data->tx_index = tx_index;
		data->rx_index = rx_index;

		/* if transfer complete interrupt */
		if (reg_spsr_val & SPSR_FI_BIT) {
			if ((tx_index == bpw_len) && (rx_index == tx_index)) {
				/* disable interrupts */
				pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
						   PCH_ALL);

				/* transfer is completed;
				   inform pch_spi_process_messages */
				data->transfer_complete = true;
				data->transfer_active = false;
				wake_up(&data->wait);
			} else {
				dev_vdbg(&data->master->dev,
					"%s : Transfer is not completed",
					__func__);
			}
		}
	}
}

/**
 * pch_spi_handler() - Interrupt handler
 * @irq:	The interrupt number.
 * @dev_id:	Pointer to struct pch_spi_board_data.
 */
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
	u32 reg_spsr_val;
	void __iomem *spsr;
	void __iomem *io_remap_addr;
	irqreturn_t ret = IRQ_NONE;
	struct pch_spi_data *data = dev_id;
	struct pch_spi_board_data *board_dat = data->board_dat;

	if (board_dat->suspend_sts) {
		dev_dbg(&board_dat->pdev->dev,
			"%s returning due to suspend\n", __func__);
		return IRQ_NONE;
	}

	io_remap_addr = data->io_remap_addr;
	spsr = io_remap_addr + PCH_SPSR;

	reg_spsr_val = ioread32(spsr);

	if (reg_spsr_val & SPSR_ORF_BIT) {
		dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
		if (data->current_msg->complete) {
			data->transfer_complete = true;
			data->current_msg->status = -EIO;
			data->current_msg->complete(data->current_msg->context);
			data->bcurrent_msg_processing = false;
			data->current_msg = NULL;
			data->cur_trans = NULL;
		}
	}

	if (data->use_dma)
		return IRQ_NONE;

	/* Check if the interrupt is for SPI device */
	if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
		pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
		ret = IRQ_HANDLED;
	}

	dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
		__func__, ret);

	return ret;
}

/**
 * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
 * @master:	Pointer to struct spi_master.
 * @speed_hz:	Baud rate.
 */
static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
{
	u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);

	/* if the requested baud rate is lower than we can support, limit it */
	if (n_spbr > PCH_MAX_SPBR)
		n_spbr = PCH_MAX_SPBR;

	pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}

/**
 * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
 * @master:		Pointer to struct spi_master.
 * @bits_per_word:	Bits per word for SPI transfer.
 */
static void pch_spi_set_bits_per_word(struct spi_master *master,
				      u8 bits_per_word)
{
	if (bits_per_word == 8)
		pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
	else
		pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}

/**
 * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
 * @spi:	Pointer to struct spi_device.
 */
static void pch_spi_setup_transfer(struct spi_device *spi)
{
	u32 flags = 0;

	dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
		__func__, pch_spi_readreg(spi->master, PCH_SPBRR),
		spi->max_speed_hz);
	pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);

	/* set bits per word */
	pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);

	if (!(spi->mode & SPI_LSB_FIRST))
		flags |= SPCR_LSBF_BIT;
	if (spi->mode & SPI_CPOL)
		flags |= SPCR_CPOL_BIT;
	if (spi->mode & SPI_CPHA)
		flags |= SPCR_CPHA_BIT;
	pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
			   (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));

	/* Clear the FIFO by toggling FICLR to 1 and back to 0 */
	pch_spi_clear_fifo(spi->master);
}

/**
 * pch_spi_reset() - Clears SPI registers
 * @master:	Pointer to struct spi_master.
 */
static void pch_spi_reset(struct spi_master *master)
{
	/* write 1 to reset SPI */
	pch_spi_writereg(master, PCH_SRST, 0x1);

	/* clear reset */
	pch_spi_writereg(master, PCH_SRST, 0x0);
}

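/**
 * pch_spi_transfer() - Queues an SPI message for asynchronous processing
 * @pspi:	Pointer to struct spi_device.
 * @pmsg:	Pointer to the SPI message to be queued.
 *
 * Adds the message to the driver's queue and schedules the worker that
 * processes it. Fails if the driver is exiting or suspended.
 */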
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
	struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
	int retval;
	unsigned long flags;

	/* We won't process any messages if we have been asked to terminate */
	if (data->status == STATUS_EXITING) {
		dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
		retval = -ESHUTDOWN;
		goto err_out;
	}

	/* If suspended, return -EINVAL */
	if (data->board_dat->suspend_sts) {
		dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
		retval = -EINVAL;
		goto err_out;
	}

	/* set status of message */
	pmsg->actual_length = 0;
	dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);

	pmsg->status = -EINPROGRESS;
	spin_lock_irqsave(&data->lock, flags);
	/* add message to queue */
	list_add_tail(&pmsg->queue, &data->queue);
	spin_unlock_irqrestore(&data->lock, flags);

	dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);

	schedule_work(&data->work);
	dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);

	retval = 0;

err_out:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	return retval;
}

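/**
 * pch_spi_select_chip() - Selects the chip for the next transfer
 * @data:	Pointer to the SPI channel data.
 * @pspi:	Pointer to struct spi_device.
 *
 * Records the chip select in use and reconfigures baud rate, bits per
 * word and SPI mode for the new device.
 */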
static inline void pch_spi_select_chip(struct pch_spi_data *data,
				       struct spi_device *pspi)
{
	if (data->current_chip != NULL) {
		if (spi_get_chipselect(pspi, 0) != data->n_curnt_chip) {
			dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
			data->current_chip = NULL;
		}
	}

	data->current_chip = pspi;

	data->n_curnt_chip = spi_get_chipselect(data->current_chip, 0);

	dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
	pch_spi_setup_transfer(pspi);
}

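/**
 * pch_spi_set_tx() - Prepares the packet buffers and primes the TX FIFO
 * @data:	Pointer to the SPI channel data.
 * @bpw:	Filled with the bits-per-word used for the current transfer.
 *
 * Allocates pkt_tx_buff/pkt_rx_buff, copies the transfer's TX data,
 * pulls SSN low and writes up to PCH_MAX_FIFO_DEPTH words to the FIFO.
 */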
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
	int size;
	u32 n_writes;
	int j;
	struct spi_message *pmsg, *tmp;
	const u8 *tx_buf;
	const u16 *tx_sbuf;

	/* set baud rate if needed */
	if (data->cur_trans->speed_hz) {
		dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
		pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
	}

	/* set bits per word if needed */
	if (data->cur_trans->bits_per_word &&
	    (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
		dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
		pch_spi_set_bits_per_word(data->master,
					  data->cur_trans->bits_per_word);
		*bpw = data->cur_trans->bits_per_word;
	} else {
		*bpw = data->current_msg->spi->bits_per_word;
	}

	/* reset Tx/Rx index */
	data->tx_index = 0;
	data->rx_index = 0;

	data->bpw_len = data->cur_trans->len / (*bpw / 8);

	/* find alloc size */
	size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);

	/* allocate memory for pkt_tx_buff & pkt_rx_buffer */
	data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
	if (data->pkt_tx_buff != NULL) {
		data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
		if (!data->pkt_rx_buff) {
			kfree(data->pkt_tx_buff);
			data->pkt_tx_buff = NULL;
		}
	}

	if (!data->pkt_rx_buff) {
		/* flush queue and set status of all transfers to -ENOMEM */
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -ENOMEM;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
		return;
	}

	/* copy Tx Data */
	if (data->cur_trans->tx_buf != NULL) {
		if (*bpw == 8) {
			tx_buf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_buf++;
		} else {
			tx_sbuf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_sbuf++;
		}
	}

	/* if len is greater than PCH_MAX_FIFO_DEPTH, write 16 entries,
	 * else len entries */
	n_writes = data->bpw_len;
	if (n_writes > PCH_MAX_FIFO_DEPTH)
		n_writes = PCH_MAX_FIFO_DEPTH;

	dev_dbg(&data->master->dev,
		"\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
		__func__);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);

	for (j = 0; j < n_writes; j++)
		pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);

	/* update tx_index */
	data->tx_index = j;

	/* reset transfer complete flag */
	data->transfer_complete = false;
	data->transfer_active = true;
}

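/**
 * pch_spi_nomore_transfer() - Completes the current message
 * @data:	Pointer to the SPI channel data.
 *
 * Invokes the message's completion callback, then either reschedules
 * the worker for queued messages or flushes the queue on suspend/exit.
 */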
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
	struct spi_message *pmsg, *tmp;
	dev_dbg(&data->master->dev, "%s called\n", __func__);
	/* Invoke the complete callback
	 * [to the SPI core, indicating end of transfer] */
	data->current_msg->status = 0;

	if (data->current_msg->complete) {
		dev_dbg(&data->master->dev,
			"%s:Invoking callback of SPI core\n", __func__);
		data->current_msg->complete(data->current_msg->context);
	}

	/* update status in global variable */
	data->bcurrent_msg_processing = false;

	dev_dbg(&data->master->dev,
		"%s:data->bcurrent_msg_processing = false\n", __func__);

	data->current_msg = NULL;
	data->cur_trans = NULL;

	/* check if we have items in the list and are not suspending;
	 * list_empty() returns 1 if the list is empty */
	if ((list_empty(&data->queue) == 0) &&
	    (!data->board_dat->suspend_sts) &&
	    (data->status != STATUS_EXITING)) {
		/* We have some more work to do (either there are more
		 * transfer requests in the current message or there are
		 * more messages)
		 */
		dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
		schedule_work(&data->work);
	} else if (data->board_dat->suspend_sts ||
		   data->status == STATUS_EXITING) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n",
			__func__);
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -EIO;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
	}
}

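/**
 * pch_spi_set_ir() - Enables interrupts and SPI, then waits for completion
 * @data:	Pointer to the SPI channel data.
 *
 * Sets the RX FIFO threshold, enables the transfer, sleeps until the
 * interrupt handler reports completion, then disables interrupts and
 * clears the FIFOs.
 */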
static void pch_spi_set_ir(struct pch_spi_data *data)
{
	/* enable interrupts, set threshold, enable SPI */
	if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
		/* set receive threshold to PCH_RX_THOLD */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_RFIE_BIT |
				   SPCR_ORIE_BIT | SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);
	else
		/* set receive threshold to maximum */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_ORIE_BIT |
				   SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);

	/* Wait until the transfer completes; go to sleep after
	 * initiating the transfer. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);

	wait_event_interruptible(data->wait, data->transfer_complete);

	/* clear all interrupts */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* Disable interrupts and SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);
}

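/**
 * pch_spi_copy_rx_data() - Copies received data into the transfer's rx_buf
 * @data:	Pointer to the SPI channel data.
 * @bpw:	Bits per word used for the current transfer.
 */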
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
{
	int j;
	u8 *rx_buf;
	u16 *rx_sbuf;

	/* copy Rx Data */
	if (!data->cur_trans->rx_buf)
		return;

	if (bpw == 8) {
		rx_buf = data->cur_trans->rx_buf;
		for (j = 0; j < data->bpw_len; j++)
			*rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
	} else {
		rx_sbuf = data->cur_trans->rx_buf;
		for (j = 0; j < data->bpw_len; j++)
			*rx_sbuf++ = data->pkt_rx_buff[j];
	}
}

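/**
 * pch_spi_copy_rx_data_for_dma() - Copies data from the DMA receive buffer
 *				    into the transfer's rx_buf
 * @data:	Pointer to the SPI channel data.
 * @bpw:	Bits per word used for the current transfer.
 */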
static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
{
	int j;
	u8 *rx_buf;
	u16 *rx_sbuf;
	const u8 *rx_dma_buf;
	const u16 *rx_dma_sbuf;

	/* copy Rx Data */
	if (!data->cur_trans->rx_buf)
		return;

	if (bpw == 8) {
		rx_buf = data->cur_trans->rx_buf;
		rx_dma_buf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_buf++ = *rx_dma_buf++ & 0xFF;
		data->cur_trans->rx_buf = rx_buf;
	} else {
		rx_sbuf = data->cur_trans->rx_buf;
		rx_dma_sbuf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_sbuf++ = *rx_dma_sbuf++;
		data->cur_trans->rx_buf = rx_sbuf;
	}
}

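/**
 * pch_spi_start_transfer() - Enables SPI and waits for the DMA transfer
 * @data:	Pointer to the SPI channel data.
 *
 * Return: 0 if the wait timed out, otherwise the (non-zero) result of
 * wait_event_interruptible_timeout().
 */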
static int pch_spi_start_transfer(struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;
	unsigned long flags;
	int rtn;

	dma = &data->dma;

	spin_lock_irqsave(&data->lock, flags);

	/* disable interrupts, SPI set enable */
	pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);

	spin_unlock_irqrestore(&data->lock, flags);

	/* Wait until the transfer completes; go to sleep after
	 * initiating the transfer. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);
	rtn = wait_event_interruptible_timeout(data->wait,
					       data->transfer_complete,
					       msecs_to_jiffies(2 * HZ));
	if (!rtn)
		dev_err(&data->master->dev,
			"%s wait-event timeout\n", __func__);

	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
			    DMA_FROM_DEVICE);

	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
			    DMA_FROM_DEVICE);
	memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);

	async_tx_ack(dma->desc_rx);
	async_tx_ack(dma->desc_tx);
	kfree(dma->sg_tx_p);
	kfree(dma->sg_rx_p);

	spin_lock_irqsave(&data->lock, flags);

	/* clear fifo threshold, disable interrupts, disable SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
			   SPCR_SPE_BIT);
	/* clear all interrupts */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);

	spin_unlock_irqrestore(&data->lock, flags);

	return rtn;
}

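/**
 * pch_dma_rx_complete() - DMA receive-completion callback
 * @arg:	Pointer to the SPI channel data.
 *
 * Marks the transfer complete and wakes up pch_spi_start_transfer().
 */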
static void pch_dma_rx_complete(void *arg)
{
	struct pch_spi_data *data = arg;

	/* transfer is completed; inform pch_spi_process_messages_dma */
	data->transfer_complete = true;
	wake_up_interruptible(&data->wait);
}

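/**
 * pch_spi_filter() - Filter callback for dma_request_channel()
 * @chan:	Candidate DMA channel.
 * @slave:	Pointer to the struct pch_dma_slave being requested.
 *
 * Matches the channel by channel ID and providing device.
 */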
static bool pch_spi_filter(struct dma_chan *chan, void *slave)
{
	struct pch_dma_slave *param = slave;

	if ((chan->chan_id == param->chan_id) &&
	    (param->dma_dev == chan->device->dev)) {
		chan->private = param;
		return true;
	} else {
		return false;
	}
}

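/**
 * pch_spi_request_dma() - Requests the Tx and Rx DMA channels
 * @data:	Pointer to the SPI channel data.
 * @bpw:	Bits per word; selects the DMA transfer width.
 *
 * Falls back to PIO (data->use_dma = 0) if a channel cannot be obtained.
 */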
static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct pci_dev *dma_dev;
	struct pch_dma_slave *param;
	struct pch_spi_dma_ctrl *dma;
	unsigned int width;

	if (bpw == 8)
		width = PCH_DMA_WIDTH_1_BYTE;
	else
		width = PCH_DMA_WIDTH_2_BYTES;

	dma = &data->dma;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get DMA's dev information */
	dma_dev = pci_get_slot(data->board_dat->pdev->bus,
			PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));

	/* Set Tx DMA */
	param = &dma->param_tx;
	param->dma_dev = &dma_dev->dev;
	param->chan_id = data->ch * 2; /* Tx = 0, 2 */
	param->tx_reg = data->io_base_addr + PCH_SPDWR;
	param->width = width;
	chan = dma_request_channel(mask, pch_spi_filter, param);
	if (!chan) {
		dev_err(&data->master->dev,
			"ERROR: dma_request_channel FAILS(Tx)\n");
		goto out;
	}
	dma->chan_tx = chan;

	/* Set Rx DMA */
	param = &dma->param_rx;
	param->dma_dev = &dma_dev->dev;
	param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
	param->rx_reg = data->io_base_addr + PCH_SPDRR;
	param->width = width;
	chan = dma_request_channel(mask, pch_spi_filter, param);
	if (!chan) {
		dev_err(&data->master->dev,
			"ERROR: dma_request_channel FAILS(Rx)\n");
		dma_release_channel(dma->chan_tx);
		dma->chan_tx = NULL;
		goto out;
	}
	dma->chan_rx = chan;

	dma->dma_dev = dma_dev;
	return;
out:
	pci_dev_put(dma_dev);
	data->use_dma = 0;
}

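/**
 * pch_spi_release_dma() - Releases the Tx and Rx DMA channels
 * @data:	Pointer to the SPI channel data.
 */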
static void pch_spi_release_dma(struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;
	if (dma->chan_tx) {
		dma_release_channel(dma->chan_tx);
		dma->chan_tx = NULL;
	}
	if (dma->chan_rx) {
		dma_release_channel(dma->chan_rx);
		dma->chan_rx = NULL;
	}

	pci_dev_put(dma->dma_dev);
}

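/**
 * pch_spi_handle_dma() - Prepares and submits the DMA descriptors
 * @data:	Pointer to the SPI channel data.
 * @bpw:	Filled with the bits-per-word used for the current transfer.
 *
 * Copies the TX data into the DMA bounce buffer, builds the Rx/Tx
 * scatterlists, and submits both descriptors after pulling SSN low.
 */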
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
{
	const u8 *tx_buf;
	const u16 *tx_sbuf;
	u8 *tx_dma_buf;
	u16 *tx_dma_sbuf;
	struct scatterlist *sg;
	struct dma_async_tx_descriptor *desc_tx;
	struct dma_async_tx_descriptor *desc_rx;
	int num;
	int i;
	int size;
	int rem;
	int head;
	unsigned long flags;
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;

	/* set baud rate if needed */
	if (data->cur_trans->speed_hz) {
		dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
		spin_lock_irqsave(&data->lock, flags);
		pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
		spin_unlock_irqrestore(&data->lock, flags);
	}

	/* set bits per word if needed */
	if (data->cur_trans->bits_per_word &&
	    (data->current_msg->spi->bits_per_word !=
	     data->cur_trans->bits_per_word)) {
		dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
		spin_lock_irqsave(&data->lock, flags);
		pch_spi_set_bits_per_word(data->master,
					  data->cur_trans->bits_per_word);
		spin_unlock_irqrestore(&data->lock, flags);
		*bpw = data->cur_trans->bits_per_word;
	} else {
		*bpw = data->current_msg->spi->bits_per_word;
	}
	data->bpw_len = data->cur_trans->len / (*bpw / 8);

	if (data->bpw_len > PCH_BUF_SIZE) {
		data->bpw_len = PCH_BUF_SIZE;
		data->cur_trans->len -= PCH_BUF_SIZE;
	}

	/* copy Tx Data */
	if (data->cur_trans->tx_buf != NULL) {
		if (*bpw == 8) {
			tx_buf = data->cur_trans->tx_buf;
			tx_dma_buf = dma->tx_buf_virt;
			for (i = 0; i < data->bpw_len; i++)
				*tx_dma_buf++ = *tx_buf++;
		} else {
			tx_sbuf = data->cur_trans->tx_buf;
			tx_dma_sbuf = dma->tx_buf_virt;
			for (i = 0; i < data->bpw_len; i++)
				*tx_dma_sbuf++ = *tx_sbuf++;
		}
	}

	/* Calculate Rx parameters for the DMA transfer */
	if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
		if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
		} else {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE;
			rem = PCH_DMA_TRANS_SIZE;
		}
		size = PCH_DMA_TRANS_SIZE;
	} else {
		num = 1;
		size = data->bpw_len;
		rem = data->bpw_len;
	}
	dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
		__func__, num, size, rem);
	spin_lock_irqsave(&data->lock, flags);

	/* set receive fifo threshold and transmit fifo threshold */
	pch_spi_setclr_reg(data->master, PCH_SPCR,
			   ((size - 1) << SPCR_RFIC_FIELD) |
			   (PCH_TX_THOLD << SPCR_TFIC_FIELD),
			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);

	spin_unlock_irqrestore(&data->lock, flags);

	/* RX */
	dma->sg_rx_p = kmalloc_array(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
	if (!dma->sg_rx_p)
		return;

	sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
	/* offset, length setting */
	sg = dma->sg_rx_p;
	for (i = 0; i < num; i++, sg++) {
		if (i == (num - 2)) {
			sg->offset = size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
				    sg->offset);
			sg_dma_len(sg) = rem;
		} else if (i == (num - 1)) {
			sg->offset = size * (i - 1) + rem;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		} else {
			sg->offset = size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		}
		sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
	}
	sg = dma->sg_rx_p;
	desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
					num, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		dev_err(&data->master->dev,
			"%s:dmaengine_prep_slave_sg Failed\n", __func__);
		return;
	}
	dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
	desc_rx->callback = pch_dma_rx_complete;
	desc_rx->callback_param = data;
	dma->nent = num;
	dma->desc_rx = desc_rx;

	/* Calculate Tx parameters for the DMA transfer */
	if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
		head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
		if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
		} else {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
			      PCH_DMA_TRANS_SIZE - head;
		}
		size = PCH_DMA_TRANS_SIZE;
	} else {
		num = 1;
		size = data->bpw_len;
		rem = data->bpw_len;
		head = 0;
	}

	dma->sg_tx_p = kmalloc_array(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
	if (!dma->sg_tx_p)
		return;

	sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
	/* offset, length setting */
	sg = dma->sg_tx_p;
	for (i = 0; i < num; i++, sg++) {
		if (i == 0) {
			sg->offset = 0;
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
				    sg->offset);
			sg_dma_len(sg) = size + head;
		} else if (i == (num - 1)) {
			sg->offset = head + size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
				    sg->offset);
			sg_dma_len(sg) = rem;
		} else {
			sg->offset = head + size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		}
		sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
	}
	sg = dma->sg_tx_p;
	desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
					sg, num, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dev_err(&data->master->dev,
			"%s:dmaengine_prep_slave_sg Failed\n", __func__);
		return;
	}
	dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
	desc_tx->callback = NULL;
	desc_tx->callback_param = data;
	dma->nent = num;
	dma->desc_tx = desc_tx;

	dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);

	spin_lock_irqsave(&data->lock, flags);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
	desc_rx->tx_submit(desc_rx);
	desc_tx->tx_submit(desc_tx);
	spin_unlock_irqrestore(&data->lock, flags);

	/* reset transfer complete flag */
	data->transfer_complete = false;
}

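/**
 * pch_spi_process_messages() - Work handler that processes the message queue
 * @pwork:	Pointer to the channel's work_struct.
 *
 * Dequeues one message at a time and executes its transfers, either via
 * DMA or via the interrupt-driven FIFO path.
 */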
static void pch_spi_process_messages(struct work_struct *pwork)
{
	struct spi_message *pmsg, *tmp;
	struct pch_spi_data *data;
	int bpw;

	data = container_of(pwork, struct pch_spi_data, work);
	dev_dbg(&data->master->dev, "%s data initialized\n", __func__);

	spin_lock(&data->lock);
	/* check if suspend has been initiated; if yes, flush queue */
	if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n", __func__);
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -EIO;

			if (pmsg->complete) {
				spin_unlock(&data->lock);
				pmsg->complete(pmsg->context);
				spin_lock(&data->lock);
			}

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}

		spin_unlock(&data->lock);
		return;
	}

	data->bcurrent_msg_processing = true;
	dev_dbg(&data->master->dev,
		"%s Set data->bcurrent_msg_processing= true\n", __func__);

	/* Get the message from the queue and delete it from there. */
	data->current_msg = list_entry(data->queue.next, struct spi_message,
					queue);

	list_del_init(&data->current_msg->queue);

	data->current_msg->status = 0;

	pch_spi_select_chip(data, data->current_msg->spi);

	spin_unlock(&data->lock);

	if (data->use_dma)
		pch_spi_request_dma(data,
				    data->current_msg->spi->bits_per_word);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
	do {
		int cnt;
		/* If we are already processing a message, get the next
		 * transfer structure from the message; otherwise retrieve
		 * the 1st transfer request from the message. */
		spin_lock(&data->lock);
		if (data->cur_trans == NULL) {
			data->cur_trans =
				list_entry(data->current_msg->transfers.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting 1st transfer message\n",
				__func__);
		} else {
			data->cur_trans =
				list_entry(data->cur_trans->transfer_list.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting next transfer message\n",
				__func__);
		}
		spin_unlock(&data->lock);

		if (!data->cur_trans->len)
			goto out;
		cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
		data->save_total_len = data->cur_trans->len;
		if (data->use_dma) {
			int i;
			char *save_rx_buf = data->cur_trans->rx_buf;

			for (i = 0; i < cnt; i++) {
				pch_spi_handle_dma(data, &bpw);
				if (!pch_spi_start_transfer(data)) {
					data->transfer_complete = true;
					data->current_msg->status = -EIO;
					data->current_msg->complete
						   (data->current_msg->context);
					data->bcurrent_msg_processing = false;
					data->current_msg = NULL;
					data->cur_trans = NULL;
					goto out;
				}
				pch_spi_copy_rx_data_for_dma(data, bpw);
			}
			data->cur_trans->rx_buf = save_rx_buf;
		} else {
			pch_spi_set_tx(data, &bpw);
			pch_spi_set_ir(data);
			pch_spi_copy_rx_data(data, bpw);
			kfree(data->pkt_rx_buff);
			data->pkt_rx_buff = NULL;
			kfree(data->pkt_tx_buff);
			data->pkt_tx_buff = NULL;
		}
		/* restore the transfer length and update the message's
		 * actual length */
		data->cur_trans->len = data->save_total_len;
		data->current_msg->actual_length += data->cur_trans->len;

		dev_dbg(&data->master->dev,
			"%s:data->current_msg->actual_length=%d\n",
			__func__, data->current_msg->actual_length);

		spi_transfer_delay_exec(data->cur_trans);

		spin_lock(&data->lock);

		/* No more transfers in this message. */
		if ((data->cur_trans->transfer_list.next) ==
		    &(data->current_msg->transfers)) {
			pch_spi_nomore_transfer(data);
		}

		spin_unlock(&data->lock);

	} while (data->cur_trans != NULL);

out:
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
	if (data->use_dma)
		pch_spi_release_dma(data);
}

static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
				   struct pch_spi_data *data)
{
	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

	flush_work(&data->work);
}

static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
				 struct pch_spi_data *data)
{
	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

	/* reset PCH SPI h/w */
	pch_spi_reset(data->master);
	dev_dbg(&board_dat->pdev->dev,
		"%s pch_spi_reset invoked successfully\n", __func__);

	dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);

	return 0;
}

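/**
 * pch_free_dma_buf() - Frees the coherent DMA bounce buffers
 * @board_dat:	Pointer to the SPI device data structure.
 * @data:	Pointer to the SPI channel data.
 */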
static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
			     struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;
	if (dma->tx_buf_dma)
		dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
				  dma->tx_buf_virt, dma->tx_buf_dma);
	if (dma->rx_buf_dma)
		dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
				  dma->rx_buf_virt, dma->rx_buf_dma);
}

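/**
 * pch_alloc_dma_buf() - Allocates coherent DMA bounce buffers for Tx and Rx
 * @board_dat:	Pointer to the SPI device data structure.
 * @data:	Pointer to the SPI channel data.
 */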
static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
			      struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;
	int ret;

	dma = &data->dma;
	ret = 0;
	/* Get Consistent memory for Tx DMA */
	dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
				PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
	if (!dma->tx_buf_virt)
		ret = -ENOMEM;

	/* Get Consistent memory for Rx DMA */
	dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
				PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
	if (!dma->rx_buf_virt)
		ret = -ENOMEM;

	return ret;
}

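/**
 * pch_spi_pd_probe() - Platform device probe; sets up one SPI channel
 * @plat_dev:	Pointer to the platform device.
 *
 * Allocates an SPI master for the channel, maps the channel's registers,
 * requests the (shared) interrupt line and registers the master.
 */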
static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
	int ret;
	struct spi_master *master;
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data;

	dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);

	master = spi_alloc_master(&board_dat->pdev->dev,
				  sizeof(struct pch_spi_data));
	if (!master) {
		dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
			plat_dev->id);
		return -ENOMEM;
	}

	data = spi_master_get_devdata(master);
	data->master = master;

	platform_set_drvdata(plat_dev, data);

	/* base address + address offset */
	data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
					 PCH_ADDRESS_SIZE * plat_dev->id;
	data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
	if (!data->io_remap_addr) {
		dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
		ret = -ENOMEM;
		goto err_pci_iomap;
	}
	data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;

	dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
		plat_dev->id, data->io_remap_addr);

	/* initialize members of SPI master */
	master->num_chipselect = PCH_MAX_CS;
	master->transfer = pch_spi_transfer;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	master->max_speed_hz = PCH_MAX_BAUDRATE;
	master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;

	data->board_dat = board_dat;
	data->plat_dev = plat_dev;
	data->n_curnt_chip = 255;
	data->status = STATUS_RUNNING;
	data->ch = plat_dev->id;
	data->use_dma = use_dma;

	INIT_LIST_HEAD(&data->queue);
	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pch_spi_process_messages);
	init_waitqueue_head(&data->wait);

	ret = pch_spi_get_resources(board_dat, data);
	if (ret) {
		dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
		goto err_spi_get_resources;
	}

	ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
			  IRQF_SHARED, KBUILD_MODNAME, data);
	if (ret) {
		dev_err(&plat_dev->dev,
			"%s request_irq failed\n", __func__);
		goto err_request_irq;
	}
	data->irq_reg_sts = true;

	pch_spi_set_master_mode(master);

	if (use_dma) {
		dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
		ret = pch_alloc_dma_buf(board_dat, data);
		if (ret)
			goto err_spi_register_master;
	}

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&plat_dev->dev,
			"%s spi_register_master FAILED\n", __func__);
		goto err_spi_register_master;
	}

	return 0;

err_spi_register_master:
	pch_free_dma_buf(board_dat, data);
	free_irq(board_dat->pdev->irq, data);
err_request_irq:
	pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
	pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
	spi_master_put(master);

	return ret;
}

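/**
 * pch_spi_pd_remove() - Platform device removal; tears down one SPI channel
 * @plat_dev:	Pointer to the platform device.
 */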
static void pch_spi_pd_remove(struct platform_device *plat_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(plat_dev);
	int count;
	unsigned long flags;

	dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
		__func__, plat_dev->id, board_dat->pdev->irq);

	if (use_dma)
		pch_free_dma_buf(board_dat, data);

	/* check for any pending messages; no action is taken if the queue
	 * is still not empty; but at least we tried. Unload anyway */
	count = 500;
	spin_lock_irqsave(&data->lock, flags);
	data->status = STATUS_EXITING;
	while ((list_empty(&data->queue) == 0) && --count) {
		dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
			__func__);
		spin_unlock_irqrestore(&data->lock, flags);
		msleep(PCH_SLEEP_TIME);
		spin_lock_irqsave(&data->lock, flags);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	pch_spi_free_resources(board_dat, data);
	/* disable interrupts & free IRQ */
	if (data->irq_reg_sts) {
		/* disable interrupts */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		data->irq_reg_sts = false;
		free_irq(board_dat->pdev->irq, data);
	}

	pci_iounmap(board_dat->pdev, data->io_remap_addr);
	spi_unregister_master(data->master);
}
#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
			      pm_message_t state)
{
	u8 count;
	struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(pd_dev);

	dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);

	if (!board_dat) {
		dev_err(&pd_dev->dev,
			"%s pci_get_drvdata returned NULL\n", __func__);
		return -EFAULT;
	}

	/* check if the current message is processed:
	 * only after that's done will the transfer be suspended */
	count = 255;
	while ((--count) > 0) {
		if (!(data->bcurrent_msg_processing))
			break;
		msleep(PCH_SLEEP_TIME);
	}

	/* Free IRQ */
	if (data->irq_reg_sts) {
		/* disable all interrupts */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		pch_spi_reset(data->master);
		free_irq(board_dat->pdev->irq, data);

		data->irq_reg_sts = false;
		dev_dbg(&pd_dev->dev,
			"%s free_irq invoked successfully.\n", __func__);
	}

	return 0;
}

static int pch_spi_pd_resume(struct platform_device *pd_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(pd_dev);
	int retval;

	if (!board_dat) {
		dev_err(&pd_dev->dev,
			"%s pci_get_drvdata returned NULL\n", __func__);
		return -EFAULT;
	}

	if (!data->irq_reg_sts) {
		/* register IRQ */
		retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
				     IRQF_SHARED, KBUILD_MODNAME, data);
		if (retval < 0) {
			dev_err(&pd_dev->dev,
				"%s request_irq failed\n", __func__);
			return retval;
		}

		/* reset PCH SPI h/w */
		pch_spi_reset(data->master);
		pch_spi_set_master_mode(data->master);
		data->irq_reg_sts = true;
	}
	return 0;
}
#else
#define pch_spi_pd_suspend NULL
#define pch_spi_pd_resume NULL
#endif

static struct platform_driver pch_spi_pd_driver = {
	.driver = {
		.name = "pch-spi",
	},
	.probe = pch_spi_pd_probe,
	.remove_new = pch_spi_pd_remove,
	.suspend = pch_spi_pd_suspend,
	.resume = pch_spi_pd_resume
};

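/**
 * pch_spi_probe() - PCI probe; creates one platform device per SPI channel
 * @pdev:	Pointer to the PCI device.
 * @id:		Matched entry of pch_spi_pcidev_id; driver_data holds the
 *		number of channels.
 */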
static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pch_spi_board_data *board_dat;
	struct platform_device *pd_dev = NULL;
	int retval;
	int i;
	struct pch_pd_dev_save *pd_dev_save;

	pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
	if (!pd_dev_save)
		return -ENOMEM;

	board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
	if (!board_dat) {
		retval = -ENOMEM;
		goto err_no_mem;
	}

	retval = pci_request_regions(pdev, KBUILD_MODNAME);
	if (retval) {
		dev_err(&pdev->dev, "%s request_region failed\n", __func__);
		goto pci_request_regions;
	}

	board_dat->pdev = pdev;
	board_dat->num = id->driver_data;
	pd_dev_save->num = id->driver_data;
	pd_dev_save->board_dat = board_dat;

	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
		goto pci_enable_device;
	}

	for (i = 0; i < board_dat->num; i++) {
		pd_dev = platform_device_alloc("pch-spi", i);
		if (!pd_dev) {
			dev_err(&pdev->dev, "platform_device_alloc failed\n");
			retval = -ENOMEM;
			goto err_platform_device;
		}
		pd_dev_save->pd_save[i] = pd_dev;
		pd_dev->dev.parent = &pdev->dev;

		retval = platform_device_add_data(pd_dev, board_dat,
						  sizeof(*board_dat));
		if (retval) {
			dev_err(&pdev->dev,
				"platform_device_add_data failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}

		retval = platform_device_add(pd_dev);
		if (retval) {
			dev_err(&pdev->dev, "platform_device_add failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}
	}

	pci_set_drvdata(pdev, pd_dev_save);

	return 0;

err_platform_device:
	while (--i >= 0)
		platform_device_unregister(pd_dev_save->pd_save[i]);
	pci_disable_device(pdev);
pci_enable_device:
	pci_release_regions(pdev);
pci_request_regions:
	kfree(board_dat);
err_no_mem:
	kfree(pd_dev_save);

	return retval;
}

static void pch_spi_remove(struct pci_dev *pdev)
{
	int i;
	struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);

	for (i = 0; i < pd_dev_save->num; i++)
		platform_device_unregister(pd_dev_save->pd_save[i]);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(pd_dev_save->board_dat);
	kfree(pd_dev_save);
}

static int __maybe_unused pch_spi_suspend(struct device *dev)
{
	struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);

	dev_dbg(dev, "%s ENTRY\n", __func__);

	pd_dev_save->board_dat->suspend_sts = true;

	return 0;
}

static int __maybe_unused pch_spi_resume(struct device *dev)
{
	struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);

	dev_dbg(dev, "%s ENTRY\n", __func__);

	/* set suspend status to false */
	pd_dev_save->board_dat->suspend_sts = false;

	return 0;
}

static SIMPLE_DEV_PM_OPS(pch_spi_pm_ops, pch_spi_suspend, pch_spi_resume);

static struct pci_driver pch_spi_pcidev_driver = {
	.name = "pch_spi",
	.id_table = pch_spi_pcidev_id,
	.probe = pch_spi_probe,
	.remove = pch_spi_remove,
	.driver.pm = &pch_spi_pm_ops,
};

static int __init pch_spi_init(void)
{
	int ret;
	ret = platform_driver_register(&pch_spi_pd_driver);
	if (ret)
		return ret;

	ret = pci_register_driver(&pch_spi_pcidev_driver);
	if (ret) {
		platform_driver_unregister(&pch_spi_pd_driver);
		return ret;
	}

	return 0;
}
module_init(pch_spi_init);

static void __exit pch_spi_exit(void)
{
	pci_unregister_driver(&pch_spi_pcidev_driver);
	platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);

module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
		 "set to 1 to use DMA for data transfers, 0 otherwise; default 1");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);