xref: /openbmc/linux/drivers/ata/sata_sx4.c (revision 86aa961bb4619a68077ebeba21c52e9ba0eab43d)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *  sata_sx4.c - Promise SATA
4   *
5   *  Maintained by:  Tejun Heo <tj@kernel.org>
6   *  		    Please ALWAYS copy linux-ide@vger.kernel.org
7   *		    on emails.
8   *
9   *  Copyright 2003-2004 Red Hat, Inc.
10   *
11   *  libata documentation is available via 'make {ps|pdf}docs',
12   *  as Documentation/driver-api/libata.rst
13   *
14   *  Hardware documentation available under NDA.
15   */
16  
17  /*
18  	Theory of operation
19  	-------------------
20  
21  	The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
22  	engine, DIMM memory, and four ATA engines (one per SATA port).
23  	Data is copied to/from DIMM memory by the HDMA engine, before
24  	handing off to one (or more) of the ATA engines.  The ATA
25  	engines operate solely on DIMM memory.
26  
27  	The SX4 behaves like a PATA chip, with no SATA controls or
28  	knowledge whatsoever, leading to the presumption that
29  	PATA<->SATA bridges exist on SX4 boards, external to the
30  	PDC20621 chip itself.
31  
32  	The chip is quite capable, supporting an XOR engine and linked
33  	hardware commands (permits a string of transactions to be
34  	submitted and waited-on as a single unit), and an optional
35  	microprocessor.
36  
37  	The limiting factor is largely software.  This Linux driver was
38  	written to multiplex the single HDMA engine to copy disk
39  	transactions into a fixed DIMM memory space, from where an ATA
40  	engine takes over.  As a result, each WRITE looks like this:
41  
42  		submit HDMA packet to hardware
43  		hardware copies data from system memory to DIMM
44  		hardware raises interrupt
45  
46  		submit ATA packet to hardware
47  		hardware executes ATA WRITE command, w/ data in DIMM
48  		hardware raises interrupt
49  
50  	and each READ looks like this:
51  
52  		submit ATA packet to hardware
53  		hardware executes ATA READ command, w/ data in DIMM
54  		hardware raises interrupt
55  
56  		submit HDMA packet to hardware
57  		hardware copies data from DIMM to system memory
58  		hardware raises interrupt
59  
60  	This is a very slow, lock-step way of doing things that can
61  	certainly be improved by motivated kernel hackers.
62  
63   */
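
/*
 * Roughly, the flow above maps onto the code below as follows:
 * pdc20621_packet_start() issues the first half of each transaction --
 * the Host DMA packet for a WRITE, the ATA packet for a READ or NODATA
 * command -- and pdc20621_host_intr() handles each completion interrupt,
 * either issuing the second half or completing the qc.  Per port, SEQ ID
 * (port_no + 1) is used for the ATA engine and SEQ ID (port_no + 5) for
 * the corresponding Host DMA transfer.
 */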
64  
65  #include <linux/kernel.h>
66  #include <linux/module.h>
67  #include <linux/pci.h>
68  #include <linux/slab.h>
69  #include <linux/blkdev.h>
70  #include <linux/delay.h>
71  #include <linux/interrupt.h>
72  #include <linux/device.h>
73  #include <scsi/scsi_host.h>
74  #include <scsi/scsi_cmnd.h>
75  #include <linux/libata.h>
76  #include "sata_promise.h"
77  
78  #define DRV_NAME	"sata_sx4"
79  #define DRV_VERSION	"0.12"
80  
81  static int dimm_test;
82  module_param(dimm_test, int, 0644);
83  MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
84  
85  enum {
86  	PDC_MMIO_BAR		= 3,
87  	PDC_DIMM_BAR		= 4,
88  
89  	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */
90  
91  	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
92  	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
93  	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
94  	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */
95  
96  	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */
97  
98  	PDC_20621_SEQCTL	= 0x400,
99  	PDC_20621_SEQMASK	= 0x480,
100  	PDC_20621_GENERAL_CTL	= 0x484,
101  	PDC_20621_PAGE_SIZE	= (32 * 1024),
102  
103  	/* chosen, not constant, values; we design our own DIMM mem map */
104  	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
105  	PDC_20621_DIMM_BASE	= 0x00200000,
106  	PDC_20621_DIMM_DATA	= (64 * 1024),
107  	PDC_DIMM_DATA_STEP	= (256 * 1024),
108  	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
109  	PDC_DIMM_HOST_PRD	= (6 * 1024),
110  	PDC_DIMM_HOST_PKT	= (128 * 0),
111  	PDC_DIMM_HPKT_PRD	= (128 * 1),
112  	PDC_DIMM_ATA_PKT	= (128 * 2),
113  	PDC_DIMM_APKT_PRD	= (128 * 3),
114  	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
115  	PDC_PAGE_WINDOW		= 0x40,
116  	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
117  				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
118  	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
119  
120  	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */
121  
122  	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
123  				  (1<<23),
124  
125  	board_20621		= 0,	/* FastTrak S150 SX4 */
126  
127  	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
128  	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
129  	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */
130  
131  	PDC_MAX_HDMA		= 32,
132  	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),
133  
134  	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
135  	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
136  	PDC_I2C_CONTROL			= 0x48,
137  	PDC_I2C_ADDR_DATA		= 0x4C,
138  	PDC_DIMM0_CONTROL		= 0x80,
139  	PDC_DIMM1_CONTROL		= 0x84,
140  	PDC_SDRAM_CONTROL		= 0x88,
141  	PDC_I2C_WRITE			= 0,		/* master -> slave */
142  	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
143  	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
144  	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
145  	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
146  	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
147  	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
148  	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
149  	PDC_DIMM_SPD_ROW_NUM		= 3,
150  	PDC_DIMM_SPD_COLUMN_NUM		= 4,
151  	PDC_DIMM_SPD_MODULE_ROW		= 5,
152  	PDC_DIMM_SPD_TYPE		= 11,
153  	PDC_DIMM_SPD_FRESH_RATE		= 12,
154  	PDC_DIMM_SPD_BANK_NUM		= 17,
155  	PDC_DIMM_SPD_CAS_LATENCY	= 18,
156  	PDC_DIMM_SPD_ATTRIBUTE		= 21,
157  	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
158  	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
159  	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
160  	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
161  	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
162  	PDC_CTL_STATUS			= 0x08,
163  	PDC_DIMM_WINDOW_CTLR		= 0x0C,
164  	PDC_TIME_CONTROL		= 0x3C,
165  	PDC_TIME_PERIOD			= 0x40,
166  	PDC_TIME_COUNTER		= 0x44,
167  	PDC_GENERAL_CTLR		= 0x484,
168  	PCI_PLL_INIT			= 0x8A531824,
169  	PCI_X_TCOUNT			= 0xEE1E5CFF,
170  
171  	/* PDC_TIME_CONTROL bits */
172  	PDC_TIMER_BUZZER		= (1 << 10),
173  	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
174  	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
175  	PDC_TIMER_ENABLE		= (1 << 7),
176  	PDC_TIMER_MASK_INT		= (1 << 5),
177  	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
178  	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
179  					  PDC_TIMER_ENABLE |
180  					  PDC_TIMER_MASK_INT,
181  };
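
/*
 * The constants above sketch the per-port DIMM layout used by this
 * driver: each port owns an 8 KB control window at PDC_20621_DIMM_BASE +
 * port * PDC_DIMM_WINDOW_STEP, holding its Host DMA packet at offset 0,
 * the HDMA PRD table at 128, the ATA packet at 256 and the ATA PRD table
 * at 384 (512 bytes of headers in total), with the host-side S/G table
 * copied in at offset 6 KB (PDC_DIMM_HOST_PRD).  The port's data buffer
 * lives at PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
 * port * PDC_DIMM_DATA_STEP.
 */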
182  
183  #define ECC_ERASE_BUF_SZ (128 * 1024)
184  
185  struct pdc_port_priv {
186  	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
187  	u8			*pkt;
188  	dma_addr_t		pkt_dma;
189  };
190  
191  struct pdc_host_priv {
192  	unsigned int		doing_hdma;
193  	unsigned int		hdma_prod;
194  	unsigned int		hdma_cons;
195  	struct {
196  		struct ata_queued_cmd *qc;
197  		unsigned int	seq;
198  		unsigned long	pkt_ofs;
199  	} hdma[32];
200  };
201  
202  
203  static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
204  static void pdc_error_handler(struct ata_port *ap);
205  static void pdc_freeze(struct ata_port *ap);
206  static void pdc_thaw(struct ata_port *ap);
207  static int pdc_port_start(struct ata_port *ap);
208  static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
209  static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
210  static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
211  static unsigned int pdc20621_dimm_init(struct ata_host *host);
212  static int pdc20621_detect_dimm(struct ata_host *host);
213  static unsigned int pdc20621_i2c_read(struct ata_host *host,
214  				      u32 device, u32 subaddr, u32 *pdata);
215  static int pdc20621_prog_dimm0(struct ata_host *host);
216  static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
217  static void pdc20621_get_from_dimm(struct ata_host *host,
218  				   void *psource, u32 offset, u32 size);
219  static void pdc20621_put_to_dimm(struct ata_host *host,
220  				 void *psource, u32 offset, u32 size);
221  static void pdc20621_irq_clear(struct ata_port *ap);
222  static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
223  static int pdc_softreset(struct ata_link *link, unsigned int *class,
224  			 unsigned long deadline);
225  static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
226  static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
227  
228  
229  static const struct scsi_host_template pdc_sata_sht = {
230  	ATA_BASE_SHT(DRV_NAME),
231  	.sg_tablesize		= LIBATA_MAX_PRD,
232  	.dma_boundary		= ATA_DMA_BOUNDARY,
233  };
234  
235  static struct ata_port_operations pdc_20621_ops = {
236  	.inherits		= &ata_sff_port_ops,
237  
238  	.check_atapi_dma	= pdc_check_atapi_dma,
239  	.qc_prep		= pdc20621_qc_prep,
240  	.qc_issue		= pdc20621_qc_issue,
241  
242  	.freeze			= pdc_freeze,
243  	.thaw			= pdc_thaw,
244  	.softreset		= pdc_softreset,
245  	.error_handler		= pdc_error_handler,
246  	.lost_interrupt		= ATA_OP_NULL,
247  	.post_internal_cmd	= pdc_post_internal_cmd,
248  
249  	.port_start		= pdc_port_start,
250  
251  	.sff_tf_load		= pdc_tf_load_mmio,
252  	.sff_exec_command	= pdc_exec_command_mmio,
253  	.sff_irq_clear		= pdc20621_irq_clear,
254  };
255  
256  static const struct ata_port_info pdc_port_info[] = {
257  	/* board_20621 */
258  	{
259  		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
260  				  ATA_FLAG_PIO_POLLING,
261  		.pio_mask	= ATA_PIO4,
262  		.mwdma_mask	= ATA_MWDMA2,
263  		.udma_mask	= ATA_UDMA6,
264  		.port_ops	= &pdc_20621_ops,
265  	},
266  
267  };
268  
269  static const struct pci_device_id pdc_sata_pci_tbl[] = {
270  	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
271  
272  	{ }	/* terminate list */
273  };
274  
275  static struct pci_driver pdc_sata_pci_driver = {
276  	.name			= DRV_NAME,
277  	.id_table		= pdc_sata_pci_tbl,
278  	.probe			= pdc_sata_init_one,
279  	.remove			= ata_pci_remove_one,
280  };
281  
282  
283  static int pdc_port_start(struct ata_port *ap)
284  {
285  	struct device *dev = ap->host->dev;
286  	struct pdc_port_priv *pp;
287  
288  	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
289  	if (!pp)
290  		return -ENOMEM;
291  
292  	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
293  	if (!pp->pkt)
294  		return -ENOMEM;
295  
296  	ap->private_data = pp;
297  
298  	return 0;
299  }
300  
301  static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
302  				   unsigned int total_len)
303  {
304  	u32 addr;
305  	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
306  	__le32 *buf32 = (__le32 *) buf;
307  
308  	/* output ATA packet S/G table */
309  	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
310  	       (PDC_DIMM_DATA_STEP * portno);
311  
312  	buf32[dw] = cpu_to_le32(addr);
313  	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
314  }
315  
316  static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
317  				    unsigned int total_len)
318  {
319  	u32 addr;
320  	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
321  	__le32 *buf32 = (__le32 *) buf;
322  
323  	/* output Host DMA packet S/G table */
324  	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
325  	       (PDC_DIMM_DATA_STEP * portno);
326  
327  	buf32[dw] = cpu_to_le32(addr);
328  	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
329  }
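
/*
 * Both S/G helpers above emit a single-entry PRD: since the ATA and Host
 * DMA engines only address DIMM memory, the DIMM side of every transfer
 * is one contiguous buffer in the port's data region, so one
 * address/length pair with ATA_PRD_EOT set is sufficient.
 */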
330  
331  static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
332  					    unsigned int devno, u8 *buf,
333  					    unsigned int portno)
334  {
335  	unsigned int i, dw;
336  	__le32 *buf32 = (__le32 *) buf;
337  	u8 dev_reg;
338  
339  	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
340  			       (PDC_DIMM_WINDOW_STEP * portno) +
341  			       PDC_DIMM_APKT_PRD;
342  
343  	i = PDC_DIMM_ATA_PKT;
344  
345  	/*
346  	 * Set up ATA packet
347  	 */
348  	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
349  		buf[i++] = PDC_PKT_READ;
350  	else if (tf->protocol == ATA_PROT_NODATA)
351  		buf[i++] = PDC_PKT_NODATA;
352  	else
353  		buf[i++] = 0;
354  	buf[i++] = 0;			/* reserved */
355  	buf[i++] = portno + 1;		/* seq. id */
356  	buf[i++] = 0xff;		/* delay seq. id */
357  
358  	/* dimm dma S/G, and next-pkt */
359  	dw = i >> 2;
360  	if (tf->protocol == ATA_PROT_NODATA)
361  		buf32[dw] = 0;
362  	else
363  		buf32[dw] = cpu_to_le32(dimm_sg);
364  	buf32[dw + 1] = 0;
365  	i += 8;
366  
367  	if (devno == 0)
368  		dev_reg = ATA_DEVICE_OBS;
369  	else
370  		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
371  
372  	/* select device */
373  	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
374  	buf[i++] = dev_reg;
375  
376  	/* device control register */
377  	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
378  	buf[i++] = tf->ctl;
379  
380  	return i;
381  }
382  
383  static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
384  				     unsigned int portno)
385  {
386  	unsigned int dw;
387  	u32 tmp;
388  	__le32 *buf32 = (__le32 *) buf;
389  
390  	unsigned int host_sg = PDC_20621_DIMM_BASE +
391  			       (PDC_DIMM_WINDOW_STEP * portno) +
392  			       PDC_DIMM_HOST_PRD;
393  	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
394  			       (PDC_DIMM_WINDOW_STEP * portno) +
395  			       PDC_DIMM_HPKT_PRD;
396  
397  	dw = PDC_DIMM_HOST_PKT >> 2;
398  
399  	/*
400  	 * Set up Host DMA packet
401  	 */
402  	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
403  		tmp = PDC_PKT_READ;
404  	else
405  		tmp = 0;
406  	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
407  	tmp |= (0xff << 24);			/* delay seq. id */
408  	buf32[dw + 0] = cpu_to_le32(tmp);
409  	buf32[dw + 1] = cpu_to_le32(host_sg);
410  	buf32[dw + 2] = cpu_to_le32(dimm_sg);
411  	buf32[dw + 3] = 0;
412  }
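
/*
 * The four-word Host DMA packet built above is: a control word (read
 * flag, SEQ ID port+5 in bits 23:16, delay SEQ 0xff in bits 31:24), the
 * DIMM address of the host-side PRD table (the system-memory S/G list,
 * stored at PDC_DIMM_HOST_PRD), the DIMM address of the DIMM-side PRD
 * table, and a final zero word.
 */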
413  
414  static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
415  {
416  	struct scatterlist *sg;
417  	struct ata_port *ap = qc->ap;
418  	struct pdc_port_priv *pp = ap->private_data;
419  	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
420  	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
421  	unsigned int portno = ap->port_no;
422  	unsigned int i, si, idx, total_len = 0, sgt_len;
423  	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
424  
425  	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
426  
427  	/* hard-code chip #0 */
428  	mmio += PDC_CHIP0_OFS;
429  
430  	/*
431  	 * Build S/G table
432  	 */
433  	idx = 0;
434  	for_each_sg(qc->sg, sg, qc->n_elem, si) {
435  		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
436  		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
437  		total_len += sg_dma_len(sg);
438  	}
439  	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
440  	sgt_len = idx * 4;
441  
442  	/*
443  	 * Build ATA, host DMA packets
444  	 */
445  	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
446  	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
447  
448  	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
449  	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
450  
451  	if (qc->tf.flags & ATA_TFLAG_LBA48)
452  		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
453  	else
454  		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
455  
456  	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
457  
458  	/* copy three S/G tables and two packets to DIMM MMIO window */
459  	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
460  		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
461  	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
462  		    PDC_DIMM_HOST_PRD,
463  		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
464  
465  	/* force host FIFO dump */
466  	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
467  
468  	readl(dimm_mmio);	/* MMIO PCI posting flush */
469  
470  	ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
471  		     i, sgt_len);
472  }
473  
474  static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
475  {
476  	struct ata_port *ap = qc->ap;
477  	struct pdc_port_priv *pp = ap->private_data;
478  	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
479  	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
480  	unsigned int portno = ap->port_no;
481  	unsigned int i;
482  
483  	/* hard-code chip #0 */
484  	mmio += PDC_CHIP0_OFS;
485  
486  	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
487  
488  	if (qc->tf.flags & ATA_TFLAG_LBA48)
489  		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
490  	else
491  		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
492  
493  	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
494  
495  	/* copy three S/G tables and two packets to DIMM MMIO window */
496  	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
497  		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
498  
499  	/* force host FIFO dump */
500  	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
501  
502  	readl(dimm_mmio);	/* MMIO PCI posting flush */
503  
504  	ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
505  }
506  
507  static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
508  {
509  	switch (qc->tf.protocol) {
510  	case ATA_PROT_DMA:
511  		pdc20621_dma_prep(qc);
512  		break;
513  	case ATA_PROT_NODATA:
514  		pdc20621_nodata_prep(qc);
515  		break;
516  	default:
517  		break;
518  	}
519  
520  	return AC_ERR_OK;
521  }
522  
523  static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
524  				 unsigned int seq,
525  				 u32 pkt_ofs)
526  {
527  	struct ata_port *ap = qc->ap;
528  	struct ata_host *host = ap->host;
529  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
530  
531  	/* hard-code chip #0 */
532  	mmio += PDC_CHIP0_OFS;
533  
534  	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
535  	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
536  
537  	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
538  	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
539  }
540  
541  static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
542  				unsigned int seq,
543  				u32 pkt_ofs)
544  {
545  	struct ata_port *ap = qc->ap;
546  	struct pdc_host_priv *pp = ap->host->private_data;
547  	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
548  
549  	if (!pp->doing_hdma) {
550  		__pdc20621_push_hdma(qc, seq, pkt_ofs);
551  		pp->doing_hdma = 1;
552  		return;
553  	}
554  
555  	pp->hdma[idx].qc = qc;
556  	pp->hdma[idx].seq = seq;
557  	pp->hdma[idx].pkt_ofs = pkt_ofs;
558  	pp->hdma_prod++;
559  }
560  
561  static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
562  {
563  	struct ata_port *ap = qc->ap;
564  	struct pdc_host_priv *pp = ap->host->private_data;
565  	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
566  
567  	/* if nothing on queue, we're done */
568  	if (pp->hdma_prod == pp->hdma_cons) {
569  		pp->doing_hdma = 0;
570  		return;
571  	}
572  
573  	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
574  			     pp->hdma[idx].pkt_ofs);
575  	pp->hdma_cons++;
576  }
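
/*
 * The push/pop pair above multiplexes the single HDMA copy engine: if
 * the engine is idle the packet is submitted immediately, otherwise it
 * is queued in the 32-entry hdma[] ring (hdma_prod/hdma_cons).  Each
 * HDMA completion calls pdc20621_pop_hdma() to start the next queued
 * transfer, so at most one HDMA packet is in flight at a time.
 */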
577  
578  static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
579  {
580  	struct ata_port *ap = qc->ap;
581  	unsigned int port_no = ap->port_no;
582  	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
583  
584  	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
585  	dimm_mmio += PDC_DIMM_HOST_PKT;
586  
587  	ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
588  		     readl(dimm_mmio), readl(dimm_mmio + 4),
589  		     readl(dimm_mmio + 8), readl(dimm_mmio + 12));
590  }
591  
592  static void pdc20621_packet_start(struct ata_queued_cmd *qc)
593  {
594  	struct ata_port *ap = qc->ap;
595  	struct ata_host *host = ap->host;
596  	unsigned int port_no = ap->port_no;
597  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
598  	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
599  	u8 seq = (u8) (port_no + 1);
600  	unsigned int port_ofs;
601  
602  	/* hard-code chip #0 */
603  	mmio += PDC_CHIP0_OFS;
604  
605  	wmb();			/* flush PRD, pkt writes */
606  
607  	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
608  
609  	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
610  	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
611  		seq += 4;
612  
613  		pdc20621_dump_hdma(qc);
614  		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
615  		ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
616  			port_ofs + PDC_DIMM_HOST_PKT,
617  			port_ofs + PDC_DIMM_HOST_PKT,
618  			seq);
619  	} else {
620  		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
621  		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
622  
623  		writel(port_ofs + PDC_DIMM_ATA_PKT,
624  		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
625  		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
626  		ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
627  			port_ofs + PDC_DIMM_ATA_PKT,
628  			port_ofs + PDC_DIMM_ATA_PKT,
629  			seq);
630  	}
631  }
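
/*
 * In packet_start above, DMA WRITEs are started by pushing the Host DMA
 * packet (system memory -> DIMM copy) with SEQ ID port+5; READs and
 * NODATA commands go straight to the ATA engine by writing the ATA
 * packet offset to PDC_PKT_SUBMIT with SEQ ID port+1.
 */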
632  
633  static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
634  {
635  	switch (qc->tf.protocol) {
636  	case ATA_PROT_NODATA:
637  		if (qc->tf.flags & ATA_TFLAG_POLLING)
638  			break;
639  		fallthrough;
640  	case ATA_PROT_DMA:
641  		pdc20621_packet_start(qc);
642  		return 0;
643  
644  	case ATAPI_PROT_DMA:
645  		BUG();
646  		break;
647  
648  	default:
649  		break;
650  	}
651  
652  	return ata_sff_qc_issue(qc);
653  }
654  
655  static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
656  					  struct ata_queued_cmd *qc,
657  					  unsigned int doing_hdma,
658  					  void __iomem *mmio)
659  {
660  	unsigned int port_no = ap->port_no;
661  	unsigned int port_ofs =
662  		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
663  	u8 status;
664  	unsigned int handled = 0;
665  
666  	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
667  	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
668  
669  		/* step two - DMA from DIMM to host */
670  		if (doing_hdma) {
671  			ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
672  				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
673  			/* get drive status; clear intr; complete txn */
674  			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
675  			ata_qc_complete(qc);
676  			pdc20621_pop_hdma(qc);
677  		}
678  
679  		/* step one - exec ATA command */
680  		else {
681  			u8 seq = (u8) (port_no + 1 + 4);
682  			ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
683  				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
684  
685  			/* submit hdma pkt */
686  			pdc20621_dump_hdma(qc);
687  			pdc20621_push_hdma(qc, seq,
688  					   port_ofs + PDC_DIMM_HOST_PKT);
689  		}
690  		handled = 1;
691  
692  	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */
693  
694  		/* step one - DMA from host to DIMM */
695  		if (doing_hdma) {
696  			u8 seq = (u8) (port_no + 1);
697  			ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
698  				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
699  
700  			/* submit ata pkt */
701  			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
702  			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
703  			writel(port_ofs + PDC_DIMM_ATA_PKT,
704  			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
705  			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
706  		}
707  
708  		/* step two - execute ATA command */
709  		else {
710  			ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
711  				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
712  			/* get drive status; clear intr; complete txn */
713  			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
714  			ata_qc_complete(qc);
715  			pdc20621_pop_hdma(qc);
716  		}
717  		handled = 1;
718  
719  	/* command completion, but no data xfer */
720  	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
721  
722  		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
723  		ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
724  		qc->err_mask |= ac_err_mask(status);
725  		ata_qc_complete(qc);
726  		handled = 1;
727  
728  	} else {
729  		ap->stats.idle_irq++;
730  	}
731  
732  	return handled;
733  }
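
/*
 * host_intr() above is the second half of the lock-step state machine
 * described at the top of the file.  doing_hdma tells it which engine
 * raised the interrupt: a READ completes the qc on its HDMA interrupt
 * (data has reached system memory) and a WRITE completes on its ATA
 * interrupt, while the other interrupt of each pair is used to kick off
 * the remaining half of the transfer.
 */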
734  
735  static void pdc20621_irq_clear(struct ata_port *ap)
736  {
737  	ioread8(ap->ioaddr.status_addr);
738  }
739  
740  static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
741  {
742  	struct ata_host *host = dev_instance;
743  	struct ata_port *ap;
744  	u32 mask = 0;
745  	unsigned int i, tmp, port_no;
746  	unsigned int handled = 0;
747  	void __iomem *mmio_base;
748  
749  	if (!host || !host->iomap[PDC_MMIO_BAR])
750  		return IRQ_NONE;
751  
752  	mmio_base = host->iomap[PDC_MMIO_BAR];
753  
754  	/* reading should also clear interrupts */
755  	mmio_base += PDC_CHIP0_OFS;
756  	mask = readl(mmio_base + PDC_20621_SEQMASK);
757  
758  	if (mask == 0xffffffff)
759  		return IRQ_NONE;
760  
761  	mask &= 0xffff;		/* only 16 tags possible */
762  	if (!mask)
763  		return IRQ_NONE;
764  
765  	spin_lock(&host->lock);
766  
767  	for (i = 1; i < 9; i++) {
768  		port_no = i - 1;
769  		if (port_no > 3)
770  			port_no -= 4;
771  		if (port_no >= host->n_ports)
772  			ap = NULL;
773  		else
774  			ap = host->ports[port_no];
775  		tmp = mask & (1 << i);
776  		if (ap)
777  			ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
778  		if (tmp && ap) {
779  			struct ata_queued_cmd *qc;
780  
781  			qc = ata_qc_from_tag(ap, ap->link.active_tag);
782  			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
783  				handled += pdc20621_host_intr(ap, qc, (i > 4),
784  							      mmio_base);
785  		}
786  	}
787  
788  	spin_unlock(&host->lock);
789  
790  	return IRQ_RETVAL(handled);
791  }
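
/*
 * In the interrupt handler above, SEQ mask bits 1-4 correspond to the
 * four ATA engines (ports 0-3) and bits 5-8 to their Host DMA
 * transfers, which is why 'i > 4' is passed as doing_hdma.
 */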
792  
793  static void pdc_freeze(struct ata_port *ap)
794  {
795  	void __iomem *mmio = ap->ioaddr.cmd_addr;
796  	u32 tmp;
797  
798  	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
799  
800  	tmp = readl(mmio + PDC_CTLSTAT);
801  	tmp |= PDC_MASK_INT;
802  	tmp &= ~PDC_DMA_ENABLE;
803  	writel(tmp, mmio + PDC_CTLSTAT);
804  	readl(mmio + PDC_CTLSTAT); /* flush */
805  }
806  
807  static void pdc_thaw(struct ata_port *ap)
808  {
809  	void __iomem *mmio = ap->ioaddr.cmd_addr;
810  	u32 tmp;
811  
812  	/* FIXME: start HDMA engine, if zero ATA engines running */
813  
814  	/* clear IRQ */
815  	ioread8(ap->ioaddr.status_addr);
816  
817  	/* turn IRQ back on */
818  	tmp = readl(mmio + PDC_CTLSTAT);
819  	tmp &= ~PDC_MASK_INT;
820  	writel(tmp, mmio + PDC_CTLSTAT);
821  	readl(mmio + PDC_CTLSTAT); /* flush */
822  }
823  
824  static void pdc_reset_port(struct ata_port *ap)
825  {
826  	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
827  	unsigned int i;
828  	u32 tmp;
829  
830  	/* FIXME: handle HDMA copy engine */
831  
832  	for (i = 11; i > 0; i--) {
833  		tmp = readl(mmio);
834  		if (tmp & PDC_RESET)
835  			break;
836  
837  		udelay(100);
838  
839  		tmp |= PDC_RESET;
840  		writel(tmp, mmio);
841  	}
842  
843  	tmp &= ~PDC_RESET;
844  	writel(tmp, mmio);
845  	readl(mmio);	/* flush */
846  }
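
/*
 * pdc_reset_port() above asserts PDC_RESET in the port's control/status
 * register, polling (up to roughly 1 ms) until the bit reads back as
 * set, then clears it again to complete the reset pulse.
 */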
847  
848  static int pdc_softreset(struct ata_link *link, unsigned int *class,
849  			 unsigned long deadline)
850  {
851  	pdc_reset_port(link->ap);
852  	return ata_sff_softreset(link, class, deadline);
853  }
854  
855  static void pdc_error_handler(struct ata_port *ap)
856  {
857  	if (!ata_port_is_frozen(ap))
858  		pdc_reset_port(ap);
859  
860  	ata_sff_error_handler(ap);
861  }
862  
863  static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
864  {
865  	struct ata_port *ap = qc->ap;
866  
867  	/* make DMA engine forget about the failed command */
868  	if (qc->flags & ATA_QCFLAG_EH)
869  		pdc_reset_port(ap);
870  }
871  
872  static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
873  {
874  	u8 *scsicmd = qc->scsicmd->cmnd;
875  	int pio = 1; /* atapi dma off by default */
876  
877  	/* Whitelist commands that may use DMA. */
878  	switch (scsicmd[0]) {
879  	case WRITE_12:
880  	case WRITE_10:
881  	case WRITE_6:
882  	case READ_12:
883  	case READ_10:
884  	case READ_6:
885  	case 0xad: /* READ_DVD_STRUCTURE */
886  	case 0xbe: /* READ_CD */
887  		pio = 0;
888  	}
889  	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
890  	if (scsicmd[0] == WRITE_10) {
891  		unsigned int lba =
892  			(scsicmd[2] << 24) |
893  			(scsicmd[3] << 16) |
894  			(scsicmd[4] << 8) |
895  			scsicmd[5];
896  		if (lba >= 0xFFFF4FA2)
897  			pio = 1;
898  	}
899  	return pio;
900  }
901  
902  static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
903  {
904  	WARN_ON(tf->protocol == ATA_PROT_DMA ||
905  		tf->protocol == ATAPI_PROT_DMA);
906  	ata_sff_tf_load(ap, tf);
907  }
908  
909  
910  static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
911  {
912  	WARN_ON(tf->protocol == ATA_PROT_DMA ||
913  		tf->protocol == ATAPI_PROT_DMA);
914  	ata_sff_exec_command(ap, tf);
915  }
916  
917  
918  static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
919  {
920  	port->cmd_addr		= base;
921  	port->data_addr		= base;
922  	port->feature_addr	=
923  	port->error_addr	= base + 0x4;
924  	port->nsect_addr	= base + 0x8;
925  	port->lbal_addr		= base + 0xc;
926  	port->lbam_addr		= base + 0x10;
927  	port->lbah_addr		= base + 0x14;
928  	port->device_addr	= base + 0x18;
929  	port->command_addr	=
930  	port->status_addr	= base + 0x1c;
931  	port->altstatus_addr	=
932  	port->ctl_addr		= base + 0x38;
933  }
934  
935  
936  static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
937  				   u32 offset, u32 size)
938  {
939  	u32 window_size;
940  	u16 idx;
941  	u8 page_mask;
942  	long dist;
943  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
944  	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
945  
946  	/* hard-code chip #0 */
947  	mmio += PDC_CHIP0_OFS;
948  
949  	page_mask = 0x00;
950  	window_size = 0x2000 * 4; /* 32K byte uchar size */
951  	idx = (u16) (offset / window_size);
952  
953  	writel(0x01, mmio + PDC_GENERAL_CTLR);
954  	readl(mmio + PDC_GENERAL_CTLR);
955  	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
956  	readl(mmio + PDC_DIMM_WINDOW_CTLR);
957  
958  	offset -= (idx * window_size);
959  	idx++;
960  	dist = min(size, window_size - offset);
961  	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
962  
963  	psource += dist;
964  	size -= dist;
965  	for (; (long) size >= (long) window_size ;) {
966  		writel(0x01, mmio + PDC_GENERAL_CTLR);
967  		readl(mmio + PDC_GENERAL_CTLR);
968  		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
969  		readl(mmio + PDC_DIMM_WINDOW_CTLR);
970  		memcpy_fromio(psource, dimm_mmio, window_size / 4);
971  		psource += window_size;
972  		size -= window_size;
973  		idx++;
974  	}
975  
976  	if (size) {
977  		writel(0x01, mmio + PDC_GENERAL_CTLR);
978  		readl(mmio + PDC_GENERAL_CTLR);
979  		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
980  		readl(mmio + PDC_DIMM_WINDOW_CTLR);
981  		memcpy_fromio(psource, dimm_mmio, size / 4);
982  	}
983  }
984  
985  
986  static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
987  				 u32 offset, u32 size)
988  {
989  	u32 window_size;
990  	u16 idx;
991  	u8 page_mask;
992  	long dist;
993  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
994  	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
995  
996  	/* hard-code chip #0 */
997  	mmio += PDC_CHIP0_OFS;
998  
999  	page_mask = 0x00;
1000  	window_size = 0x2000 * 4;       /* 32K byte uchar size */
1001  	idx = (u16) (offset / window_size);
1002  
1003  	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1004  	readl(mmio + PDC_DIMM_WINDOW_CTLR);
1005  	offset -= (idx * window_size);
1006  	idx++;
1007  	dist = min(size, window_size - offset);
1008  	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1009  	writel(0x01, mmio + PDC_GENERAL_CTLR);
1010  	readl(mmio + PDC_GENERAL_CTLR);
1011  
1012  	psource += dist;
1013  	size -= dist;
1014  	for (; (long) size >= (long) window_size ;) {
1015  		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1016  		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1017  		memcpy_toio(dimm_mmio, psource, window_size / 4);
1018  		writel(0x01, mmio + PDC_GENERAL_CTLR);
1019  		readl(mmio + PDC_GENERAL_CTLR);
1020  		psource += window_size;
1021  		size -= window_size;
1022  		idx++;
1023  	}
1024  
1025  	if (size) {
1026  		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1027  		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1028  		memcpy_toio(dimm_mmio, psource, size / 4);
1029  		writel(0x01, mmio + PDC_GENERAL_CTLR);
1030  		readl(mmio + PDC_GENERAL_CTLR);
1031  	}
1032  }
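
/*
 * Both DIMM accessors above go through the 32 KB window exposed by the
 * DIMM BAR: PDC_DIMM_WINDOW_CTLR selects which 32 KB page of DIMM memory
 * is currently mapped, so large transfers are chunked one window
 * (window_size = 0x2000 * 4 bytes) at a time.
 */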
1033  
1034  
1035  static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1036  				      u32 subaddr, u32 *pdata)
1037  {
1038  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1039  	u32 i2creg  = 0;
1040  	u32 status;
1041  	u32 count = 0;
1042  
1043  	/* hard-code chip #0 */
1044  	mmio += PDC_CHIP0_OFS;
1045  
1046  	i2creg |= device << 24;
1047  	i2creg |= subaddr << 16;
1048  
1049  	/* Set the device and subaddress */
1050  	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1051  	readl(mmio + PDC_I2C_ADDR_DATA);
1052  
1053  	/* Write Control to perform read operation, mask int */
1054  	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1055  	       mmio + PDC_I2C_CONTROL);
1056  
1057  	for (count = 0; count <= 1000; count ++) {
1058  		status = readl(mmio + PDC_I2C_CONTROL);
1059  		if (status & PDC_I2C_COMPLETE) {
1060  			status = readl(mmio + PDC_I2C_ADDR_DATA);
1061  			break;
1062  		} else if (count == 1000)
1063  			return 0;
1064  	}
1065  
1066  	*pdata = (status >> 8) & 0x000000ff;
1067  	return 1;
1068  }
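
/*
 * i2c_read() above drives the chip's I2C master to fetch one SPD byte
 * from the DIMM: it loads the device and subaddress into
 * PDC_I2C_ADDR_DATA, starts a masked-interrupt read via PDC_I2C_CONTROL,
 * then polls for PDC_I2C_COMPLETE; the byte is returned in bits 15:8 of
 * the data register.  It returns 1 on success and 0 on timeout.
 */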
1069  
1070  
1071  static int pdc20621_detect_dimm(struct ata_host *host)
1072  {
1073  	u32 data = 0;
1074  	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1075  			     PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1076  		if (data == 100)
1077  			return 100;
1078  	} else
1079  		return 0;
1080  
1081  	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1082  		if (data <= 0x75)
1083  			return 133;
1084  	} else
1085  		return 0;
1086  
1087  	return 0;
1088  }
1089  
1090  
1091  static int pdc20621_prog_dimm0(struct ata_host *host)
1092  {
1093  	u32 spd0[50];
1094  	u32 data = 0;
1095  	int size, i;
1096  	u8 bdimmsize;
1097  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1098  	static const struct {
1099  		unsigned int reg;
1100  		unsigned int ofs;
1101  	} pdc_i2c_read_data [] = {
1102  		{ PDC_DIMM_SPD_TYPE, 11 },
1103  		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
1104  		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
1105  		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
1106  		{ PDC_DIMM_SPD_ROW_NUM, 3 },
1107  		{ PDC_DIMM_SPD_BANK_NUM, 17 },
1108  		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
1109  		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1110  		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1111  		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1112  		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1113  		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
1114  	};
1115  
1116  	/* hard-code chip #0 */
1117  	mmio += PDC_CHIP0_OFS;
1118  
1119  	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1120  		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1121  				  pdc_i2c_read_data[i].reg,
1122  				  &spd0[pdc_i2c_read_data[i].ofs]);
1123  
1124  	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1125  	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1126  		((((spd0[27] + 9) / 10) - 1) << 8) ;
1127  	data |= (((((spd0[29] > spd0[28])
1128  		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1129  	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1130  
1131  	if (spd0[18] & 0x08)
1132  		data |= ((0x03) << 14);
1133  	else if (spd0[18] & 0x04)
1134  		data |= ((0x02) << 14);
1135  	else if (spd0[18] & 0x01)
1136  		data |= ((0x01) << 14);
1137  	else
1138  		data |= (0 << 14);
1139  
1140  	/*
1141  	   Calculate the DIMM size from bDIMMSize (a power of 2) and
1142  	   program it into the start/end address field of the register.
1143  	*/
1144  
1145  	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1146  	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
1147  	data |= (((size / 16) - 1) << 16);
1148  	data |= (0 << 23);
1149  	data |= 8;
1150  	writel(data, mmio + PDC_DIMM0_CONTROL);
1151  	readl(mmio + PDC_DIMM0_CONTROL);
1152  	return size;
1153  }
1154  
1155  
1156  static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1157  {
1158  	u32 data, spd0;
1159  	int error, i;
1160  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1161  
1162  	/* hard-code chip #0 */
1163  	mmio += PDC_CHIP0_OFS;
1164  
1165  	/*
1166  	  Set To Default : DIMM Module Global Control Register (0x022259F1)
1167  	  DIMM Arbitration Disable (bit 20)
1168  	  DIMM Data/Control Output Driving Selection (bit12 - bit15)
1169  	  Refresh Enable (bit 17)
1170  	*/
1171  
1172  	data = 0x022259F1;
1173  	writel(data, mmio + PDC_SDRAM_CONTROL);
1174  	readl(mmio + PDC_SDRAM_CONTROL);
1175  
1176  	/* Turn on for ECC */
1177  	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1178  			       PDC_DIMM_SPD_TYPE, &spd0)) {
1179  		dev_err(host->dev,
1180  			"Failed in i2c read: device=%#x, subaddr=%#x\n",
1181  			PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1182  		return 1;
1183  	}
1184  	if (spd0 == 0x02) {
1185  		data |= (0x01 << 16);
1186  		writel(data, mmio + PDC_SDRAM_CONTROL);
1187  		readl(mmio + PDC_SDRAM_CONTROL);
1188  		dev_err(host->dev, "Local DIMM ECC Enabled\n");
1189  	}
1190  
1191  	/* DIMM Initialization Select/Enable (bit 18/19) */
1192  	data &= (~(1<<18));
1193  	data |= (1<<19);
1194  	writel(data, mmio + PDC_SDRAM_CONTROL);
1195  
1196  	error = 1;
1197  	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
1198  		data = readl(mmio + PDC_SDRAM_CONTROL);
1199  		if (!(data & (1<<19))) {
1200  			error = 0;
1201  			break;
1202  		}
1203  		msleep(i*100);
1204  	}
1205  	return error;
1206  }
1207  
1208  
1209  static unsigned int pdc20621_dimm_init(struct ata_host *host)
1210  {
1211  	int speed, size, length;
1212  	u32 addr, spd0, pci_status;
1213  	u32 time_period = 0;
1214  	u32 tcount = 0;
1215  	u32 ticks = 0;
1216  	u32 clock = 0;
1217  	u32 fparam = 0;
1218  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1219  
1220  	/* hard-code chip #0 */
1221  	mmio += PDC_CHIP0_OFS;
1222  
1223  	/* Initialize PLL based upon PCI Bus Frequency */
1224  
1225  	/* Initialize Time Period Register */
1226  	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1227  	time_period = readl(mmio + PDC_TIME_PERIOD);
1228  	dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);
1229  
1230  	/* Enable timer */
1231  	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1232  	readl(mmio + PDC_TIME_CONTROL);
1233  
1234  	/* Wait 3 seconds */
1235  	msleep(3000);
1236  
1237  	/*
1238  	   When timer is enabled, counter is decreased every internal
1239  	   clock cycle.
1240  	*/
1241  
1242  	tcount = readl(mmio + PDC_TIME_COUNTER);
1243  	dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);
1244  
1245  	/*
1246  	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1247  	   register should be >= (0xffffffff - 3x10^8).
1248  	*/
1249  	if (tcount >= PCI_X_TCOUNT) {
1250  		ticks = (time_period - tcount);
1251  		dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);
1252  
1253  		clock = (ticks / 300000);
1254  		dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
1255  			clock, clock);
1256  
1257  		clock = (clock * 33);
1258  		dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
1259  			clock, clock);
1260  
1261  		/* PLL F Param (bit 22:16) */
1262  		fparam = (1400000 / clock) - 2;
1263  		dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);
1264  
1265  		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1266  		pci_status = (0x8a001824 | (fparam << 16));
1267  	} else
1268  		pci_status = PCI_PLL_INIT;
1269  
1270  	/* Initialize PLL. */
1271  	dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
1272  	writel(pci_status, mmio + PDC_CTL_STATUS);
1273  	readl(mmio + PDC_CTL_STATUS);
1274  
1275  	/*
1276  	   Read SPD of DIMM by I2C interface,
1277  	   and program the DIMM Module Controller.
1278  	*/
1279  	if (!(speed = pdc20621_detect_dimm(host))) {
1280  		dev_err(host->dev, "Detect Local DIMM Fail\n");
1281  		return 1;	/* DIMM error */
1282  	}
1283  	dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);
1284  
1285  	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
1286  	size = pdc20621_prog_dimm0(host);
1287  	dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
1288  
1289  	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
1290  	if (pdc20621_prog_dimm_global(host)) {
1291  		dev_err(host->dev,
1292  			"Programming DIMM Module Global Control Register Fail\n");
1293  		return 1;
1294  	}
1295  
1296  	if (dimm_test) {
1297  		u8 test_parttern1[40] =
1298  			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
1299  			'N','o','t',' ','Y','e','t',' ',
1300  			'D','e','f','i','n','e','d',' ',
1301  			'1','.','1','0',
1302  			'9','8','0','3','1','6','1','2',0,0};
1303  		u8 test_parttern2[40] = {0};
1304  
1305  		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1306  		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1307  
1308  		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1309  		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1310  		dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
1311  		       test_parttern2[1], &(test_parttern2[2]));
1312  		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1313  				       40);
1314  		dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
1315  			 test_parttern2[0],
1316  			 test_parttern2[1], &(test_parttern2[2]));
1317  
1318  		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1319  		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1320  		dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
1321  			 test_parttern2[0],
1322  			 test_parttern2[1], &(test_parttern2[2]));
1323  	}
1324  
1325  	/* ECC initialization. */
1326  
1327  	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1328  			       PDC_DIMM_SPD_TYPE, &spd0)) {
1329  		dev_err(host->dev,
1330  			"Failed in i2c read: device=%#x, subaddr=%#x\n",
1331  		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1332  		return 1;
1333  	}
1334  	if (spd0 == 0x02) {
1335  		void *buf;
1336  		dev_dbg(host->dev, "Start ECC initialization\n");
1337  		addr = 0;
1338  		length = size * 1024 * 1024;
1339  		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1340  		if (!buf)
1341  			return 1;
1342  		while (addr < length) {
1343  			pdc20621_put_to_dimm(host, buf, addr,
1344  					     ECC_ERASE_BUF_SZ);
1345  			addr += ECC_ERASE_BUF_SZ;
1346  		}
1347  		kfree(buf);
1348  		dev_dbg(host->dev, "Finish ECC initialization\n");
1349  	}
1350  	return 0;
1351  }
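
/*
 * dimm_init() above brings up the on-board SDRAM in order: measure the
 * PCI bus clock with the chip's timer and program the PLL accordingly,
 * detect the DIMM speed from SPD over I2C, program the DIMM0 and global
 * SDRAM control registers, optionally run the dimm_test pattern check,
 * and finally zero-fill the whole module if it is an ECC DIMM.
 */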
1352  
1353  
1354  static void pdc_20621_init(struct ata_host *host)
1355  {
1356  	u32 tmp;
1357  	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1358  
1359  	/* hard-code chip #0 */
1360  	mmio += PDC_CHIP0_OFS;
1361  
1362  	/*
1363  	 * Select page 0x40 for our 32k DIMM window
1364  	 */
1365  	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1366  	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
1367  	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1368  
1369  	/*
1370  	 * Reset Host DMA
1371  	 */
1372  	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1373  	tmp |= PDC_RESET;
1374  	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1375  	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
1376  
1377  	udelay(10);
1378  
1379  	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1380  	tmp &= ~PDC_RESET;
1381  	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1382  	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
1383  }
1384  
1385  static int pdc_sata_init_one(struct pci_dev *pdev,
1386  			     const struct pci_device_id *ent)
1387  {
1388  	const struct ata_port_info *ppi[] =
1389  		{ &pdc_port_info[ent->driver_data], NULL };
1390  	struct ata_host *host;
1391  	struct pdc_host_priv *hpriv;
1392  	int i, rc;
1393  
1394  	ata_print_version_once(&pdev->dev, DRV_VERSION);
1395  
1396  	/* allocate host */
1397  	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1398  	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1399  	if (!host || !hpriv)
1400  		return -ENOMEM;
1401  
1402  	host->private_data = hpriv;
1403  
1404  	/* acquire resources and fill host */
1405  	rc = pcim_enable_device(pdev);
1406  	if (rc)
1407  		return rc;
1408  
1409  	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1410  				DRV_NAME);
1411  	if (rc == -EBUSY)
1412  		pcim_pin_device(pdev);
1413  	if (rc)
1414  		return rc;
1415  	host->iomap = pcim_iomap_table(pdev);
1416  
1417  	for (i = 0; i < 4; i++) {
1418  		struct ata_port *ap = host->ports[i];
1419  		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1420  		unsigned int offset = 0x200 + i * 0x80;
1421  
1422  		pdc_sata_setup_port(&ap->ioaddr, base + offset);
1423  
1424  		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1425  		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1426  		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1427  	}
1428  
1429  	/* configure and activate */
1430  	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
1431  	if (rc)
1432  		return rc;
1433  
1434  	if (pdc20621_dimm_init(host))
1435  		return -ENOMEM;
1436  	pdc_20621_init(host);
1437  
1438  	pci_set_master(pdev);
1439  	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1440  				 IRQF_SHARED, &pdc_sata_sht);
1441  }
1442  
1443  module_pci_driver(pdc_sata_pci_driver);
1444  
1445  MODULE_AUTHOR("Jeff Garzik");
1446  MODULE_DESCRIPTION("Promise SATA low-level driver");
1447  MODULE_LICENSE("GPL");
1448  MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1449  MODULE_VERSION(DRV_VERSION);
1450