/*
 * Calxeda Highbank AHCI SATA platform driver
 * Copyright 2012 Calxeda, Inc.
 *
 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include "ahci.h"

#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL			0x80a0
#define SERDES_CR_ADDR			0x80a1
#define SERDES_CR_DATA			0x80a2
#define CR_BUSY				0x0001
#define CR_START			0x0001
#define CR_WR_RDN			0x0002
#define CPHY_TX_INPUT_STS		0x2001
#define CPHY_RX_INPUT_STS		0x2002
#define CPHY_SATA_TX_OVERRIDE		0x8000
#define CPHY_SATA_RX_OVERRIDE		0x4000
#define CPHY_TX_OVERRIDE		0x2004
#define CPHY_RX_OVERRIDE		0x2005
#define SPHY_LANE			0x100
#define SPHY_HALF_RATE			0x0001
#define CPHY_SATA_DPLL_MODE		0x0700
#define CPHY_SATA_DPLL_SHIFT		8
#define CPHY_SATA_DPLL_RESET		(1 << 11)
#define CPHY_SATA_TX_ATTEN		0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT	10
#define CPHY_PHY_COUNT			6
#define CPHY_LANE_COUNT			4
#define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)

static DEFINE_SPINLOCK(cphy_lock);
/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 * sata ports to their phys and then to their lanes within the phys
 */
struct phy_lane_info {
	void __iomem *phy_base;
	u8 lane_mapping;
	u8 phy_devs;
	u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK				0
#define SLOAD				1
#define SDATA				2
#define SGPIO_PINS			3
#define SGPIO_PORTS			8

struct ecx_plat_data {
	u32		n_ports;
	/* number of extra clocks that the SGPIO PIC controller expects */
	u32		pre_clocks;
	u32		post_clocks;
	unsigned	sgpio_gpio[SGPIO_PINS];
	u32		sgpio_pattern;
	u32		port_to_sgpio[SGPIO_PORTS];
};

#define SGPIO_SIGNALS			3
#define ECX_ACTIVITY_BITS		0x300000
#define ECX_ACTIVITY_SHIFT		0
#define ECX_LOCATE_BITS			0x80000
#define ECX_LOCATE_SHIFT		1
#define ECX_FAULT_BITS			0x400000
#define ECX_FAULT_SHIFT			2
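/*
 * Each port contributes SGPIO_SIGNALS consecutive bits (activity, locate,
 * fault) to the serial SGPIO pattern; port_to_sgpio[] gives the port's
 * position in that stream.
 */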
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
				u32 shift)
{
	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}

static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
	if (state & ECX_ACTIVITY_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	if (state & ECX_LOCATE_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	if (state & ECX_FAULT_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
}

/*
 * Tell the LED controller that the signal has changed by raising the clock
 * line for 50 us and then lowering it for 50 us.
 */
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
	udelay(50);
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
	udelay(50);
}

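/*
 * libata enclosure-management hook: fold the LED message for one port/slot
 * into the shared SGPIO pattern, then bit-bang the whole pattern out over
 * the clock/load/data GPIO lines under sgpio_lock.
 */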
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
	/*
	 * bit-bang out the SGPIO pattern by consuming a bit and then
	 * clocking it out.
	 */
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);
	return size;
}

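/*
 * Request the three SGPIO lines and pull the LED wiring (port ordering and
 * pre/post clock padding) from the device tree, then advertise LED-type
 * enclosure management to libata.
 */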
static void highbank_set_em_messages(struct device *dev,
					struct ahci_host_priv *hpriv,
					struct ata_port_info *pi)
{
	struct device_node *np = dev->of_node;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	int i;
	int err;

	for (i = 0; i < SGPIO_PINS; i++) {
		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
		if (IS_ERR_VALUE(err))
			return;

		pdata->sgpio_gpio[i] = err;
		err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
		if (err) {
			pr_err("sata_highbank gpio_request %d failed: %d\n",
					i, err);
			return;
		}
		gpio_direction_output(pdata->sgpio_gpio[i], 1);
	}
	of_property_read_u32_array(np, "calxeda,led-order",
						pdata->port_to_sgpio,
						pdata->n_ports);
	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
		pdata->pre_clocks = 0;
	if (of_property_read_u32(np, "calxeda,post-clocks",
				&pdata->post_clocks))
		pdata->post_clocks = 0;

	/* store em_loc */
	hpriv->em_loc = 0;
	hpriv->em_buf_sz = 4;
	hpriv->em_msg_type = EM_MSG_TYPE_LED;
	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}

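/*
 * Combo PHY registers are reached through a small indirect window: the
 * CPHY_MAP value written at phy_base + 0x800 selects the PHY device and the
 * upper address bits, after which the register appears at CPHY_ADDR(addr)
 * within the window. cphy_lock serialises the map-then-access pair.
 */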
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
	u32 data;
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
	return data;
}

static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
}

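/*
 * SerDes "CR" registers add a second level of indirection: poll CR_BUSY,
 * latch the target address in SERDES_CR_ADDR, then either start a read via
 * SERDES_CR_CTL and fetch SERDES_CR_DATA, or load SERDES_CR_DATA and start
 * a write with CR_WR_RDN | CR_START.
 */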
static void combo_phy_wait_for_ready(u8 sata_port)
{
	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
		udelay(5);
}

static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}

static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

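/*
 * Apply the per-port TX attenuation taken from "calxeda,tx-atten". Values
 * with bit 3 set are treated as "no override"; otherwise the TX override is
 * enabled and the attenuation field programmed.
 */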
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	if (val & 0x8)
		return;

	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}

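/*
 * Force the RX DPLL mode for one lane: latch the current RX input status,
 * enable the SATA RX override, program the requested DPLL mode, pulse the
 * DPLL reset bit, and give the lane a moment to settle.
 */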
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	msleep(15);
}

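/*
 * After a hard reset, wait (bounded to ~1000 polls) for the lane to leave
 * half-rate mode, then re-apply the RX DPLL mode and TX attenuation
 * overrides for this port's lane.
 */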
static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
						lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}

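/*
 * Walk the "calxeda,port-phys" phandle list to bind each SATA port to its
 * combo PHY and lane (each PHY is ioremapped once and shared), read the
 * "phydev" index from the PHY node, and record per-port TX attenuation from
 * "calxeda,tx-atten".
 *
 * Illustrative only - a rough sketch of the device-tree properties this
 * function and highbank_set_em_messages() consume (node names, GPIO specs
 * and values below are made up; see the Calxeda binding and board dts for
 * the real ones):
 *
 *	sata@ffe08000 {
 *		compatible = "calxeda,hb-ahci";
 *		calxeda,port-phys = <&combophy5 0>, <&combophy0 0>;
 *		calxeda,tx-atten = <0xff 0xff>;
 *		calxeda,sgpio-gpio = <&gpioh 5 1>, <&gpioh 6 1>, <&gpioh 7 1>;
 *		calxeda,led-order = <4 0 1 2 3>;
 *	};
 */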
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
	struct device_node *sata_node = dev->of_node;
	int phy_count = 0, phy, port = 0, i;
	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
	u32 tx_atten[CPHY_PORT_COUNT] = {};

	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

	do {
		u32 tmp;
		struct of_phandle_args phy_data;
		if (of_parse_phandle_with_args(sata_node,
				"calxeda,port-phys", "#phy-cells",
				port, &phy_data))
			break;
		for (phy = 0; phy < phy_count; phy++) {
			if (phy_nodes[phy] == phy_data.np)
				break;
		}
		if (phy_nodes[phy] == NULL) {
			phy_nodes[phy] = phy_data.np;
			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
			if (cphy_base[phy] == NULL) {
				return 0;
			}
			phy_count += 1;
		}
		port_data[port].lane_mapping = phy_data.args[0];
		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
		port_data[port].phy_devs = tmp;
		port_data[port].phy_base = cphy_base[phy];
		of_node_put(phy_data.np);
		port += 1;
	} while (port < CPHY_PORT_COUNT);
	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
				tx_atten, port);
	for (i = 0; i < port; i++)
		port_data[i].tx_atten = (u8) tx_atten[i];
	return 0;
}

/*
 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
 * drives. Retrying the phy hard reset can work around the issue, but the
 * drive may fail again. In less than 150 out of 15000 test runs, it took
 * more than 10 tries for the link to be established (but never more than
 * 35). Triple the maximum observed retry count to provide plenty of margin
 * for rare events and to guarantee that the link is established.
 *
 * Also, the default 2 second time-out on a failed drive is too long in
 * this situation. The U-Boot implementation of the same driver function
 * uses a much shorter time-out period and never experiences a time-out
 * issue. Reducing the time-out to 500ms improves the responsiveness.
 * The other timing constants were kept the same as the stock AHCI driver.
 * This change was also tested 15000 times on 24 drives and none of them
 * experienced a time-out.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	static const unsigned long timing[] = { 5, 100, 500 };
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/* If SStatus.DET shows a device present but the link did not
		 * come up, retry the hard reset.
		 */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
	.transmit_led_message	= ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags          = AHCI_FLAG_COMMON,
	.pio_mask       = ATA_PIO4,
	.udma_mask      = ATA_UDMA6,
	.port_ops       = &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

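/*
 * Standard AHCI platform probe, plus the Highbank-specific pieces: combo PHY
 * discovery/override setup and SGPIO-based enclosure-management LEDs.
 */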
static int ahci_highbank_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ecx_plat_data *pdata;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int i;
	int rc;
	u32 n_ports;
	struct ata_port_info pi = ahci_highbank_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "can't alloc ecx_plat_data\n");
		return -ENOMEM;
	}

	hpriv->irq = irq;
	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	rc = highbank_initialize_phys(dev, hpriv->mmio);
	if (rc)
		return rc;

	ahci_save_initial_config(dev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (hpriv->cap & HOST_CAP_64)
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	pdata->n_ports = n_ports;
	hpriv->plat_data = pdata;
	highbank_set_em_messages(dev, hpriv, &pi);

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	return rc;
}

#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	rc = ata_host_suspend(host, PMSG_SUSPEND);
	if (rc)
		return rc;

	return 0;
}

static int ahci_highbank_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	int rc;

	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
		  ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "highbank-ahci",
		.of_match_table = ahci_of_match,
		.pm = &ahci_highbank_pm_ops,
	},
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");