xref: /openbmc/linux/drivers/ata/sata_highbank.c (revision cd5d5810)
1 /*
2  * Calxeda Highbank AHCI SATA platform driver
3  * Copyright 2012 Calxeda, Inc.
4  *
5  * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 #include <linux/kernel.h>
20 #include <linux/gfp.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/types.h>
24 #include <linux/err.h>
25 #include <linux/io.h>
26 #include <linux/spinlock.h>
27 #include <linux/device.h>
28 #include <linux/of_device.h>
29 #include <linux/of_address.h>
30 #include <linux/platform_device.h>
31 #include <linux/libata.h>
32 #include <linux/ahci_platform.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/export.h>
36 #include <linux/gpio.h>
37 #include <linux/of_gpio.h>
38 
39 #include "ahci.h"
40 
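/*
 * Combo PHY registers are reached through a paged window: CPHY_MAP() packs
 * the PHY device number (bits 7-11) and the upper address bits (bits 0-6)
 * into the value written to the map register, and CPHY_ADDR() turns the low
 * nine address bits into a byte offset within the mapped 2 KB window.
 */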
41 #define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
42 #define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
43 #define SERDES_CR_CTL			0x80a0
44 #define SERDES_CR_ADDR			0x80a1
45 #define SERDES_CR_DATA			0x80a2
46 #define CR_BUSY				0x0001
47 #define CR_START			0x0001
48 #define CR_WR_RDN			0x0002
49 #define CPHY_TX_INPUT_STS		0x2001
50 #define CPHY_RX_INPUT_STS		0x2002
51 #define CPHY_SATA_TX_OVERRIDE		0x8000
52 #define CPHY_SATA_RX_OVERRIDE		0x4000
53 #define CPHY_TX_OVERRIDE		0x2004
54 #define CPHY_RX_OVERRIDE		0x2005
55 #define SPHY_LANE			0x100
56 #define SPHY_HALF_RATE			0x0001
57 #define CPHY_SATA_DPLL_MODE		0x0700
58 #define CPHY_SATA_DPLL_SHIFT		8
59 #define CPHY_SATA_DPLL_RESET		(1 << 11)
60 #define CPHY_SATA_TX_ATTEN		0x1c00
61 #define CPHY_SATA_TX_ATTEN_SHIFT	10
62 #define CPHY_PHY_COUNT			6
63 #define CPHY_LANE_COUNT			4
64 #define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)
65 
66 static DEFINE_SPINLOCK(cphy_lock);
67 /* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
68  * sata ports to their phys and then to their lanes within the phys
69  */
70 struct phy_lane_info {
71 	void __iomem *phy_base;
72 	u8 lane_mapping;
73 	u8 phy_devs;
74 	u8 tx_atten;
75 };
76 static struct phy_lane_info port_data[CPHY_PORT_COUNT];
77 
78 static DEFINE_SPINLOCK(sgpio_lock);
79 #define SCLOCK				0
80 #define SLOAD				1
81 #define SDATA				2
82 #define SGPIO_PINS			3
83 #define SGPIO_PORTS			8
84 
85 struct ecx_plat_data {
86 	u32		n_ports;
87 	/* number of extra clocks that the SGPIO PIC controller expects */
88 	u32		pre_clocks;
89 	u32		post_clocks;
90 	unsigned	sgpio_gpio[SGPIO_PINS];
91 	u32		sgpio_pattern;
92 	u32		port_to_sgpio[SGPIO_PORTS];
93 };
94 
95 #define SGPIO_SIGNALS			3
96 #define ECX_ACTIVITY_BITS		0x300000
97 #define ECX_ACTIVITY_SHIFT		0
98 #define ECX_LOCATE_BITS			0x80000
99 #define ECX_LOCATE_SHIFT		1
100 #define ECX_FAULT_BITS			0x400000
101 #define ECX_FAULT_SHIFT			2
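/*
 * Each SATA port drives three consecutive bits (activity, locate, fault) in
 * the serial SGPIO pattern; port_to_sgpio[] gives the port's position in the
 * stream, so its signals sit at bits 3*n, 3*n+1 and 3*n+2 of sgpio_pattern.
 */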
102 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
103 				u32 shift)
104 {
105 	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
106 }
107 
108 static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
109 {
110 	if (state & ECX_ACTIVITY_BITS)
111 		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
112 						ECX_ACTIVITY_SHIFT);
113 	else
114 		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
115 						ECX_ACTIVITY_SHIFT);
116 	if (state & ECX_LOCATE_BITS)
117 		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
118 						ECX_LOCATE_SHIFT);
119 	else
120 		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
121 						ECX_LOCATE_SHIFT);
122 	if (state & ECX_FAULT_BITS)
123 		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
124 						ECX_FAULT_SHIFT);
125 	else
126 		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
127 						ECX_FAULT_SHIFT);
128 }
129 
130 /*
131  * Tell the LED controller that the signal has changed by raising the clock
132  * line for 50 us and then lowering it for 50 us.
133  */
134 static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
135 {
136 	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
137 	udelay(50);
138 	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
139 	udelay(50);
140 }
141 
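/*
 * ->transmit_led_message() hook: fold the EM LED message for this port into
 * the shared SGPIO pattern, then bit-bang the whole pattern out over the
 * GPIO lines: the configured pre-clocks, a load pulse, one data bit per
 * clock cycle and finally the configured post-clocks.
 */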
142 static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
143 					ssize_t size)
144 {
145 	struct ahci_host_priv *hpriv = ap->host->private_data;
146 	struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data;
147 	struct ahci_port_priv *pp = ap->private_data;
148 	unsigned long flags;
149 	int pmp, i;
150 	struct ahci_em_priv *emp;
151 	u32 sgpio_out;
152 
153 	/* get the slot number from the message */
154 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
155 	if (pmp < EM_MAX_SLOTS)
156 		emp = &pp->em_priv[pmp];
157 	else
158 		return -EINVAL;
159 
160 	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
161 		return size;
162 
163 	spin_lock_irqsave(&sgpio_lock, flags);
164 	ecx_parse_sgpio(pdata, ap->port_no, state);
165 	sgpio_out = pdata->sgpio_pattern;
166 	for (i = 0; i < pdata->pre_clocks; i++)
167 		ecx_led_cycle_clock(pdata);
168 
169 	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
170 	ecx_led_cycle_clock(pdata);
171 	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
172 	/*
173 	 * bit-bang out the SGPIO pattern, by consuming a bit and then
174 	 * clocking it out.
175 	 */
176 	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
177 		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
178 		sgpio_out >>= 1;
179 		ecx_led_cycle_clock(pdata);
180 	}
181 	for (i = 0; i < pdata->post_clocks; i++)
182 		ecx_led_cycle_clock(pdata);
183 
184 	/* save off new led state for port/slot */
185 	emp->led_state = state;
186 
187 	spin_unlock_irqrestore(&sgpio_lock, flags);
188 	return size;
189 }
190 
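/*
 * Pick up the SGPIO wiring from the device tree (GPIO lines, LED ordering,
 * extra pre/post clocks) and advertise LED-based enclosure management.
 */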
191 static void highbank_set_em_messages(struct device *dev,
192 					struct ahci_host_priv *hpriv,
193 					struct ata_port_info *pi)
194 {
195 	struct device_node *np = dev->of_node;
196 	struct ecx_plat_data *pdata = hpriv->plat_data;
197 	int i;
198 	int err;
199 
200 	for (i = 0; i < SGPIO_PINS; i++) {
201 		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
202 		if (err < 0)
203 			return;
204 
205 		pdata->sgpio_gpio[i] = err;
206 		err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
207 		if (err) {
208 			pr_err("sata_highbank gpio_request %d failed: %d\n",
209 					i, err);
210 			return;
211 		}
212 		gpio_direction_output(pdata->sgpio_gpio[i], 1);
213 	}
214 	of_property_read_u32_array(np, "calxeda,led-order",
215 						pdata->port_to_sgpio,
216 						pdata->n_ports);
217 	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
218 		pdata->pre_clocks = 0;
219 	if (of_property_read_u32(np, "calxeda,post-clocks",
220 				&pdata->post_clocks))
221 		pdata->post_clocks = 0;
222 
223 	/* store em_loc */
224 	hpriv->em_loc = 0;
225 	hpriv->em_buf_sz = 4;
226 	hpriv->em_msg_type = EM_MSG_TYPE_LED;
227 	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
228 }
229 
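/*
 * Raw combo PHY accessors: write the CPHY_MAP() value to the map register at
 * offset 0x800 to select the device and page, then read or write the
 * register inside the mapped window. cphy_lock keeps the select/access pair
 * atomic, since several ports may share one PHY.
 */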
230 static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
231 {
232 	u32 data;
233 	u8 dev = port_data[sata_port].phy_devs;
234 	spin_lock(&cphy_lock);
235 	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
236 	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
237 	spin_unlock(&cphy_lock);
238 	return data;
239 }
240 
241 static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
242 {
243 	u8 dev = port_data[sata_port].phy_devs;
244 	spin_lock(&cphy_lock);
245 	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
246 	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
247 	spin_unlock(&cphy_lock);
248 }
249 
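/*
 * The SERDES "CR" registers add a second level of indirection: the target
 * address goes into SERDES_CR_ADDR, data moves through SERDES_CR_DATA, and
 * the transaction is started via SERDES_CR_CTL (CR_START, plus CR_WR_RDN
 * for writes) once CR_BUSY has cleared.
 */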
250 static void combo_phy_wait_for_ready(u8 sata_port)
251 {
252 	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
253 		udelay(5);
254 }
255 
256 static u32 combo_phy_read(u8 sata_port, u32 addr)
257 {
258 	combo_phy_wait_for_ready(sata_port);
259 	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
260 	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
261 	combo_phy_wait_for_ready(sata_port);
262 	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
263 }
264 
265 static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
266 {
267 	combo_phy_wait_for_ready(sata_port);
268 	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
269 	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
270 	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
271 }
272 
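/* Clear the SATA RX override for the port's lane before a hard reset. */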
273 static void highbank_cphy_disable_overrides(u8 sata_port)
274 {
275 	u8 lane = port_data[sata_port].lane_mapping;
276 	u32 tmp;
277 	if (unlikely(port_data[sata_port].phy_base == NULL))
278 		return;
279 	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
280 	tmp &= ~CPHY_SATA_RX_OVERRIDE;
281 	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
282 }
283 
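/*
 * Apply the TX attenuation override for the lane; values with bit 3 set
 * (such as the 0xff default) leave the hardware setting untouched.
 */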
284 static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
285 {
286 	u8 lane = port_data[sata_port].lane_mapping;
287 	u32 tmp;
288 
289 	if (val & 0x8)
290 		return;
291 
292 	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
293 	tmp &= ~CPHY_SATA_TX_OVERRIDE;
294 	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
295 
296 	tmp |= CPHY_SATA_TX_OVERRIDE;
297 	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
298 
299 	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
300 	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
301 }
302 
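/*
 * Take override control of the receive path, program the requested DPLL
 * mode and pulse the DPLL reset bit, then let the PHY settle for 15 ms.
 */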
303 static void cphy_override_rx_mode(u8 sata_port, u32 val)
304 {
305 	u8 lane = port_data[sata_port].lane_mapping;
306 	u32 tmp;
307 	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
308 	tmp &= ~CPHY_SATA_RX_OVERRIDE;
309 	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
310 
311 	tmp |= CPHY_SATA_RX_OVERRIDE;
312 	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
313 
314 	tmp &= ~CPHY_SATA_DPLL_MODE;
315 	tmp |= val << CPHY_SATA_DPLL_SHIFT;
316 	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
317 
318 	tmp |= CPHY_SATA_DPLL_RESET;
319 	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
320 
321 	tmp &= ~CPHY_SATA_DPLL_RESET;
322 	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
323 
324 	msleep(15);
325 }
326 
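/*
 * After a hard reset: wait (with a bounded spin) for the lane to leave
 * half-rate mode, then reapply the RX DPLL override and the TX attenuation
 * override taken from the device tree.
 */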
327 static void highbank_cphy_override_lane(u8 sata_port)
328 {
329 	u8 lane = port_data[sata_port].lane_mapping;
330 	u32 tmp, k = 0;
331 
332 	if (unlikely(port_data[sata_port].phy_base == NULL))
333 		return;
334 	do {
335 		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
336 						lane * SPHY_LANE);
337 	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
338 	cphy_override_rx_mode(sata_port, 3);
339 	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
340 }
341 
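/*
 * Walk the "calxeda,port-phys" phandles to build the per-port PHY table:
 * map each combo PHY once, record each port's lane and PHY device number,
 * and pick up the optional "calxeda,tx-atten" settings.
 */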
342 static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
343 {
344 	struct device_node *sata_node = dev->of_node;
345 	int phy_count = 0, phy, port = 0, i;
346 	void __iomem *cphy_base[CPHY_PHY_COUNT];
347 	struct device_node *phy_nodes[CPHY_PHY_COUNT];
348 	u32 tx_atten[CPHY_PORT_COUNT];
349 
350 	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
351 	memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT);
352 	memset(tx_atten, 0xff, sizeof(tx_atten));
353 
354 	do {
355 		u32 tmp;
356 		struct of_phandle_args phy_data;
357 		if (of_parse_phandle_with_args(sata_node,
358 				"calxeda,port-phys", "#phy-cells",
359 				port, &phy_data))
360 			break;
361 		for (phy = 0; phy < phy_count; phy++) {
362 			if (phy_nodes[phy] == phy_data.np)
363 				break;
364 		}
365 		if (phy_nodes[phy] == NULL) {
366 			phy_nodes[phy] = phy_data.np;
367 			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
368 			if (cphy_base[phy] == NULL) {
369 				return 0;
370 			}
371 			phy_count += 1;
372 		}
373 		port_data[port].lane_mapping = phy_data.args[0];
374 		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
375 		port_data[port].phy_devs = tmp;
376 		port_data[port].phy_base = cphy_base[phy];
377 		of_node_put(phy_data.np);
378 		port += 1;
379 	} while (port < CPHY_PORT_COUNT);
380 	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
381 				tx_atten, port);
382 	for (i = 0; i < port; i++)
383 		port_data[i].tx_atten = (u8) tx_atten[i];
384 	return 0;
385 }
386 
387 /*
388  * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
389  * drives. Retrying the phy hard reset can work around the issue, but the
390  * drive may fail again. In fewer than 150 of 15000 test runs, it took more
391  * than 10 tries for the link to be established (but never more than 35).
392  * Triple the maximum observed retry count to provide plenty of margin for
393  * rare events and to guarantee that the link is established.
394  *
395  * Also, the default 2-second time-out on a failed drive is too long in
396  * this situation. The U-Boot implementation of the same driver function
397  * uses a much shorter time-out period and never experiences a time-out
398  * issue. Reducing the time-out to 500ms improves the responsiveness.
399  * The other timing constants were kept the same as the stock AHCI driver.
400  * This change was also tested 15000 times on 24 drives and none of them
401  * experienced a time-out.
402  */
403 static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
404 				unsigned long deadline)
405 {
406 	static const unsigned long timing[] = { 5, 100, 500 };
407 	struct ata_port *ap = link->ap;
408 	struct ahci_port_priv *pp = ap->private_data;
409 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
410 	struct ata_taskfile tf;
411 	bool online;
412 	u32 sstatus;
413 	int rc;
414 	int retry = 100;
415 
416 	ahci_stop_engine(ap);
417 
418 	/* clear D2H reception area to properly wait for D2H FIS */
419 	ata_tf_init(link->device, &tf);
420 	tf.command = ATA_BUSY;
421 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
422 
423 	do {
424 		highbank_cphy_disable_overrides(link->ap->port_no);
425 		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
426 		highbank_cphy_override_lane(link->ap->port_no);
427 
428 		/* If SStatus reports a device (DET is 1) but the link did not
429 		 * come up, retry the hard reset.
430 		 */
431 		if (sata_scr_read(link, SCR_STATUS, &sstatus))
432 			break;
433 		if (!(sstatus & 0x3))
434 			break;
435 	} while (!online && retry--);
436 
437 	ahci_start_engine(ap);
438 
439 	if (online)
440 		*class = ahci_dev_classify(ap);
441 
442 	return rc;
443 }
444 
445 static struct ata_port_operations ahci_highbank_ops = {
446 	.inherits		= &ahci_ops,
447 	.hardreset		= ahci_highbank_hardreset,
448 	.transmit_led_message   = ecx_transmit_led_message,
449 };
450 
451 static const struct ata_port_info ahci_highbank_port_info = {
452 	.flags          = AHCI_FLAG_COMMON,
453 	.pio_mask       = ATA_PIO4,
454 	.udma_mask      = ATA_UDMA6,
455 	.port_ops       = &ahci_highbank_ops,
456 };
457 
458 static struct scsi_host_template ahci_highbank_platform_sht = {
459 	AHCI_SHT("sata_highbank"),
460 };
461 
462 static const struct of_device_id ahci_of_match[] = {
463 	{ .compatible = "calxeda,hb-ahci" },
464 	{},
465 };
466 MODULE_DEVICE_TABLE(of, ahci_of_match);
467 
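/*
 * Probe follows the usual AHCI platform sequence (MMIO and IRQ lookup, host
 * allocation, controller reset/init, ata_host_activate()), with the
 * Highbank PHY setup and SGPIO/EM wiring done before the host is allocated.
 */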
468 static int ahci_highbank_probe(struct platform_device *pdev)
469 {
470 	struct device *dev = &pdev->dev;
471 	struct ahci_host_priv *hpriv;
472 	struct ecx_plat_data *pdata;
473 	struct ata_host *host;
474 	struct resource *mem;
475 	int irq;
476 	int i;
477 	int rc;
478 	u32 n_ports;
479 	struct ata_port_info pi = ahci_highbank_port_info;
480 	const struct ata_port_info *ppi[] = { &pi, NULL };
481 
482 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
483 	if (!mem) {
484 		dev_err(dev, "no mmio space\n");
485 		return -EINVAL;
486 	}
487 
488 	irq = platform_get_irq(pdev, 0);
489 	if (irq <= 0) {
490 		dev_err(dev, "no irq\n");
491 		return -EINVAL;
492 	}
493 
494 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
495 	if (!hpriv) {
496 		dev_err(dev, "can't alloc ahci_host_priv\n");
497 		return -ENOMEM;
498 	}
499 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
500 	if (!pdata) {
501 		dev_err(dev, "can't alloc ecx_plat_data\n");
502 		return -ENOMEM;
503 	}
504 
505 	hpriv->flags |= (unsigned long)pi.private_data;
506 
507 	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
508 	if (!hpriv->mmio) {
509 		dev_err(dev, "can't map %pR\n", mem);
510 		return -ENOMEM;
511 	}
512 
513 	rc = highbank_initialize_phys(dev, hpriv->mmio);
514 	if (rc)
515 		return rc;
516 
517 
518 	ahci_save_initial_config(dev, hpriv, 0, 0);
519 
520 	/* prepare host */
521 	if (hpriv->cap & HOST_CAP_NCQ)
522 		pi.flags |= ATA_FLAG_NCQ;
523 
524 	if (hpriv->cap & HOST_CAP_PMP)
525 		pi.flags |= ATA_FLAG_PMP;
526 
527 	if (hpriv->cap & HOST_CAP_64)
528 		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
529 
530 	/* CAP.NP sometimes indicates the index of the last enabled
531 	 * port, at other times, that of the last possible port, so
532 	 * determining the maximum port number requires looking at
533 	 * both CAP.NP and port_map.
534 	 */
535 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
536 
537 	pdata->n_ports = n_ports;
538 	hpriv->plat_data = pdata;
539 	highbank_set_em_messages(dev, hpriv, &pi);
540 
541 	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
542 	if (!host) {
543 		rc = -ENOMEM;
544 		goto err0;
545 	}
546 
547 	host->private_data = hpriv;
548 
549 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
550 		host->flags |= ATA_HOST_PARALLEL_SCAN;
551 
552 	for (i = 0; i < host->n_ports; i++) {
553 		struct ata_port *ap = host->ports[i];
554 
555 		ata_port_desc(ap, "mmio %pR", mem);
556 		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
557 
558 		/* set enclosure management message type */
559 		if (ap->flags & ATA_FLAG_EM)
560 			ap->em_message_type = hpriv->em_msg_type;
561 
562 		/* disabled/not-implemented port */
563 		if (!(hpriv->port_map & (1 << i)))
564 			ap->ops = &ata_dummy_port_ops;
565 	}
566 
567 	rc = ahci_reset_controller(host);
568 	if (rc)
569 		goto err0;
570 
571 	ahci_init_controller(host);
572 	ahci_print_info(host, "platform");
573 
574 	rc = ata_host_activate(host, irq, ahci_interrupt, 0,
575 					&ahci_highbank_platform_sht);
576 	if (rc)
577 		goto err0;
578 
579 	return 0;
580 err0:
581 	return rc;
582 }
583 
584 #ifdef CONFIG_PM_SLEEP
585 static int ahci_highbank_suspend(struct device *dev)
586 {
587 	struct ata_host *host = dev_get_drvdata(dev);
588 	struct ahci_host_priv *hpriv = host->private_data;
589 	void __iomem *mmio = hpriv->mmio;
590 	u32 ctl;
591 	int rc;
592 
593 	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
594 		dev_err(dev, "firmware update required for suspend/resume\n");
595 		return -EIO;
596 	}
597 
598 	/*
599 	 * AHCI spec rev1.1 section 8.3.3:
600 	 * Software must disable interrupts prior to requesting a
601 	 * transition of the HBA to D3 state.
602 	 */
603 	ctl = readl(mmio + HOST_CTL);
604 	ctl &= ~HOST_IRQ_EN;
605 	writel(ctl, mmio + HOST_CTL);
606 	readl(mmio + HOST_CTL); /* flush */
607 
608 	rc = ata_host_suspend(host, PMSG_SUSPEND);
609 	if (rc)
610 		return rc;
611 
612 	return 0;
613 }
614 
615 static int ahci_highbank_resume(struct device *dev)
616 {
617 	struct ata_host *host = dev_get_drvdata(dev);
618 	int rc;
619 
620 	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
621 		rc = ahci_reset_controller(host);
622 		if (rc)
623 			return rc;
624 
625 		ahci_init_controller(host);
626 	}
627 
628 	ata_host_resume(host);
629 
630 	return 0;
631 }
632 #endif
633 
634 static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
635 		  ahci_highbank_suspend, ahci_highbank_resume);
636 
637 static struct platform_driver ahci_highbank_driver = {
638 	.remove = ata_platform_remove_one,
639 	.driver = {
640 		.name = "highbank-ahci",
641 		.owner = THIS_MODULE,
642 		.of_match_table = ahci_of_match,
643 		.pm = &ahci_highbank_pm_ops,
644 	},
645 	.probe = ahci_highbank_probe,
646 };
647 
648 module_platform_driver(ahci_highbank_driver);
649 
650 MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
651 MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
652 MODULE_LICENSE("GPL");
653 MODULE_ALIAS("sata:highbank");
654