v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Calxeda Highbank AHCI SATA platform driver
  4 * Copyright 2012 Calxeda, Inc.
  5 *
  6 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
  7 */
  8#include <linux/kernel.h>
  9#include <linux/gfp.h>
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/err.h>
 13#include <linux/io.h>
 14#include <linux/spinlock.h>
 15#include <linux/device.h>
 16#include <linux/of.h>
 17#include <linux/of_address.h>
 18#include <linux/platform_device.h>
 19#include <linux/libata.h>
 20#include <linux/interrupt.h>
 21#include <linux/delay.h>
 22#include <linux/export.h>
 23#include <linux/gpio/consumer.h>
 24
 25#include "ahci.h"
 26
 27#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
 28#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
 29#define SERDES_CR_CTL			0x80a0
 30#define SERDES_CR_ADDR			0x80a1
 31#define SERDES_CR_DATA			0x80a2
 32#define CR_BUSY				0x0001
 33#define CR_START			0x0001
 34#define CR_WR_RDN			0x0002
 35#define CPHY_TX_INPUT_STS		0x2001
 36#define CPHY_RX_INPUT_STS		0x2002
 37#define CPHY_SATA_TX_OVERRIDE		0x8000
 38#define CPHY_SATA_RX_OVERRIDE	 	0x4000
 39#define CPHY_TX_OVERRIDE		0x2004
 40#define CPHY_RX_OVERRIDE		0x2005
 41#define SPHY_LANE			0x100
 42#define SPHY_HALF_RATE			0x0001
 43#define CPHY_SATA_DPLL_MODE		0x0700
 44#define CPHY_SATA_DPLL_SHIFT		8
 45#define CPHY_SATA_DPLL_RESET		(1 << 11)
 46#define CPHY_SATA_TX_ATTEN		0x1c00
 47#define CPHY_SATA_TX_ATTEN_SHIFT	10
 48#define CPHY_PHY_COUNT			6
 49#define CPHY_LANE_COUNT			4
 50#define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)
 51
 52static DEFINE_SPINLOCK(cphy_lock);
 53/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 54 * sata ports to their phys and then to their lanes within the phys
 55 */
 56struct phy_lane_info {
 57	void __iomem *phy_base;
 58	u8 lane_mapping;
 59	u8 phy_devs;
 60	u8 tx_atten;
 61};
 62static struct phy_lane_info port_data[CPHY_PORT_COUNT];
 63
 64static DEFINE_SPINLOCK(sgpio_lock);
 65#define SCLOCK				0
 66#define SLOAD				1
 67#define SDATA				2
 68#define SGPIO_PINS			3
 69#define SGPIO_PORTS			8
 70
 71struct ecx_plat_data {
 72	u32		n_ports;
 73	/* number of extra clocks that the SGPIO PIC controller expects */
 74	u32		pre_clocks;
 75	u32		post_clocks;
 76	struct gpio_desc *sgpio_gpiod[SGPIO_PINS];
 77	u32		sgpio_pattern;
 78	u32		port_to_sgpio[SGPIO_PORTS];
 79};
 80
 81#define SGPIO_SIGNALS			3
 82#define ECX_ACTIVITY_BITS		0x300000
 83#define ECX_ACTIVITY_SHIFT		0
 84#define ECX_LOCATE_BITS			0x80000
 85#define ECX_LOCATE_SHIFT		1
 86#define ECX_FAULT_BITS			0x400000
 87#define ECX_FAULT_SHIFT			2
 88static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
 89				u32 shift)
 90{
 91	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
 92}
 93
 94static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
 95{
 96	if (state & ECX_ACTIVITY_BITS)
 97		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
 98						ECX_ACTIVITY_SHIFT);
 99	else
100		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
101						ECX_ACTIVITY_SHIFT);
102	if (state & ECX_LOCATE_BITS)
103		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
104						ECX_LOCATE_SHIFT);
105	else
106		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
107						ECX_LOCATE_SHIFT);
108	if (state & ECX_FAULT_BITS)
109		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
110						ECX_FAULT_SHIFT);
111	else
112		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
113						ECX_FAULT_SHIFT);
114}
115
116/*
117 * Tell the LED controller that the signal has changed by raising the clock
118 * line for 50 uS and then lowering it for 50 uS.
119 */
120static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
121{
122	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 1);
123	udelay(50);
124	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 0);
125	udelay(50);
126}
127
128static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
129					ssize_t size)
130{
131	struct ahci_host_priv *hpriv =  ap->host->private_data;
132	struct ecx_plat_data *pdata = hpriv->plat_data;
133	struct ahci_port_priv *pp = ap->private_data;
134	unsigned long flags;
135	int pmp, i;
136	struct ahci_em_priv *emp;
137	u32 sgpio_out;
138
139	/* get the slot number from the message */
140	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
141	if (pmp < EM_MAX_SLOTS)
142		emp = &pp->em_priv[pmp];
143	else
144		return -EINVAL;
145
146	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
147		return size;
148
149	spin_lock_irqsave(&sgpio_lock, flags);
150	ecx_parse_sgpio(pdata, ap->port_no, state);
151	sgpio_out = pdata->sgpio_pattern;
152	for (i = 0; i < pdata->pre_clocks; i++)
153		ecx_led_cycle_clock(pdata);
154
155	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 1);
156	ecx_led_cycle_clock(pdata);
157	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 0);
158	/*
159	 * bit-bang out the SGPIO pattern, by consuming a bit and then
160	 * clocking it out.
161	 */
162	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
163		gpiod_set_value(pdata->sgpio_gpiod[SDATA], sgpio_out & 1);
164		sgpio_out >>= 1;
165		ecx_led_cycle_clock(pdata);
166	}
167	for (i = 0; i < pdata->post_clocks; i++)
168		ecx_led_cycle_clock(pdata);
169
170	/* save off new led state for port/slot */
171	emp->led_state = state;
172
173	spin_unlock_irqrestore(&sgpio_lock, flags);
174	return size;
175}
176
177static void highbank_set_em_messages(struct device *dev,
178					struct ahci_host_priv *hpriv,
179					struct ata_port_info *pi)
180{
181	struct device_node *np = dev->of_node;
182	struct ecx_plat_data *pdata = hpriv->plat_data;
183	int i;
184
185	for (i = 0; i < SGPIO_PINS; i++) {
186		struct gpio_desc *gpiod;
187
188		gpiod = devm_gpiod_get_index(dev, "calxeda,sgpio", i,
189					     GPIOD_OUT_HIGH);
190		if (IS_ERR(gpiod)) {
191			dev_err(dev, "failed to get GPIO %d\n", i);
192			continue;
193		}
194		gpiod_set_consumer_name(gpiod, "CX SGPIO");
195
196		pdata->sgpio_gpiod[i] = gpiod;
197	}
198	of_property_read_u32_array(np, "calxeda,led-order",
199						pdata->port_to_sgpio,
200						pdata->n_ports);
201	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
202		pdata->pre_clocks = 0;
203	if (of_property_read_u32(np, "calxeda,post-clocks",
204				&pdata->post_clocks))
205		pdata->post_clocks = 0;
206
207	/* store em_loc */
208	hpriv->em_loc = 0;
209	hpriv->em_buf_sz = 4;
210	hpriv->em_msg_type = EM_MSG_TYPE_LED;
211	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
212}
213
214static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
215{
216	u32 data;
217	u8 dev = port_data[sata_port].phy_devs;
218	spin_lock(&cphy_lock);
219	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
220	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
221	spin_unlock(&cphy_lock);
222	return data;
223}
224
225static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
226{
227	u8 dev = port_data[sata_port].phy_devs;
228	spin_lock(&cphy_lock);
229	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
230	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
231	spin_unlock(&cphy_lock);
232}
233
234static void combo_phy_wait_for_ready(u8 sata_port)
235{
236	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
237		udelay(5);
238}
239
240static u32 combo_phy_read(u8 sata_port, u32 addr)
241{
242	combo_phy_wait_for_ready(sata_port);
243	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
244	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
245	combo_phy_wait_for_ready(sata_port);
246	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
247}
248
249static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
250{
251	combo_phy_wait_for_ready(sata_port);
252	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
253	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
254	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
255}
256
257static void highbank_cphy_disable_overrides(u8 sata_port)
258{
259	u8 lane = port_data[sata_port].lane_mapping;
260	u32 tmp;
261	if (unlikely(port_data[sata_port].phy_base == NULL))
262		return;
263	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
264	tmp &= ~CPHY_SATA_RX_OVERRIDE;
265	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
266}
267
268static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
269{
270	u8 lane = port_data[sata_port].lane_mapping;
271	u32 tmp;
272
273	if (val & 0x8)
274		return;
275
276	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
277	tmp &= ~CPHY_SATA_TX_OVERRIDE;
278	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
279
280	tmp |= CPHY_SATA_TX_OVERRIDE;
281	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
282
283	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
284	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
285}
286
287static void cphy_override_rx_mode(u8 sata_port, u32 val)
288{
289	u8 lane = port_data[sata_port].lane_mapping;
290	u32 tmp;
291	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
292	tmp &= ~CPHY_SATA_RX_OVERRIDE;
293	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
294
295	tmp |= CPHY_SATA_RX_OVERRIDE;
296	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
297
298	tmp &= ~CPHY_SATA_DPLL_MODE;
299	tmp |= val << CPHY_SATA_DPLL_SHIFT;
300	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
301
302	tmp |= CPHY_SATA_DPLL_RESET;
303	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
304
305	tmp &= ~CPHY_SATA_DPLL_RESET;
306	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
307
308	msleep(15);
309}
310
311static void highbank_cphy_override_lane(u8 sata_port)
312{
313	u8 lane = port_data[sata_port].lane_mapping;
314	u32 tmp, k = 0;
315
316	if (unlikely(port_data[sata_port].phy_base == NULL))
317		return;
318	do {
319		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
320						lane * SPHY_LANE);
321	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
322	cphy_override_rx_mode(sata_port, 3);
323	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
324}
325
326static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
327{
328	struct device_node *sata_node = dev->of_node;
329	int phy_count = 0, phy, port = 0, i;
330	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
331	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
332	u32 tx_atten[CPHY_PORT_COUNT] = {};
333
334	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
335
336	do {
337		u32 tmp;
338		struct of_phandle_args phy_data;
339		if (of_parse_phandle_with_args(sata_node,
340				"calxeda,port-phys", "#phy-cells",
341				port, &phy_data))
342			break;
343		for (phy = 0; phy < phy_count; phy++) {
344			if (phy_nodes[phy] == phy_data.np)
345				break;
346		}
347		if (phy_nodes[phy] == NULL) {
348			phy_nodes[phy] = phy_data.np;
349			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
350			if (cphy_base[phy] == NULL) {
351				return 0;
352			}
353			phy_count += 1;
354		}
355		port_data[port].lane_mapping = phy_data.args[0];
356		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
357		port_data[port].phy_devs = tmp;
358		port_data[port].phy_base = cphy_base[phy];
359		of_node_put(phy_data.np);
360		port += 1;
361	} while (port < CPHY_PORT_COUNT);
362	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
363				tx_atten, port);
364	for (i = 0; i < port; i++)
365		port_data[i].tx_atten = (u8) tx_atten[i];
366	return 0;
367}
368
369/*
370 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
371 * Retrying the phy hard reset can work around the issue, but the drive
372 * may fail again. In less than 150 out of 15000 test runs, it took more
373 * than 10 tries for the link to be established (but never more than 35).
374 * Triple the maximum observed retry count to provide plenty of margin for
375 * rare events and to guarantee that the link is established.
376 *
377 * Also, the default 2 second time-out on a failed drive is too long in
378 * this situation. The uboot implementation of the same driver function
379 * uses a much shorter time-out period and never experiences a time out
380 * issue. Reducing the time-out to 500ms improves the responsiveness.
381 * The other timing constants were kept the same as the stock AHCI driver.
382 * This change was also tested 15000 times on 24 drives and none of them
383 * experienced a time out.
384 */
385static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
386				unsigned long deadline)
387{
388	static const unsigned int timing[] = { 5, 100, 500};
389	struct ata_port *ap = link->ap;
390	struct ahci_port_priv *pp = ap->private_data;
391	struct ahci_host_priv *hpriv = ap->host->private_data;
392	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
393	struct ata_taskfile tf;
394	bool online;
395	u32 sstatus;
396	int rc;
397	int retry = 100;
398
399	hpriv->stop_engine(ap);
400
401	/* clear D2H reception area to properly wait for D2H FIS */
402	ata_tf_init(link->device, &tf);
403	tf.status = ATA_BUSY;
404	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
405
406	do {
407		highbank_cphy_disable_overrides(link->ap->port_no);
408		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
409		highbank_cphy_override_lane(link->ap->port_no);
410
411		/* If the status is 1, we are connected, but the link did not
412		 * come up. So retry resetting the link again.
413		 */
414		if (sata_scr_read(link, SCR_STATUS, &sstatus))
415			break;
416		if (!(sstatus & 0x3))
417			break;
418	} while (!online && retry--);
419
420	hpriv->start_engine(ap);
421
422	if (online)
423		*class = ahci_dev_classify(ap);
424
425	return rc;
426}
427
428static struct ata_port_operations ahci_highbank_ops = {
429	.inherits		= &ahci_ops,
430	.hardreset		= ahci_highbank_hardreset,
431	.transmit_led_message   = ecx_transmit_led_message,
432};
433
434static const struct ata_port_info ahci_highbank_port_info = {
435	.flags          = AHCI_FLAG_COMMON,
436	.pio_mask       = ATA_PIO4,
437	.udma_mask      = ATA_UDMA6,
438	.port_ops       = &ahci_highbank_ops,
439};
440
441static const struct scsi_host_template ahci_highbank_platform_sht = {
442	AHCI_SHT("sata_highbank"),
443};
444
445static const struct of_device_id ahci_of_match[] = {
446	{ .compatible = "calxeda,hb-ahci" },
447	{ /* sentinel */ }
448};
449MODULE_DEVICE_TABLE(of, ahci_of_match);
450
451static int ahci_highbank_probe(struct platform_device *pdev)
452{
453	struct device *dev = &pdev->dev;
454	struct ahci_host_priv *hpriv;
455	struct ecx_plat_data *pdata;
456	struct ata_host *host;
457	struct resource *mem;
458	int irq;
459	int i;
460	int rc;
461	u32 n_ports;
462	struct ata_port_info pi = ahci_highbank_port_info;
463	const struct ata_port_info *ppi[] = { &pi, NULL };
464
465	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
466	if (!mem) {
467		dev_err(dev, "no mmio space\n");
468		return -EINVAL;
469	}
470
471	irq = platform_get_irq(pdev, 0);
472	if (irq < 0)
473		return irq;
474	if (!irq)
475		return -EINVAL;
476
477	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
478	if (!hpriv) {
479		dev_err(dev, "can't alloc ahci_host_priv\n");
480		return -ENOMEM;
481	}
482	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
483	if (!pdata) {
484		dev_err(dev, "can't alloc ecx_plat_data\n");
485		return -ENOMEM;
486	}
487
488	hpriv->irq = irq;
489	hpriv->flags |= (unsigned long)pi.private_data;
490
491	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
492	if (!hpriv->mmio) {
493		dev_err(dev, "can't map %pR\n", mem);
494		return -ENOMEM;
495	}
496
497	rc = highbank_initialize_phys(dev, hpriv->mmio);
498	if (rc)
499		return rc;
500
501
502	ahci_save_initial_config(dev, hpriv);
503
504	/* prepare host */
505	if (hpriv->cap & HOST_CAP_NCQ)
506		pi.flags |= ATA_FLAG_NCQ;
507
508	if (hpriv->cap & HOST_CAP_PMP)
509		pi.flags |= ATA_FLAG_PMP;
510
511	if (hpriv->cap & HOST_CAP_64)
512		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
513
514	/* CAP.NP sometimes indicate the index of the last enabled
515	 * port, at other times, that of the last possible port, so
516	 * determining the maximum port number requires looking at
517	 * both CAP.NP and port_map.
518	 */
519	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
520
521	pdata->n_ports = n_ports;
522	hpriv->plat_data = pdata;
523	highbank_set_em_messages(dev, hpriv, &pi);
524
525	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
526	if (!host) {
527		rc = -ENOMEM;
528		goto err0;
529	}
530
531	host->private_data = hpriv;
532
533	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
534		host->flags |= ATA_HOST_PARALLEL_SCAN;
535
536	for (i = 0; i < host->n_ports; i++) {
537		struct ata_port *ap = host->ports[i];
538
539		ata_port_desc(ap, "mmio %pR", mem);
540		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
541
542		/* set enclosure management message type */
543		if (ap->flags & ATA_FLAG_EM)
544			ap->em_message_type = hpriv->em_msg_type;
545
546		/* disabled/not-implemented port */
547		if (!(hpriv->port_map & (1 << i)))
548			ap->ops = &ata_dummy_port_ops;
549	}
550
551	rc = ahci_reset_controller(host);
552	if (rc)
553		goto err0;
554
555	ahci_init_controller(host);
556	ahci_print_info(host, "platform");
557
558	rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
559	if (rc)
560		goto err0;
561
562	return 0;
563err0:
564	return rc;
565}
566
567#ifdef CONFIG_PM_SLEEP
568static int ahci_highbank_suspend(struct device *dev)
569{
570	struct ata_host *host = dev_get_drvdata(dev);
571	struct ahci_host_priv *hpriv = host->private_data;
572	void __iomem *mmio = hpriv->mmio;
573	u32 ctl;
574
575	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
576		dev_err(dev, "firmware update required for suspend/resume\n");
577		return -EIO;
578	}
579
580	/*
581	 * AHCI spec rev1.1 section 8.3.3:
582	 * Software must disable interrupts prior to requesting a
583	 * transition of the HBA to D3 state.
584	 */
585	ctl = readl(mmio + HOST_CTL);
586	ctl &= ~HOST_IRQ_EN;
587	writel(ctl, mmio + HOST_CTL);
588	readl(mmio + HOST_CTL); /* flush */
589
590	ata_host_suspend(host, PMSG_SUSPEND);
591	return 0;
592}
593
594static int ahci_highbank_resume(struct device *dev)
595{
596	struct ata_host *host = dev_get_drvdata(dev);
597	int rc;
598
599	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
600		rc = ahci_reset_controller(host);
601		if (rc)
602			return rc;
603
604		ahci_init_controller(host);
605	}
606
607	ata_host_resume(host);
608
609	return 0;
610}
611#endif
612
613static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
614		  ahci_highbank_suspend, ahci_highbank_resume);
615
616static struct platform_driver ahci_highbank_driver = {
617	.remove_new = ata_platform_remove_one,
618        .driver = {
619                .name = "highbank-ahci",
620                .of_match_table = ahci_of_match,
621                .pm = &ahci_highbank_pm_ops,
622        },
623	.probe = ahci_highbank_probe,
624};
625
626module_platform_driver(ahci_highbank_driver);
627
628MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
629MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
630MODULE_LICENSE("GPL");
631MODULE_ALIAS("sata:highbank");
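The enclosure-management path above packs three LED signals per port (activity, locate, fault) into a single pattern that ecx_transmit_led_message() then bit-bangs over the SGPIO pins. The standalone sketch below mirrors sgpio_bit_shift() and ecx_parse_sgpio() to show that packing in isolation; it is illustrative only (the port_to_sgpio table and the sample message are hypothetical, not taken from a real board) and is not part of the driver.

/* Standalone illustration of the SGPIO pattern construction used by
 * ecx_transmit_led_message(). Example values only. */
#include <stdio.h>
#include <stdint.h>

#define ECX_ACTIVITY_BITS	0x300000
#define ECX_LOCATE_BITS		0x80000
#define ECX_FAULT_BITS		0x400000

/* Hypothetical port-to-SGPIO mapping (normally from "calxeda,led-order"). */
static const uint32_t port_to_sgpio[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

/* Same arithmetic as sgpio_bit_shift(): three bits per port, one per signal. */
static uint32_t sgpio_bit(uint32_t port, uint32_t shift)
{
	return 1u << (3 * port_to_sgpio[port] + shift);
}

int main(void)
{
	uint32_t pattern = 0;
	uint32_t port = 2;                                    /* sample port */
	uint32_t state = ECX_ACTIVITY_BITS | ECX_FAULT_BITS;  /* sample EM message */

	if (state & ECX_ACTIVITY_BITS)
		pattern |= sgpio_bit(port, 0);   /* ECX_ACTIVITY_SHIFT */
	if (state & ECX_LOCATE_BITS)
		pattern |= sgpio_bit(port, 1);   /* ECX_LOCATE_SHIFT */
	if (state & ECX_FAULT_BITS)
		pattern |= sgpio_bit(port, 2);   /* ECX_FAULT_SHIFT */

	/* Port 2 occupies bits 6..8, so activity + fault gives 0x140. */
	printf("sgpio pattern: 0x%x\n", pattern);
	return 0;
}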
v3.15
 
  1/*
  2 * Calxeda Highbank AHCI SATA platform driver
  3 * Copyright 2012 Calxeda, Inc.
  4 *
  5 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
  6 *
  7 * This program is free software; you can redistribute it and/or modify it
  8 * under the terms and conditions of the GNU General Public License,
  9 * version 2, as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope it will be useful, but WITHOUT
 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 14 * more details.
 15 *
 16 * You should have received a copy of the GNU General Public License along with
 17 * this program.  If not, see <http://www.gnu.org/licenses/>.
 18 */
 19#include <linux/kernel.h>
 20#include <linux/gfp.h>
 21#include <linux/module.h>
 22#include <linux/types.h>
 23#include <linux/err.h>
 24#include <linux/io.h>
 25#include <linux/spinlock.h>
 26#include <linux/device.h>
 27#include <linux/of_device.h>
 28#include <linux/of_address.h>
 29#include <linux/platform_device.h>
 30#include <linux/libata.h>
 31#include <linux/interrupt.h>
 32#include <linux/delay.h>
 33#include <linux/export.h>
 34#include <linux/gpio.h>
 35#include <linux/of_gpio.h>
 36
 37#include "ahci.h"
 38
 39#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
 40#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
 41#define SERDES_CR_CTL			0x80a0
 42#define SERDES_CR_ADDR			0x80a1
 43#define SERDES_CR_DATA			0x80a2
 44#define CR_BUSY				0x0001
 45#define CR_START			0x0001
 46#define CR_WR_RDN			0x0002
 47#define CPHY_TX_INPUT_STS		0x2001
 48#define CPHY_RX_INPUT_STS		0x2002
 49#define CPHY_SATA_TX_OVERRIDE		0x8000
 50#define CPHY_SATA_RX_OVERRIDE	 	0x4000
 51#define CPHY_TX_OVERRIDE		0x2004
 52#define CPHY_RX_OVERRIDE		0x2005
 53#define SPHY_LANE			0x100
 54#define SPHY_HALF_RATE			0x0001
 55#define CPHY_SATA_DPLL_MODE		0x0700
 56#define CPHY_SATA_DPLL_SHIFT		8
 57#define CPHY_SATA_DPLL_RESET		(1 << 11)
 58#define CPHY_SATA_TX_ATTEN		0x1c00
 59#define CPHY_SATA_TX_ATTEN_SHIFT	10
 60#define CPHY_PHY_COUNT			6
 61#define CPHY_LANE_COUNT			4
 62#define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)
 63
 64static DEFINE_SPINLOCK(cphy_lock);
 65/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 66 * sata ports to their phys and then to their lanes within the phys
 67 */
 68struct phy_lane_info {
 69	void __iomem *phy_base;
 70	u8 lane_mapping;
 71	u8 phy_devs;
 72	u8 tx_atten;
 73};
 74static struct phy_lane_info port_data[CPHY_PORT_COUNT];
 75
 76static DEFINE_SPINLOCK(sgpio_lock);
 77#define SCLOCK				0
 78#define SLOAD				1
 79#define SDATA				2
 80#define SGPIO_PINS			3
 81#define SGPIO_PORTS			8
 82
 83struct ecx_plat_data {
 84	u32		n_ports;
 85	/* number of extra clocks that the SGPIO PIC controller expects */
 86	u32		pre_clocks;
 87	u32		post_clocks;
 88	unsigned	sgpio_gpio[SGPIO_PINS];
 89	u32		sgpio_pattern;
 90	u32		port_to_sgpio[SGPIO_PORTS];
 91};
 92
 93#define SGPIO_SIGNALS			3
 94#define ECX_ACTIVITY_BITS		0x300000
 95#define ECX_ACTIVITY_SHIFT		0
 96#define ECX_LOCATE_BITS			0x80000
 97#define ECX_LOCATE_SHIFT		1
 98#define ECX_FAULT_BITS			0x400000
 99#define ECX_FAULT_SHIFT			2
100static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
101				u32 shift)
102{
103	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
104}
105
106static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
107{
108	if (state & ECX_ACTIVITY_BITS)
109		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
110						ECX_ACTIVITY_SHIFT);
111	else
112		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
113						ECX_ACTIVITY_SHIFT);
114	if (state & ECX_LOCATE_BITS)
115		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
116						ECX_LOCATE_SHIFT);
117	else
118		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
119						ECX_LOCATE_SHIFT);
120	if (state & ECX_FAULT_BITS)
121		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
122						ECX_FAULT_SHIFT);
123	else
124		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
125						ECX_FAULT_SHIFT);
126}
127
128/*
129 * Tell the LED controller that the signal has changed by raising the clock
130 * line for 50 uS and then lowering it for 50 uS.
131 */
132static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
133{
134	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
135	udelay(50);
136	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
137	udelay(50);
138}
139
140static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
141					ssize_t size)
142{
143	struct ahci_host_priv *hpriv =  ap->host->private_data;
144	struct ecx_plat_data *pdata = hpriv->plat_data;
145	struct ahci_port_priv *pp = ap->private_data;
146	unsigned long flags;
147	int pmp, i;
148	struct ahci_em_priv *emp;
149	u32 sgpio_out;
150
151	/* get the slot number from the message */
152	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
153	if (pmp < EM_MAX_SLOTS)
154		emp = &pp->em_priv[pmp];
155	else
156		return -EINVAL;
157
158	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
159		return size;
160
161	spin_lock_irqsave(&sgpio_lock, flags);
162	ecx_parse_sgpio(pdata, ap->port_no, state);
163	sgpio_out = pdata->sgpio_pattern;
164	for (i = 0; i < pdata->pre_clocks; i++)
165		ecx_led_cycle_clock(pdata);
166
167	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
168	ecx_led_cycle_clock(pdata);
169	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
170	/*
171	 * bit-bang out the SGPIO pattern, by consuming a bit and then
172	 * clocking it out.
173	 */
174	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
175		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
176		sgpio_out >>= 1;
177		ecx_led_cycle_clock(pdata);
178	}
179	for (i = 0; i < pdata->post_clocks; i++)
180		ecx_led_cycle_clock(pdata);
181
182	/* save off new led state for port/slot */
183	emp->led_state = state;
184
185	spin_unlock_irqrestore(&sgpio_lock, flags);
186	return size;
187}
188
189static void highbank_set_em_messages(struct device *dev,
190					struct ahci_host_priv *hpriv,
191					struct ata_port_info *pi)
192{
193	struct device_node *np = dev->of_node;
194	struct ecx_plat_data *pdata = hpriv->plat_data;
195	int i;
196	int err;
197
198	for (i = 0; i < SGPIO_PINS; i++) {
199		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
200		if (IS_ERR_VALUE(err))
201			return;
202
203		pdata->sgpio_gpio[i] = err;
204		err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
205		if (err) {
206			pr_err("sata_highbank gpio_request %d failed: %d\n",
207					i, err);
208			return;
209		}
210		gpio_direction_output(pdata->sgpio_gpio[i], 1);
211	}
212	of_property_read_u32_array(np, "calxeda,led-order",
213						pdata->port_to_sgpio,
214						pdata->n_ports);
215	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
216		pdata->pre_clocks = 0;
217	if (of_property_read_u32(np, "calxeda,post-clocks",
218				&pdata->post_clocks))
219		pdata->post_clocks = 0;
220
221	/* store em_loc */
222	hpriv->em_loc = 0;
223	hpriv->em_buf_sz = 4;
224	hpriv->em_msg_type = EM_MSG_TYPE_LED;
225	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
226}
227
228static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
229{
230	u32 data;
231	u8 dev = port_data[sata_port].phy_devs;
232	spin_lock(&cphy_lock);
233	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
234	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
235	spin_unlock(&cphy_lock);
236	return data;
237}
238
239static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
240{
241	u8 dev = port_data[sata_port].phy_devs;
242	spin_lock(&cphy_lock);
243	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
244	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
245	spin_unlock(&cphy_lock);
246}
247
248static void combo_phy_wait_for_ready(u8 sata_port)
249{
250	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
251		udelay(5);
252}
253
254static u32 combo_phy_read(u8 sata_port, u32 addr)
255{
256	combo_phy_wait_for_ready(sata_port);
257	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
258	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
259	combo_phy_wait_for_ready(sata_port);
260	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
261}
262
263static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
264{
265	combo_phy_wait_for_ready(sata_port);
266	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
267	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
268	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
269}
270
271static void highbank_cphy_disable_overrides(u8 sata_port)
272{
273	u8 lane = port_data[sata_port].lane_mapping;
274	u32 tmp;
275	if (unlikely(port_data[sata_port].phy_base == NULL))
276		return;
277	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
278	tmp &= ~CPHY_SATA_RX_OVERRIDE;
279	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
280}
281
282static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
283{
284	u8 lane = port_data[sata_port].lane_mapping;
285	u32 tmp;
286
287	if (val & 0x8)
288		return;
289
290	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
291	tmp &= ~CPHY_SATA_TX_OVERRIDE;
292	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
293
294	tmp |= CPHY_SATA_TX_OVERRIDE;
295	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
296
297	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
298	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
299}
300
301static void cphy_override_rx_mode(u8 sata_port, u32 val)
302{
303	u8 lane = port_data[sata_port].lane_mapping;
304	u32 tmp;
305	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
306	tmp &= ~CPHY_SATA_RX_OVERRIDE;
307	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
308
309	tmp |= CPHY_SATA_RX_OVERRIDE;
310	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
311
312	tmp &= ~CPHY_SATA_DPLL_MODE;
313	tmp |= val << CPHY_SATA_DPLL_SHIFT;
314	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
315
316	tmp |= CPHY_SATA_DPLL_RESET;
317	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
318
319	tmp &= ~CPHY_SATA_DPLL_RESET;
320	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
321
322	msleep(15);
323}
324
325static void highbank_cphy_override_lane(u8 sata_port)
326{
327	u8 lane = port_data[sata_port].lane_mapping;
328	u32 tmp, k = 0;
329
330	if (unlikely(port_data[sata_port].phy_base == NULL))
331		return;
332	do {
333		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
334						lane * SPHY_LANE);
335	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
336	cphy_override_rx_mode(sata_port, 3);
337	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
338}
339
340static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
341{
342	struct device_node *sata_node = dev->of_node;
343	int phy_count = 0, phy, port = 0, i;
344	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
345	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
346	u32 tx_atten[CPHY_PORT_COUNT] = {};
347
348	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
349
350	do {
351		u32 tmp;
352		struct of_phandle_args phy_data;
353		if (of_parse_phandle_with_args(sata_node,
354				"calxeda,port-phys", "#phy-cells",
355				port, &phy_data))
356			break;
357		for (phy = 0; phy < phy_count; phy++) {
358			if (phy_nodes[phy] == phy_data.np)
359				break;
360		}
361		if (phy_nodes[phy] == NULL) {
362			phy_nodes[phy] = phy_data.np;
363			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
364			if (cphy_base[phy] == NULL) {
365				return 0;
366			}
367			phy_count += 1;
368		}
369		port_data[port].lane_mapping = phy_data.args[0];
370		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
371		port_data[port].phy_devs = tmp;
372		port_data[port].phy_base = cphy_base[phy];
373		of_node_put(phy_data.np);
374		port += 1;
375	} while (port < CPHY_PORT_COUNT);
376	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
377				tx_atten, port);
378	for (i = 0; i < port; i++)
379		port_data[i].tx_atten = (u8) tx_atten[i];
380	return 0;
381}
382
383/*
384 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
385 * Retrying the phy hard reset can work around the issue, but the drive
386 * may fail again. In less than 150 out of 15000 test runs, it took more
387 * than 10 tries for the link to be established (but never more than 35).
388 * Triple the maximum observed retry count to provide plenty of margin for
389 * rare events and to guarantee that the link is established.
390 *
391 * Also, the default 2 second time-out on a failed drive is too long in
392 * this situation. The uboot implementation of the same driver function
393 * uses a much shorter time-out period and never experiences a time out
394 * issue. Reducing the time-out to 500ms improves the responsiveness.
395 * The other timing constants were kept the same as the stock AHCI driver.
396 * This change was also tested 15000 times on 24 drives and none of them
397 * experienced a time out.
398 */
399static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
400				unsigned long deadline)
401{
402	static const unsigned long timing[] = { 5, 100, 500};
403	struct ata_port *ap = link->ap;
404	struct ahci_port_priv *pp = ap->private_data;
405	struct ahci_host_priv *hpriv = ap->host->private_data;
406	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
407	struct ata_taskfile tf;
408	bool online;
409	u32 sstatus;
410	int rc;
411	int retry = 100;
412
413	ahci_stop_engine(ap);
414
415	/* clear D2H reception area to properly wait for D2H FIS */
416	ata_tf_init(link->device, &tf);
417	tf.command = ATA_BUSY;
418	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
419
420	do {
421		highbank_cphy_disable_overrides(link->ap->port_no);
422		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
423		highbank_cphy_override_lane(link->ap->port_no);
424
425		/* If the status is 1, we are connected, but the link did not
426		 * come up. So retry resetting the link again.
427		 */
428		if (sata_scr_read(link, SCR_STATUS, &sstatus))
429			break;
430		if (!(sstatus & 0x3))
431			break;
432	} while (!online && retry--);
433
434	hpriv->start_engine(ap);
435
436	if (online)
437		*class = ahci_dev_classify(ap);
438
439	return rc;
440}
441
442static struct ata_port_operations ahci_highbank_ops = {
443	.inherits		= &ahci_ops,
444	.hardreset		= ahci_highbank_hardreset,
445	.transmit_led_message   = ecx_transmit_led_message,
446};
447
448static const struct ata_port_info ahci_highbank_port_info = {
449	.flags          = AHCI_FLAG_COMMON,
450	.pio_mask       = ATA_PIO4,
451	.udma_mask      = ATA_UDMA6,
452	.port_ops       = &ahci_highbank_ops,
453};
454
455static struct scsi_host_template ahci_highbank_platform_sht = {
456	AHCI_SHT("sata_highbank"),
457};
458
459static const struct of_device_id ahci_of_match[] = {
460	{ .compatible = "calxeda,hb-ahci" },
461	{},
462};
463MODULE_DEVICE_TABLE(of, ahci_of_match);
464
465static int ahci_highbank_probe(struct platform_device *pdev)
466{
467	struct device *dev = &pdev->dev;
468	struct ahci_host_priv *hpriv;
469	struct ecx_plat_data *pdata;
470	struct ata_host *host;
471	struct resource *mem;
472	int irq;
473	int i;
474	int rc;
475	u32 n_ports;
476	struct ata_port_info pi = ahci_highbank_port_info;
477	const struct ata_port_info *ppi[] = { &pi, NULL };
478
479	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
480	if (!mem) {
481		dev_err(dev, "no mmio space\n");
482		return -EINVAL;
483	}
484
485	irq = platform_get_irq(pdev, 0);
486	if (irq <= 0) {
487		dev_err(dev, "no irq\n");
488		return -EINVAL;
489	}
490
491	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
492	if (!hpriv) {
493		dev_err(dev, "can't alloc ahci_host_priv\n");
494		return -ENOMEM;
495	}
496	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
497	if (!pdata) {
498		dev_err(dev, "can't alloc ecx_plat_data\n");
499		return -ENOMEM;
500	}
501
502	hpriv->flags |= (unsigned long)pi.private_data;
503
504	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
505	if (!hpriv->mmio) {
506		dev_err(dev, "can't map %pR\n", mem);
507		return -ENOMEM;
508	}
509
510	rc = highbank_initialize_phys(dev, hpriv->mmio);
511	if (rc)
512		return rc;
513
514
515	ahci_save_initial_config(dev, hpriv, 0, 0);
516
517	/* prepare host */
518	if (hpriv->cap & HOST_CAP_NCQ)
519		pi.flags |= ATA_FLAG_NCQ;
520
521	if (hpriv->cap & HOST_CAP_PMP)
522		pi.flags |= ATA_FLAG_PMP;
523
524	if (hpriv->cap & HOST_CAP_64)
525		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
526
527	/* CAP.NP sometimes indicate the index of the last enabled
528	 * port, at other times, that of the last possible port, so
529	 * determining the maximum port number requires looking at
530	 * both CAP.NP and port_map.
531	 */
532	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
533
534	pdata->n_ports = n_ports;
535	hpriv->plat_data = pdata;
536	highbank_set_em_messages(dev, hpriv, &pi);
537
538	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
539	if (!host) {
540		rc = -ENOMEM;
541		goto err0;
542	}
543
544	host->private_data = hpriv;
545
546	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
547		host->flags |= ATA_HOST_PARALLEL_SCAN;
548
549	for (i = 0; i < host->n_ports; i++) {
550		struct ata_port *ap = host->ports[i];
551
552		ata_port_desc(ap, "mmio %pR", mem);
553		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
554
555		/* set enclosure management message type */
556		if (ap->flags & ATA_FLAG_EM)
557			ap->em_message_type = hpriv->em_msg_type;
558
559		/* disabled/not-implemented port */
560		if (!(hpriv->port_map & (1 << i)))
561			ap->ops = &ata_dummy_port_ops;
562	}
563
564	rc = ahci_reset_controller(host);
565	if (rc)
566		goto err0;
567
568	ahci_init_controller(host);
569	ahci_print_info(host, "platform");
570
571	rc = ata_host_activate(host, irq, ahci_interrupt, 0,
572					&ahci_highbank_platform_sht);
573	if (rc)
574		goto err0;
575
576	return 0;
577err0:
578	return rc;
579}
580
581#ifdef CONFIG_PM_SLEEP
582static int ahci_highbank_suspend(struct device *dev)
583{
584	struct ata_host *host = dev_get_drvdata(dev);
585	struct ahci_host_priv *hpriv = host->private_data;
586	void __iomem *mmio = hpriv->mmio;
587	u32 ctl;
588	int rc;
589
590	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
591		dev_err(dev, "firmware update required for suspend/resume\n");
592		return -EIO;
593	}
594
595	/*
596	 * AHCI spec rev1.1 section 8.3.3:
597	 * Software must disable interrupts prior to requesting a
598	 * transition of the HBA to D3 state.
599	 */
600	ctl = readl(mmio + HOST_CTL);
601	ctl &= ~HOST_IRQ_EN;
602	writel(ctl, mmio + HOST_CTL);
603	readl(mmio + HOST_CTL); /* flush */
604
605	rc = ata_host_suspend(host, PMSG_SUSPEND);
606	if (rc)
607		return rc;
608
609	return 0;
610}
611
612static int ahci_highbank_resume(struct device *dev)
613{
614	struct ata_host *host = dev_get_drvdata(dev);
615	int rc;
616
617	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
618		rc = ahci_reset_controller(host);
619		if (rc)
620			return rc;
621
622		ahci_init_controller(host);
623	}
624
625	ata_host_resume(host);
626
627	return 0;
628}
629#endif
630
631static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
632		  ahci_highbank_suspend, ahci_highbank_resume);
633
634static struct platform_driver ahci_highbank_driver = {
635	.remove = ata_platform_remove_one,
636        .driver = {
637                .name = "highbank-ahci",
638                .owner = THIS_MODULE,
639                .of_match_table = ahci_of_match,
640                .pm = &ahci_highbank_pm_ops,
641        },
642	.probe = ahci_highbank_probe,
643};
644
645module_platform_driver(ahci_highbank_driver);
646
647MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
648MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
649MODULE_LICENSE("GPL");
650MODULE_ALIAS("sata:highbank");
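Both versions access the combo PHY through the same indirect scheme: __combo_phy_reg_write() first writes a page-select word (CPHY_MAP) to phy_base + 0x800, then uses the low nine bits of the register address (CPHY_ADDR) as a 32-bit-word index into the mapped window. A minimal sketch of just that address arithmetic is shown below, with hypothetical input values; it is not part of the driver.

/* Illustration of the CPHY_MAP/CPHY_ADDR split used by the combo PHY
 * accessors above. The dev and addr values are examples only. */
#include <stdio.h>
#include <stdint.h>

#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr)     (((addr) & 0x1ff) << 2)

int main(void)
{
	uint32_t dev  = 1;       /* hypothetical phy_devs (from the "phydev" property) */
	uint32_t addr = 0x2005;  /* CPHY_RX_OVERRIDE for lane 0 */

	/* Value written to phy_base + 0x800 to select the page. */
	printf("map word:        0x%x\n", CPHY_MAP(dev, addr));  /* 0x90 */
	/* Byte offset of the register within the selected page. */
	printf("register offset: 0x%x\n", CPHY_ADDR(addr));      /* 0x14 */
	return 0;
}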