v3.5.6
   1/*
   2 * Libata based driver for Apple "macio" family of PATA controllers
   3 *
   4 * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
   5 *                     <benh@kernel.crashing.org>
   6 *
   7 * Some bits and pieces from drivers/ide/ppc/pmac.c
   8 *
   9 */
  10
  11#undef DEBUG
  12#undef DEBUG_DMA
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/init.h>
  17#include <linux/blkdev.h>
  18#include <linux/ata.h>
  19#include <linux/libata.h>
  20#include <linux/adb.h>
  21#include <linux/pmu.h>
   22#include <linux/scatterlist.h>
   23#include <linux/of.h>
   24#include <linux/gfp.h>
  25
  26#include <scsi/scsi.h>
  27#include <scsi/scsi_host.h>
  28#include <scsi/scsi_device.h>
  29
  30#include <asm/macio.h>
  31#include <asm/io.h>
  32#include <asm/dbdma.h>
  33#include <asm/pci-bridge.h>
  34#include <asm/machdep.h>
  35#include <asm/pmac_feature.h>
  36#include <asm/mediabay.h>
  37
  38#ifdef DEBUG_DMA
  39#define dev_dbgdma(dev, format, arg...)		\
  40	dev_printk(KERN_DEBUG , dev , format , ## arg)
  41#else
  42#define dev_dbgdma(dev, format, arg...)		\
  43	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
  44#endif
  45
  46#define DRV_NAME	"pata_macio"
  47#define DRV_VERSION	"0.9"
  48
  49/* Models of macio ATA controller */
  50enum {
  51	controller_ohare,	/* OHare based */
  52	controller_heathrow,	/* Heathrow/Paddington */
  53	controller_kl_ata3,	/* KeyLargo ATA-3 */
  54	controller_kl_ata4,	/* KeyLargo ATA-4 */
  55	controller_un_ata6,	/* UniNorth2 ATA-6 */
  56	controller_k2_ata6,	/* K2 ATA-6 */
  57	controller_sh_ata6,	/* Shasta ATA-6 */
  58};
  59
  60static const char* macio_ata_names[] = {
  61	"OHare ATA",		/* OHare based */
  62	"Heathrow ATA",		/* Heathrow/Paddington */
  63	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
  64	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
  65	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
  66	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
  67	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
  68};
  69
  70/*
  71 * Extra registers, both 32-bit little-endian
  72 */
  73#define IDE_TIMING_CONFIG	0x200
  74#define IDE_INTERRUPT		0x300
  75
  76/* Kauai (U2) ATA has different register setup */
  77#define IDE_KAUAI_PIO_CONFIG	0x200
  78#define IDE_KAUAI_ULTRA_CONFIG	0x210
  79#define IDE_KAUAI_POLL_CONFIG	0x220
  80
  81/*
  82 * Timing configuration register definitions
  83 */
  84
  85/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
  86#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
  87#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
  88#define IDE_SYSCLK_NS		30	/* 33Mhz cell */
  89#define IDE_SYSCLK_66_NS	15	/* 66Mhz cell */
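     /*
      * For example, the rounding above means SYSCLK_TICKS(120) =
      * (120 + 30 - 1) / 30 = 4 ticks on the 33MHz cell, while
      * SYSCLK_TICKS_66(120) = (120 + 15 - 1) / 15 = 8 ticks on the
      * 66MHz cell.
      */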
  90
   91/* 133MHz cell, found in Shasta.
   92 * See the comments about the 100MHz UniNorth 2 cell below...
   93 * Note that PIO_MASK and MDMA_MASK seem to overlap; that's just
   94 * weird and I don't know why, at least at this stage.
   95 */
  96#define TR_133_PIOREG_PIO_MASK		0xff000fff
  97#define TR_133_PIOREG_MDMA_MASK		0x00fff800
  98#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
  99#define TR_133_UDMAREG_UDMA_EN		0x00000001
 100
  101/* 100MHz cell, found in UniNorth 2 and K2. It appears as a PCI device
  102 * (106b/0033) on the UniNorth or K2 internal PCI bus and its clock is
  103 * controlled like gem or fw. It appears to be an evolution of the KeyLargo
  104 * ATA-4 cell, with the timing register extended to two 32-bit registers
  105 * (one for PIO & MWDMA and one for UDMA) and a similar DBDMA channel.
  106 * It has its own local feature control register as well.
  107 *
  108 * After scratching my head over the timing values, at least for PIO
  109 * and MDMA, I think I've figured out the format of the timing register,
  110 * though I use pre-calculated tables for UDMA as usual...
  111 */
 112#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
 113#define TR_100_PIO_ADDRSETUP_SHIFT	24
 114#define TR_100_MDMA_MASK		0x00fff000
 115#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
 116#define TR_100_MDMA_RECOVERY_SHIFT	18
 117#define TR_100_MDMA_ACCESS_MASK		0x0003f000
 118#define TR_100_MDMA_ACCESS_SHIFT	12
 119#define TR_100_PIO_MASK			0xff000fff
 120#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
 121#define TR_100_PIO_RECOVERY_SHIFT	6
 122#define TR_100_PIO_ACCESS_MASK		0x0000003f
 123#define TR_100_PIO_ACCESS_SHIFT		0
 124
 125#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
 126#define TR_100_UDMAREG_UDMA_EN		0x00000001
 127
 128
  129/* 66MHz cell, found in KeyLargo. Can do Ultra modes 0 to 2 on a
  130 * 40-conductor cable and up to mode 4 on an 80-conductor one.
  131 * Clock unit is 15ns (66MHz).
  132 *
  133 * Three values can be programmed:
  134 *  - Write data setup, which appears to match the cycle time. They
  135 *    also call it DIOW setup.
  136 *  - Ready to pause time (from the spec)
  137 *  - Address setup. That one is weird. I don't see where exactly
  138 *    it fits in UDMA cycles; I got its name from an obscure piece
  139 *    of commented-out code in Darwin. They leave it at 0, and we do as
  140 *    well, despite a comment that would lead one to think it has a
  141 *    minimum value of 45ns.
  142 * Apple also adds 60ns to the write data setup (or cycle time?) on
  143 * reads.
  144 */
 145#define TR_66_UDMA_MASK			0xfff00000
 146#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
 147#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
 148#define TR_66_PIO_ADDRSETUP_SHIFT	29
 149#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
 150#define TR_66_UDMA_RDY2PAUS_SHIFT	25
 151#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
 152#define TR_66_UDMA_WRDATASETUP_SHIFT	21
 153#define TR_66_MDMA_MASK			0x000ffc00
 154#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
 155#define TR_66_MDMA_RECOVERY_SHIFT	15
 156#define TR_66_MDMA_ACCESS_MASK		0x00007c00
 157#define TR_66_MDMA_ACCESS_SHIFT		10
 158#define TR_66_PIO_MASK			0xe00003ff
 159#define TR_66_PIO_RECOVERY_MASK		0x000003e0
 160#define TR_66_PIO_RECOVERY_SHIFT	5
 161#define TR_66_PIO_ACCESS_MASK		0x0000001f
 162#define TR_66_PIO_ACCESS_SHIFT		0
 163
  164/* 33MHz cell, found in OHare, Heathrow (& Paddington) and KeyLargo.
  165 * Can do PIO & MDMA modes; clock unit is 30ns (33MHz).
  166 *
  167 * The access time and recovery time can be programmed. Some older
  168 * Darwin code bases limit OHare to a 150ns cycle time, and I decided to
  169 * do the same here for safety against broken old hardware ;)
  170 * The HalfTick bit, when set, adds half a clock (15ns) to the access
  171 * time and removes one from the recovery time. It's not supported on the
  172 * KeyLargo implementation afaik. The E bit appears to be set for PIO mode 0
  173 * and is used to reach the long timings used in that mode.
  174 */
 175#define TR_33_MDMA_MASK			0x003ff800
 176#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
 177#define TR_33_MDMA_RECOVERY_SHIFT	16
 178#define TR_33_MDMA_ACCESS_MASK		0x0000f800
 179#define TR_33_MDMA_ACCESS_SHIFT		11
 180#define TR_33_MDMA_HALFTICK		0x00200000
 181#define TR_33_PIO_MASK			0x000007ff
 182#define TR_33_PIO_E			0x00000400
 183#define TR_33_PIO_RECOVERY_MASK		0x000003e0
 184#define TR_33_PIO_RECOVERY_SHIFT	5
 185#define TR_33_PIO_ACCESS_MASK		0x0000001f
 186#define TR_33_PIO_ACCESS_SHIFT		0
 187
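     /*
      * Unused illustration: a minimal sketch (guarded out, and not part of
      * the original driver) of how a 33MHz-cell PIO timing word could be
      * composed from access/recovery times in nanoseconds using the macros
      * and field definitions above. The driver itself relies on the
      * pre-calculated tables further down instead.
      */
     #if 0
     static u32 tr_33_pio_timing_sketch(unsigned int access_ns,
     				   unsigned int recovery_ns)
     {
     	u32 acc = SYSCLK_TICKS(access_ns);
     	u32 rec = SYSCLK_TICKS(recovery_ns);
     
     	/* Pack the tick counts into the 33MHz PIO fields */
     	return ((acc << TR_33_PIO_ACCESS_SHIFT) & TR_33_PIO_ACCESS_MASK) |
     	       ((rec << TR_33_PIO_RECOVERY_SHIFT) & TR_33_PIO_RECOVERY_MASK);
     }
     #endif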
  188/*
  189 * Interrupt register definitions. The register is only present on
  190 * newer cells (KeyLargo and later, afaik), so we don't use it.
  191 */
 192#define IDE_INTR_DMA			0x80000000
 193#define IDE_INTR_DEVICE			0x40000000
 194
 195/*
 196 * FCR Register on Kauai. Not sure what bit 0x4 is  ...
 197 */
 198#define KAUAI_FCR_UATA_MAGIC		0x00000004
 199#define KAUAI_FCR_UATA_RESET_N		0x00000002
 200#define KAUAI_FCR_UATA_ENABLE		0x00000001
 201
 202
 203/* Allow up to 256 DBDMA commands per xfer */
 204#define MAX_DCMDS		256
 205
 206/* Don't let a DMA segment go all the way to 64K */
 207#define MAX_DBDMA_SEG		0xff00
 208
 209
  210/*
  211 * Wait 1s for the disk to answer on the IDE bus after a hard reset
  212 * of the device (via GPIO/FCR).
  213 *
  214 * Some devices seem to "pollute" the bus even after dropping
  215 * the BSY bit (typically some combo drives configured as slave on the
  216 * UDMA bus) after a hard reset. Since we hard reset all drives on
  217 * KeyLargo ATA66, we have to keep that delay around. I may end
  218 * up not hard resetting anymore on these and keep the delay only
  219 * for older interfaces instead (we have to reset when coming
  220 * from MacOS...) --BenH.
  221 */
 222#define IDE_WAKEUP_DELAY_MS	1000
 223
 224struct pata_macio_timing;
 225
 226struct pata_macio_priv {
 227	int				kind;
 228	int				aapl_bus_id;
 229	int				mediabay : 1;
 230	struct device_node		*node;
 231	struct macio_dev		*mdev;
 232	struct pci_dev			*pdev;
 233	struct device			*dev;
 234	int				irq;
 235	u32				treg[2][2];
 236	void __iomem			*tfregs;
 237	void __iomem			*kauai_fcr;
 238	struct dbdma_cmd *		dma_table_cpu;
 239	dma_addr_t			dma_table_dma;
 240	struct ata_host			*host;
 241	const struct pata_macio_timing	*timings;
 242};
 243
  244/* Previous versions of this driver used to calculate timings
  245 * for some variants of the chip and to use tables for others.
  246 *
  247 * Not only was this confusing, but in addition, it isn't clear
  248 * whether our calculation code was correct. It didn't entirely
  249 * match the Darwin code or whatever documentation I could find
  250 * on these cells.
  251 *
  252 * I decided to rely entirely on tables instead for this version
  253 * of the driver. Also, because I don't really care about derated
  254 * modes and really old HW other than making it work, I'm not going
  255 * to calculate / snoop timing values for anything other than the
  256 * standard modes.
  257 */
 258struct pata_macio_timing {
 259	int	mode;
 260	u32	reg1;	/* Bits to set in first timing reg */
 261	u32	reg2;	/* Bits to set in second timing reg */
 262};
 263
 264static const struct pata_macio_timing pata_macio_ohare_timings[] = {
 265	{ XFER_PIO_0,		0x00000526,	0, },
 266	{ XFER_PIO_1,		0x00000085,	0, },
 267	{ XFER_PIO_2,		0x00000025,	0, },
 268	{ XFER_PIO_3,		0x00000025,	0, },
 269	{ XFER_PIO_4,		0x00000025,	0, },
 270	{ XFER_MW_DMA_0,	0x00074000,	0, },
 271	{ XFER_MW_DMA_1,	0x00221000,	0, },
 272	{ XFER_MW_DMA_2,	0x00211000,	0, },
 273	{ -1, 0, 0 }
 274};
 275
 276static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
 277	{ XFER_PIO_0,		0x00000526,	0, },
 278	{ XFER_PIO_1,		0x00000085,	0, },
 279	{ XFER_PIO_2,		0x00000025,	0, },
 280	{ XFER_PIO_3,		0x00000025,	0, },
 281	{ XFER_PIO_4,		0x00000025,	0, },
 282	{ XFER_MW_DMA_0,	0x00074000,	0, },
 283	{ XFER_MW_DMA_1,	0x00221000,	0, },
 284	{ XFER_MW_DMA_2,	0x00211000,	0, },
 285	{ -1, 0, 0 }
 286};
 287
 288static const struct pata_macio_timing pata_macio_kl33_timings[] = {
 289	{ XFER_PIO_0,		0x00000526,	0, },
 290	{ XFER_PIO_1,		0x00000085,	0, },
 291	{ XFER_PIO_2,		0x00000025,	0, },
 292	{ XFER_PIO_3,		0x00000025,	0, },
 293	{ XFER_PIO_4,		0x00000025,	0, },
 294	{ XFER_MW_DMA_0,	0x00084000,	0, },
 295	{ XFER_MW_DMA_1,	0x00021800,	0, },
 296	{ XFER_MW_DMA_2,	0x00011800,	0, },
 297	{ -1, 0, 0 }
 298};
 299
 300static const struct pata_macio_timing pata_macio_kl66_timings[] = {
 301	{ XFER_PIO_0,		0x0000038c,	0, },
 302	{ XFER_PIO_1,		0x0000020a,	0, },
 303	{ XFER_PIO_2,		0x00000127,	0, },
 304	{ XFER_PIO_3,		0x000000c6,	0, },
 305	{ XFER_PIO_4,		0x00000065,	0, },
 306	{ XFER_MW_DMA_0,	0x00084000,	0, },
 307	{ XFER_MW_DMA_1,	0x00029800,	0, },
 308	{ XFER_MW_DMA_2,	0x00019400,	0, },
 309	{ XFER_UDMA_0,		0x19100000,	0, },
 310	{ XFER_UDMA_1,		0x14d00000,	0, },
 311	{ XFER_UDMA_2,		0x10900000,	0, },
 312	{ XFER_UDMA_3,		0x0c700000,	0, },
 313	{ XFER_UDMA_4,		0x0c500000,	0, },
 314	{ -1, 0, 0 }
 315};
 316
 317static const struct pata_macio_timing pata_macio_kauai_timings[] = {
 318	{ XFER_PIO_0,		0x08000a92,	0, },
 319	{ XFER_PIO_1,		0x0800060f,	0, },
 320	{ XFER_PIO_2,		0x0800038b,	0, },
 321	{ XFER_PIO_3,		0x05000249,	0, },
 322	{ XFER_PIO_4,		0x04000148,	0, },
 323	{ XFER_MW_DMA_0,	0x00618000,	0, },
 324	{ XFER_MW_DMA_1,	0x00209000,	0, },
 325	{ XFER_MW_DMA_2,	0x00148000,	0, },
 326	{ XFER_UDMA_0,		         0,	0x000070c1, },
 327	{ XFER_UDMA_1,		         0,	0x00005d81, },
 328	{ XFER_UDMA_2,		         0,	0x00004a61, },
 329	{ XFER_UDMA_3,		         0,	0x00003a51, },
 330	{ XFER_UDMA_4,		         0,	0x00002a31, },
 331	{ XFER_UDMA_5,		         0,	0x00002921, },
 332	{ -1, 0, 0 }
 333};
 334
 335static const struct pata_macio_timing pata_macio_shasta_timings[] = {
 336	{ XFER_PIO_0,		0x0a000c97,	0, },
 337	{ XFER_PIO_1,		0x07000712,	0, },
 338	{ XFER_PIO_2,		0x040003cd,	0, },
 339	{ XFER_PIO_3,		0x0500028b,	0, },
 340	{ XFER_PIO_4,		0x0400010a,	0, },
 341	{ XFER_MW_DMA_0,	0x00820800,	0, },
 342	{ XFER_MW_DMA_1,	0x0028b000,	0, },
 343	{ XFER_MW_DMA_2,	0x001ca000,	0, },
 344	{ XFER_UDMA_0,		         0,	0x00035901, },
 345	{ XFER_UDMA_1,		         0,	0x000348b1, },
 346	{ XFER_UDMA_2,		         0,	0x00033881, },
 347	{ XFER_UDMA_3,		         0,	0x00033861, },
 348	{ XFER_UDMA_4,		         0,	0x00033841, },
 349	{ XFER_UDMA_5,		         0,	0x00033031, },
 350	{ XFER_UDMA_6,		         0,	0x00033021, },
 351	{ -1, 0, 0 }
 352};
 353
 354static const struct pata_macio_timing *pata_macio_find_timing(
 355					    struct pata_macio_priv *priv,
 356					    int mode)
 357{
 358	int i;
 359
 360	for (i = 0; priv->timings[i].mode > 0; i++) {
 361		if (priv->timings[i].mode == mode)
 362			return &priv->timings[i];
 363	}
 364	return NULL;
 365}
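     /*
      * For instance, with the Kauai table above, looking up XFER_UDMA_4
      * returns the entry whose second timing word is 0x00002a31, while a
      * mode with no table entry simply yields NULL (the caller below then
      * falls back to XFER_PIO_0 or XFER_MW_DMA_0 as appropriate).
      */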
 366
 367
 368static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
 369{
 370	struct pata_macio_priv *priv = ap->private_data;
 371	void __iomem *rbase = ap->ioaddr.cmd_addr;
 372
 373	if (priv->kind == controller_sh_ata6 ||
 374	    priv->kind == controller_un_ata6 ||
 375	    priv->kind == controller_k2_ata6) {
 376		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
 377		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
 378	} else
 379		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
 380}
 381
 382static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
 383{
 384	ata_sff_dev_select(ap, device);
 385
 386	/* Apply timings */
 387	pata_macio_apply_timings(ap, device);
 388}
 389
 390static void pata_macio_set_timings(struct ata_port *ap,
 391				   struct ata_device *adev)
 392{
 393	struct pata_macio_priv *priv = ap->private_data;
 394	const struct pata_macio_timing *t;
 395
 396	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
 397		adev->devno,
 398		adev->pio_mode,
 399		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
 400		adev->dma_mode,
 401		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
 402
 403	/* First clear timings */
 404	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
 405
 406	/* Now get the PIO timings */
 407	t = pata_macio_find_timing(priv, adev->pio_mode);
 408	if (t == NULL) {
 409		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
 410			 adev->pio_mode);
 411		t = pata_macio_find_timing(priv, XFER_PIO_0);
 412	}
 413	BUG_ON(t == NULL);
 414
 415	/* PIO timings only ever use the first treg */
 416	priv->treg[adev->devno][0] |= t->reg1;
 417
 418	/* Now get DMA timings */
 419	t = pata_macio_find_timing(priv, adev->dma_mode);
 420	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
 421		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
 422		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
 423	}
 424	BUG_ON(t == NULL);
 425
 426	/* DMA timings can use both tregs */
 427	priv->treg[adev->devno][0] |= t->reg1;
 428	priv->treg[adev->devno][1] |= t->reg2;
 429
 430	dev_dbg(priv->dev, " -> %08x %08x\n",
 431		priv->treg[adev->devno][0],
 432		priv->treg[adev->devno][1]);
 433
 434	/* Apply to hardware */
 435	pata_macio_apply_timings(ap, adev->devno);
 436}
 437
  438/*
  439 * Blast some well-known "safe" values into the timing registers at init
  440 * or wakeup-from-sleep time, before the real timings are calculated.
  441 */
 442static void pata_macio_default_timings(struct pata_macio_priv *priv)
 443{
 444	unsigned int value, value2 = 0;
 445
 446	switch(priv->kind) {
 447		case controller_sh_ata6:
 448			value = 0x0a820c97;
 449			value2 = 0x00033031;
 450			break;
 451		case controller_un_ata6:
 452		case controller_k2_ata6:
 453			value = 0x08618a92;
 454			value2 = 0x00002921;
 455			break;
 456		case controller_kl_ata4:
 457			value = 0x0008438c;
 458			break;
 459		case controller_kl_ata3:
 460			value = 0x00084526;
 461			break;
 462		case controller_heathrow:
 463		case controller_ohare:
 464		default:
 465			value = 0x00074526;
 466			break;
 467	}
 468	priv->treg[0][0] = priv->treg[1][0] = value;
 469	priv->treg[0][1] = priv->treg[1][1] = value2;
 470}
 471
 472static int pata_macio_cable_detect(struct ata_port *ap)
 473{
 474	struct pata_macio_priv *priv = ap->private_data;
 475
 476	/* Get cable type from device-tree */
 477	if (priv->kind == controller_kl_ata4 ||
 478	    priv->kind == controller_un_ata6 ||
 479	    priv->kind == controller_k2_ata6 ||
 480	    priv->kind == controller_sh_ata6) {
 481		const char* cable = of_get_property(priv->node, "cable-type",
 482						    NULL);
 483		struct device_node *root = of_find_node_by_path("/");
 484		const char *model = of_get_property(root, "model", NULL);
 485
 
 
 486		if (cable && !strncmp(cable, "80-", 3)) {
  487			/* Some drives fail to detect the 80c cable in
  488			 * PowerBooks. These machines use a proprietary
  489			 * short IDE cable anyway.
  490			 */
 491			if (!strncmp(model, "PowerBook", 9))
 492				return ATA_CBL_PATA40_SHORT;
 493			else
 494				return ATA_CBL_PATA80;
 495		}
 496	}
 497
  498	/* G5s seem to have an incorrect cable type in the device-tree.
  499	 * Let's assume they always have an 80-conductor cable; this seems
  500	 * to always be the case unless the user mucked around.
  501	 */
 502	if (of_device_is_compatible(priv->node, "K2-UATA") ||
 503	    of_device_is_compatible(priv->node, "shasta-ata"))
 504		return ATA_CBL_PATA80;
 505
 506	/* Anything else is 40 connectors */
 507	return ATA_CBL_PATA40;
 508}
 509
 510static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 511{
 512	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 513	struct ata_port *ap = qc->ap;
 514	struct pata_macio_priv *priv = ap->private_data;
 515	struct scatterlist *sg;
 516	struct dbdma_cmd *table;
 517	unsigned int si, pi;
 518
 519	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
 520		   __func__, qc, qc->flags, write, qc->dev->devno);
 521
 522	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 523		return;
 524
 525	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 526
 527	pi = 0;
 528	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 529		u32 addr, sg_len, len;
 530
 531		/* determine if physical DMA addr spans 64K boundary.
 532		 * Note h/w doesn't support 64-bit, so we unconditionally
 533		 * truncate dma_addr_t to u32.
 534		 */
 535		addr = (u32) sg_dma_address(sg);
 536		sg_len = sg_dma_len(sg);
 537
 538		while (sg_len) {
 539			/* table overflow should never happen */
 540			BUG_ON (pi++ >= MAX_DCMDS);
 541
 542			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 543			st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE);
 544			st_le16(&table->req_count, len);
 545			st_le32(&table->phy_addr, addr);
 546			table->cmd_dep = 0;
 547			table->xfer_status = 0;
 548			table->res_count = 0;
 549			addr += len;
 550			sg_len -= len;
 551			++table;
 552		}
 553	}
 554
 555	/* Should never happen according to Tejun */
 556	BUG_ON(!pi);
 557
 558	/* Convert the last command to an input/output */
 559	table--;
 560	st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST);
 561	table++;
 562
 563	/* Add the stop command to the end of the list */
 564	memset(table, 0, sizeof(struct dbdma_cmd));
 565	st_le16(&table->command, DBDMA_STOP);
 566
  567	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
 568}
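     /*
      * Concretely, for the code above: a READ whose scatterlist maps to
      * three DMA segments (each below MAX_DBDMA_SEG) produces three
      * INPUT_MORE descriptors, the last of which is rewritten to
      * INPUT_LAST, followed by a terminating DBDMA_STOP descriptor.
      */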
 569
 570
 571static void pata_macio_freeze(struct ata_port *ap)
 572{
 573	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 574
 575	if (dma_regs) {
 576		unsigned int timeout = 1000000;
 577
 578		/* Make sure DMA controller is stopped */
 579		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
 580		while (--timeout && (readl(&dma_regs->status) & RUN))
 581			udelay(1);
 582	}
 583
 584	ata_sff_freeze(ap);
 585}
 586
 587
 588static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
 589{
 590	struct ata_port *ap = qc->ap;
 591	struct pata_macio_priv *priv = ap->private_data;
 592	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 593	int dev = qc->dev->devno;
 594
 595	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 596
  597	/* Make sure DMA command updates are visible */
 598	writel(priv->dma_table_dma, &dma_regs->cmdptr);
 599
  600	/* On the KeyLargo 66MHz cell, we need to add 60ns to wrDataSetup on
  601	 * UDMA reads
  602	 */
 603	if (priv->kind == controller_kl_ata4 &&
 604	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
 605		void __iomem *rbase = ap->ioaddr.cmd_addr;
 606		u32 reg = priv->treg[dev][0];
 607
 608		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 609			reg += 0x00800000;
 610		writel(reg, rbase + IDE_TIMING_CONFIG);
 611	}
 612
 613	/* issue r/w command */
 614	ap->ops->sff_exec_command(ap, &qc->tf);
 615}
 616
 617static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
 618{
 619	struct ata_port *ap = qc->ap;
 620	struct pata_macio_priv *priv = ap->private_data;
 621	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 622
 623	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 624
 625	writel((RUN << 16) | RUN, &dma_regs->control);
 626	/* Make sure it gets to the controller right now */
 627	(void)readl(&dma_regs->control);
 628}
 629
 630static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
 631{
 632	struct ata_port *ap = qc->ap;
 633	struct pata_macio_priv *priv = ap->private_data;
 634	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 635	unsigned int timeout = 1000000;
 636
 637	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 638
  639	/* Stop the DMA engine and wait for it to fully halt */
 640	writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
 641	while (--timeout && (readl(&dma_regs->status) & RUN))
 642		udelay(1);
 643}
 644
 645static u8 pata_macio_bmdma_status(struct ata_port *ap)
 646{
 647	struct pata_macio_priv *priv = ap->private_data;
 648	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 649	u32 dstat, rstat = ATA_DMA_INTR;
 650	unsigned long timeout = 0;
 651
 652	dstat = readl(&dma_regs->status);
 653
 654	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
 655
  656	/* We have two things to deal with here:
  657	 *
  658	 * - The dbdma won't stop if the command was started
  659	 * but completed with an error without transferring all
  660	 * the data. This happens when bad blocks are met during
  661	 * a multi-block transfer.
  662	 *
  663	 * - The dbdma fifo hasn't yet finished flushing to
  664	 * system memory when the disk interrupt occurs.
  665	 *
  666	 */
 667
 668	/* First check for errors */
 669	if ((dstat & (RUN|DEAD)) != RUN)
 670		rstat |= ATA_DMA_ERR;
 671
 672	/* If ACTIVE is cleared, the STOP command has been hit and
 673	 * the transfer is complete. If not, we have to flush the
 674	 * channel.
 675	 */
 676	if ((dstat & ACTIVE) == 0)
 677		return rstat;
 678
 679	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
 680
  681	/* If the dbdma hasn't executed the STOP command yet, the
  682	 * active bit is still set. We consider that we aren't
  683	 * sharing interrupts (which is hopefully the case with
  684	 * these controllers) and so we just try to flush the
  685	 * channel of pending data in the fifo
  686	 */
 687	udelay(1);
 688	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
 689	for (;;) {
 690		udelay(1);
 691		dstat = readl(&dma_regs->status);
 692		if ((dstat & FLUSH) == 0)
 693			break;
 694		if (++timeout > 1000) {
 695			dev_warn(priv->dev, "timeout flushing DMA\n");
 696			rstat |= ATA_DMA_ERR;
 697			break;
 698		}
 699	}
 700	return rstat;
 701}
 702
 703/* port_start is when we allocate the DMA command list */
 704static int pata_macio_port_start(struct ata_port *ap)
 705{
 706	struct pata_macio_priv *priv = ap->private_data;
 707
 708	if (ap->ioaddr.bmdma_addr == NULL)
 709		return 0;
 710
 711	/* Allocate space for the DBDMA commands.
 712	 *
 713	 * The +2 is +1 for the stop command and +1 to allow for
 714	 * aligning the start address to a multiple of 16 bytes.
 715	 */
 716	priv->dma_table_cpu =
 717		dmam_alloc_coherent(priv->dev,
 718				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
 719				    &priv->dma_table_dma, GFP_KERNEL);
 720	if (priv->dma_table_cpu == NULL) {
 721		dev_err(priv->dev, "Unable to allocate DMA command list\n");
 722		ap->ioaddr.bmdma_addr = NULL;
 723		ap->mwdma_mask = 0;
 724		ap->udma_mask = 0;
 725	}
 726	return 0;
 727}
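     /*
      * Sizing note (assuming the usual 16-byte struct dbdma_cmd layout
      * from <asm/dbdma.h>): the (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd)
      * allocation above comes to (256 + 2) * 16 = 4128 bytes per port,
      * i.e. slightly more than one 4KiB page.
      */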
 728
 729static void pata_macio_irq_clear(struct ata_port *ap)
 730{
 731	struct pata_macio_priv *priv = ap->private_data;
 732
 733	/* Nothing to do here */
 734
 735	dev_dbgdma(priv->dev, "%s\n", __func__);
 736}
 737
 738static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
 739{
 740	dev_dbg(priv->dev, "Enabling & resetting... \n");
 741
 742	if (priv->mediabay)
 743		return;
 744
 745	if (priv->kind == controller_ohare && !resume) {
  746		/* The code below is having trouble on some OHare machines
  747		 * (timing related?). Until I can get my hands on one of these
  748		 * units, I'll keep the old way.
  749		 */
 750		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
 751	} else {
 752		int rc;
 753
 754 		/* Reset and enable controller */
 755		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 756					 priv->node, priv->aapl_bus_id, 1);
 757		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
 758				    priv->node, priv->aapl_bus_id, 1);
 759		msleep(10);
 760		/* Only bother waiting if there's a reset control */
 761		if (rc == 0) {
 762			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 763					    priv->node, priv->aapl_bus_id, 0);
 764			msleep(IDE_WAKEUP_DELAY_MS);
 765		}
 766	}
 767
 768	/* If resuming a PCI device, restore the config space here */
 769	if (priv->pdev && resume) {
 770		int rc;
 771
 772		pci_restore_state(priv->pdev);
 773		rc = pcim_enable_device(priv->pdev);
 774		if (rc)
 775			dev_err(&priv->pdev->dev,
 776				"Failed to enable device after resume (%d)\n",
 777				rc);
 778		else
 779			pci_set_master(priv->pdev);
 780	}
 781
  782	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
  783	 * really seem necessary and skipping it speeds up the boot process.
  784	 */
 785	if (priv->kauai_fcr)
 786		writel(KAUAI_FCR_UATA_MAGIC |
 787		       KAUAI_FCR_UATA_RESET_N |
 788		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
 789}
 790
  791/* Hook the standard slave config to fix up some HW-related alignment
  792 * restrictions.
  793 */
 794static int pata_macio_slave_config(struct scsi_device *sdev)
 795{
 796	struct ata_port *ap = ata_shost_to_port(sdev->host);
 797	struct pata_macio_priv *priv = ap->private_data;
 798	struct ata_device *dev;
 799	u16 cmd;
 800	int rc;
 801
 802	/* First call original */
 803	rc = ata_scsi_slave_config(sdev);
 804	if (rc)
 805		return rc;
 806
 807	/* This is lifted from sata_nv */
 808	dev = &ap->link.device[sdev->id];
 809
  810	/* OHare has issues with non-cache-aligned DMA on some chipsets */
 811	if (priv->kind == controller_ohare) {
 812		blk_queue_update_dma_alignment(sdev->request_queue, 31);
 813		blk_queue_update_dma_pad(sdev->request_queue, 31);
 814
 815		/* Tell the world about it */
 816		ata_dev_info(dev, "OHare alignment limits applied\n");
 817		return 0;
 818	}
 819
 820	/* We only have issues with ATAPI */
 821	if (dev->class != ATA_DEV_ATAPI)
 822		return 0;
 823
 824	/* Shasta and K2 seem to have "issues" with reads ... */
 825	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
  826		/* All right, these are bad; apply restrictions */
 827		blk_queue_update_dma_alignment(sdev->request_queue, 15);
 828		blk_queue_update_dma_pad(sdev->request_queue, 15);
 829
  830		/* We enable MWI and hack the cache line size directly here;
  831		 * these are specific to this chipset and not normal values. We
  832		 * happen to somewhat know what we are doing here (which is basically
  833		 * to do the same thing Apple does and pray they did not get it wrong :-)
  834		 */
 835		BUG_ON(!priv->pdev);
 836		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
 837		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
 838		pci_write_config_word(priv->pdev, PCI_COMMAND,
 839				      cmd | PCI_COMMAND_INVALIDATE);
 840
 841		/* Tell the world about it */
 842		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
 843	}
 844
 845	return 0;
 846}
 847
 848#ifdef CONFIG_PM
 849
 850static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
 851{
 852	int rc;
 853
 854	/* First, core libata suspend to do most of the work */
 855	rc = ata_host_suspend(priv->host, mesg);
 856	if (rc)
 857		return rc;
 858
 859	/* Restore to default timings */
 860	pata_macio_default_timings(priv);
 861
  862	/* Mask the interrupt. Not strictly necessary, but the old driver
  863	 * did it and I'd rather not change that here */
 864	disable_irq(priv->irq);
 865
 866	/* The media bay will handle itself just fine */
 867	if (priv->mediabay)
 868		return 0;
 869
 870	/* Kauai has bus control FCRs directly here */
 871	if (priv->kauai_fcr) {
 872		u32 fcr = readl(priv->kauai_fcr);
 873		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
 874		writel(fcr, priv->kauai_fcr);
 875	}
 876
 877	/* For PCI, save state and disable DMA. No need to call
 878	 * pci_set_power_state(), the HW doesn't do D states that
 879	 * way, the platform code will take care of suspending the
 880	 * ASIC properly
 881	 */
 882	if (priv->pdev) {
 883		pci_save_state(priv->pdev);
 884		pci_disable_device(priv->pdev);
 885	}
 886
 887	/* Disable the bus on older machines and the cell on kauai */
 888	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
 889			    priv->aapl_bus_id, 0);
 890
 891	return 0;
 892}
 893
 894static int pata_macio_do_resume(struct pata_macio_priv *priv)
 895{
 896	/* Reset and re-enable the HW */
 897	pata_macio_reset_hw(priv, 1);
 898
 899	/* Sanitize drive timings */
 900	pata_macio_apply_timings(priv->host->ports[0], 0);
 901
 902	/* We want our IRQ back ! */
 903	enable_irq(priv->irq);
 904
 905	/* Let the libata core take it from there */
 906	ata_host_resume(priv->host);
 907
 908	return 0;
  909}
 910
 911#endif /* CONFIG_PM */
 912
 913static struct scsi_host_template pata_macio_sht = {
 914	ATA_BASE_SHT(DRV_NAME),
 915	.sg_tablesize		= MAX_DCMDS,
  916	/* We may not need one that strict */
  917	.dma_boundary		= ATA_DMA_BOUNDARY,
  918	.slave_configure	= pata_macio_slave_config,
 919};
 920
 921static struct ata_port_operations pata_macio_ops = {
 922	.inherits		= &ata_bmdma_port_ops,
 923
 924	.freeze			= pata_macio_freeze,
 925	.set_piomode		= pata_macio_set_timings,
 926	.set_dmamode		= pata_macio_set_timings,
 927	.cable_detect		= pata_macio_cable_detect,
 928	.sff_dev_select		= pata_macio_dev_select,
 929	.qc_prep		= pata_macio_qc_prep,
 930	.bmdma_setup		= pata_macio_bmdma_setup,
 931	.bmdma_start		= pata_macio_bmdma_start,
 932	.bmdma_stop		= pata_macio_bmdma_stop,
 933	.bmdma_status		= pata_macio_bmdma_status,
 934	.port_start		= pata_macio_port_start,
 935	.sff_irq_clear		= pata_macio_irq_clear,
 936};
 937
 938static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
 939{
 940	const int *bidp;
 941
 942	/* Identify the type of controller */
 943	if (of_device_is_compatible(priv->node, "shasta-ata")) {
 944		priv->kind = controller_sh_ata6;
 945	        priv->timings = pata_macio_shasta_timings;
 946	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
 947		priv->kind = controller_un_ata6;
 948	        priv->timings = pata_macio_kauai_timings;
 949	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
 950		priv->kind = controller_k2_ata6;
 951	        priv->timings = pata_macio_kauai_timings;
 952	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
 953		if (strcmp(priv->node->name, "ata-4") == 0) {
 954			priv->kind = controller_kl_ata4;
 955			priv->timings = pata_macio_kl66_timings;
 956		} else {
 957			priv->kind = controller_kl_ata3;
 958			priv->timings = pata_macio_kl33_timings;
 959		}
 960	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
 961		priv->kind = controller_heathrow;
 962		priv->timings = pata_macio_heathrow_timings;
 963	} else {
 964		priv->kind = controller_ohare;
 965		priv->timings = pata_macio_ohare_timings;
 966	}
 967
 968	/* XXX FIXME --- setup priv->mediabay here */
 969
 970	/* Get Apple bus ID (for clock and ASIC control) */
 971	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
 972	priv->aapl_bus_id =  bidp ? *bidp : 0;
 973
 974	/* Fixup missing Apple bus ID in case of media-bay */
 975	if (priv->mediabay && bidp == 0)
 976		priv->aapl_bus_id = 1;
 977}
 978
 979static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
 980					   void __iomem * base,
 981					   void __iomem * dma)
 982{
 983	/* cmd_addr is the base of regs for that port */
 984	ioaddr->cmd_addr	= base;
 985
 986	/* taskfile registers */
 987	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
 988	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
 989	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
 990	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
 991	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
 992	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
 993	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
 994	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
 995	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
 996	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
 997	ioaddr->altstatus_addr	= base + 0x160;
 998	ioaddr->ctl_addr	= base + 0x160;
 999	ioaddr->bmdma_addr	= dma;
1000}
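     /*
      * In other words, the macio cell spaces the taskfile registers 16
      * bytes apart (hence the << 4 above): with the standard ATA_REG_*
      * offsets the data register sits at base + 0x000 and the
      * status/command register at base + 0x070, while the control/altstatus
      * register lives at the fixed offset base + 0x160.
      */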
1001
1002static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1003						   struct ata_port_info   *pinfo)
1004{
1005	int i = 0;
1006
1007	pinfo->pio_mask		= 0;
1008	pinfo->mwdma_mask	= 0;
1009	pinfo->udma_mask	= 0;
1010
1011	while (priv->timings[i].mode > 0) {
1012		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
1013		switch(priv->timings[i].mode & 0xf0) {
1014		case 0x00: /* PIO */
1015			pinfo->pio_mask |= (mask >> 8);
1016			break;
1017		case 0x20: /* MWDMA */
1018			pinfo->mwdma_mask |= mask;
1019			break;
1020		case 0x40: /* UDMA */
1021			pinfo->udma_mask |= mask;
1022			break;
1023		}
1024		i++;
1025	}
1026	dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
1027		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1028}
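     /*
      * The decoding above relies on the layout of the XFER_* mode values:
      * the high nibble selects the transfer class handled by the switch
      * (0x0x PIO, 0x2x MWDMA, 0x4x UDMA) and the low nibble gives the bit
      * position, with the PIO mask shifted down by 8 so that bit 0 ends up
      * meaning PIO mode 0. For the Heathrow table, for example, this yields
      * PIO modes 0-4, MWDMA modes 0-2 and no UDMA modes.
      */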
1029
1030static int __devinit pata_macio_common_init(struct pata_macio_priv	*priv,
1031					    resource_size_t		tfregs,
1032					    resource_size_t		dmaregs,
1033					    resource_size_t		fcregs,
1034					    unsigned long		irq)
1035{
1036	struct ata_port_info		pinfo;
1037	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
1038	void __iomem			*dma_regs = NULL;
1039
1040	/* Fill up privates with various invariants collected from the
1041	 * device-tree
1042	 */
1043	pata_macio_invariants(priv);
1044
1045	/* Make sure we have sane initial timings in the cache */
1046	pata_macio_default_timings(priv);
1047
1048	/* Not sure what the real max is but we know it's less than 64K, let's
1049	 * use 64K minus 256
1050	 */
1051	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
1052
1053	/* Allocate libata host for 1 port */
1054	memset(&pinfo, 0, sizeof(struct ata_port_info));
1055	pmac_macio_calc_timing_masks(priv, &pinfo);
1056	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
1057	pinfo.port_ops		= &pata_macio_ops;
1058	pinfo.private_data	= priv;
1059
1060	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1061	if (priv->host == NULL) {
1062		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1063		return -ENOMEM;
1064	}
1065
1066	/* Setup the private data in host too */
1067	priv->host->private_data = priv;
1068
1069	/* Map base registers */
1070	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1071	if (priv->tfregs == NULL) {
1072		dev_err(priv->dev, "Failed to map ATA ports\n");
1073		return -ENOMEM;
1074	}
1075	priv->host->iomap = &priv->tfregs;
1076
1077	/* Map DMA regs */
1078	if (dmaregs != 0) {
1079		dma_regs = devm_ioremap(priv->dev, dmaregs,
1080					sizeof(struct dbdma_regs));
1081		if (dma_regs == NULL)
1082			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1083	}
1084
1085	/* If chip has local feature control, map those regs too */
1086	if (fcregs != 0) {
1087		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1088		if (priv->kauai_fcr == NULL) {
1089			dev_err(priv->dev, "Failed to map ATA FCR register\n");
1090			return -ENOMEM;
1091		}
1092	}
1093
1094	/* Setup port data structure */
1095	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1096			     priv->tfregs, dma_regs);
1097	priv->host->ports[0]->private_data = priv;
1098
1099	/* hard-reset the controller */
1100	pata_macio_reset_hw(priv, 0);
1101	pata_macio_apply_timings(priv->host->ports[0], 0);
1102
1103	/* Enable bus master if necessary */
1104	if (priv->pdev && dma_regs)
1105		pci_set_master(priv->pdev);
1106
1107	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1108		 macio_ata_names[priv->kind], priv->aapl_bus_id);
1109
1110	/* Start it up */
1111	priv->irq = irq;
1112	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1113				 &pata_macio_sht);
1114}
1115
1116static int __devinit pata_macio_attach(struct macio_dev *mdev,
1117				       const struct of_device_id *match)
1118{
1119	struct pata_macio_priv	*priv;
1120	resource_size_t		tfregs, dmaregs = 0;
1121	unsigned long		irq;
1122	int			rc;
1123
1124	/* Check for broken device-trees */
1125	if (macio_resource_count(mdev) == 0) {
1126		dev_err(&mdev->ofdev.dev,
1127			"No addresses for controller\n");
1128		return -ENXIO;
1129	}
1130
1131	/* Enable managed resources */
1132	macio_enable_devres(mdev);
1133
1134	/* Allocate and init private data structure */
1135	priv = devm_kzalloc(&mdev->ofdev.dev,
1136			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1137	if (priv == NULL) {
1138		dev_err(&mdev->ofdev.dev,
1139			"Failed to allocate private memory\n");
1140		return -ENOMEM;
1141	}
1142	priv->node = of_node_get(mdev->ofdev.dev.of_node);
1143	priv->mdev = mdev;
1144	priv->dev = &mdev->ofdev.dev;
1145
1146	/* Request memory resource for taskfile registers */
1147	if (macio_request_resource(mdev, 0, "pata-macio")) {
1148		dev_err(&mdev->ofdev.dev,
1149			"Cannot obtain taskfile resource\n");
1150		return -EBUSY;
1151	}
1152	tfregs = macio_resource_start(mdev, 0);
1153
1154	/* Request resources for DMA registers if any */
1155	if (macio_resource_count(mdev) >= 2) {
1156		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1157			dev_err(&mdev->ofdev.dev,
1158				"Cannot obtain DMA resource\n");
1159		else
1160			dmaregs = macio_resource_start(mdev, 1);
1161	}
1162
1163	/*
1164	 * Fixup missing IRQ for some old implementations with broken
1165	 * device-trees.
1166	 *
1167	 * This is a bit bogus, it should be fixed in the device-tree itself,
1168	 * via the existing macio fixups, based on the type of interrupt
1169	 * controller in the machine. However, I have no test HW for this case,
1170	 * and this trick works well enough on those old machines...
1171	 */
1172	if (macio_irq_count(mdev) == 0) {
1173		dev_warn(&mdev->ofdev.dev,
1174			 "No interrupts for controller, using 13\n");
1175		irq = irq_create_mapping(NULL, 13);
1176	} else
1177		irq = macio_irq(mdev, 0);
1178
 1179	/* Prevent media bay callbacks until fully registered */
1180	lock_media_bay(priv->mdev->media_bay);
1181
1182	/* Get register addresses and call common initialization */
1183	rc = pata_macio_common_init(priv,
1184				    tfregs,		/* Taskfile regs */
1185				    dmaregs,		/* DBDMA regs */
1186				    0,			/* Feature control */
1187				    irq);
1188	unlock_media_bay(priv->mdev->media_bay);
1189
1190	return rc;
1191}
1192
1193static int __devexit pata_macio_detach(struct macio_dev *mdev)
1194{
1195	struct ata_host *host = macio_get_drvdata(mdev);
1196	struct pata_macio_priv *priv = host->private_data;
1197
1198	lock_media_bay(priv->mdev->media_bay);
1199
1200	/* Make sure the mediabay callback doesn't try to access
1201	 * dead stuff
1202	 */
1203	priv->host->private_data = NULL;
1204
1205	ata_host_detach(host);
1206
1207	unlock_media_bay(priv->mdev->media_bay);
1208
1209	return 0;
1210}
1211
1212#ifdef CONFIG_PM
1213
1214static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1215{
1216	struct ata_host *host = macio_get_drvdata(mdev);
1217
1218	return pata_macio_do_suspend(host->private_data, mesg);
1219}
1220
1221static int pata_macio_resume(struct macio_dev *mdev)
1222{
1223	struct ata_host *host = macio_get_drvdata(mdev);
1224
1225	return pata_macio_do_resume(host->private_data);
1226}
1227
1228#endif /* CONFIG_PM */
1229
1230#ifdef CONFIG_PMAC_MEDIABAY
1231static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1232{
1233	struct ata_host *host = macio_get_drvdata(mdev);
1234	struct ata_port *ap;
1235	struct ata_eh_info *ehi;
1236	struct ata_device *dev;
1237	unsigned long flags;
1238
1239	if (!host || !host->private_data)
1240		return;
1241	ap = host->ports[0];
1242	spin_lock_irqsave(ap->lock, flags);
1243	ehi = &ap->link.eh_info;
1244	if (mb_state == MB_CD) {
1245		ata_ehi_push_desc(ehi, "mediabay plug");
1246		ata_ehi_hotplugged(ehi);
1247		ata_port_freeze(ap);
1248	} else {
1249		ata_ehi_push_desc(ehi, "mediabay unplug");
1250		ata_for_each_dev(dev, &ap->link, ALL)
1251			dev->flags |= ATA_DFLAG_DETACH;
1252		ata_port_abort(ap);
1253	}
1254	spin_unlock_irqrestore(ap->lock, flags);
1255
1256}
1257#endif /* CONFIG_PMAC_MEDIABAY */
1258
1259
1260static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
1261					   const struct pci_device_id *id)
1262{
1263	struct pata_macio_priv	*priv;
1264	struct device_node	*np;
1265	resource_size_t		rbase;
1266
1267	/* We cannot use a MacIO controller without its OF device node */
1268	np = pci_device_to_OF_node(pdev);
1269	if (np == NULL) {
1270		dev_err(&pdev->dev,
1271			"Cannot find OF device node for controller\n");
1272		return -ENODEV;
1273	}
1274
1275	/* Check that it can be enabled */
1276	if (pcim_enable_device(pdev)) {
1277		dev_err(&pdev->dev,
1278			"Cannot enable controller PCI device\n");
1279		return -ENXIO;
1280	}
1281
1282	/* Allocate and init private data structure */
1283	priv = devm_kzalloc(&pdev->dev,
1284			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1285	if (priv == NULL) {
1286		dev_err(&pdev->dev,
1287			"Failed to allocate private memory\n");
1288		return -ENOMEM;
1289	}
1290	priv->node = of_node_get(np);
1291	priv->pdev = pdev;
1292	priv->dev = &pdev->dev;
1293
1294	/* Get MMIO regions */
1295	if (pci_request_regions(pdev, "pata-macio")) {
1296		dev_err(&pdev->dev,
1297			"Cannot obtain PCI resources\n");
1298		return -EBUSY;
1299	}
1300
1301	/* Get register addresses and call common initialization */
1302	rbase = pci_resource_start(pdev, 0);
1303	if (pata_macio_common_init(priv,
1304				   rbase + 0x2000,	/* Taskfile regs */
1305				   rbase + 0x1000,	/* DBDMA regs */
1306				   rbase,		/* Feature control */
1307				   pdev->irq))
1308		return -ENXIO;
1309
1310	return 0;
1311}
1312
1313static void __devexit pata_macio_pci_detach(struct pci_dev *pdev)
1314{
1315	struct ata_host *host = dev_get_drvdata(&pdev->dev);
1316
1317	ata_host_detach(host);
1318}
1319
1320#ifdef CONFIG_PM
1321
1322static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1323{
1324	struct ata_host *host = dev_get_drvdata(&pdev->dev);
1325
1326	return pata_macio_do_suspend(host->private_data, mesg);
1327}
1328
1329static int pata_macio_pci_resume(struct pci_dev *pdev)
1330{
1331	struct ata_host *host = dev_get_drvdata(&pdev->dev);
1332
1333	return pata_macio_do_resume(host->private_data);
 1334}
1335
1336#endif /* CONFIG_PM */
1337
1338static struct of_device_id pata_macio_match[] =
1339{
1340	{
1341	.name 		= "IDE",
1342	},
1343	{
1344	.name 		= "ATA",
1345	},
1346	{
1347	.type		= "ide",
1348	},
1349	{
1350	.type		= "ata",
1351	},
1352	{},
 1353};
1354
1355static struct macio_driver pata_macio_driver =
1356{
1357	.driver = {
1358		.name 		= "pata-macio",
1359		.owner		= THIS_MODULE,
1360		.of_match_table	= pata_macio_match,
1361	},
1362	.probe		= pata_macio_attach,
1363	.remove		= pata_macio_detach,
1364#ifdef CONFIG_PM
1365	.suspend	= pata_macio_suspend,
1366	.resume		= pata_macio_resume,
1367#endif
1368#ifdef CONFIG_PMAC_MEDIABAY
1369	.mediabay_event	= pata_macio_mb_event,
1370#endif
1371};
1372
1373static const struct pci_device_id pata_macio_pci_match[] = {
1374	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
1375	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
1376	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
1377	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
1378	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
1379	{},
1380};
1381
1382static struct pci_driver pata_macio_pci_driver = {
1383	.name		= "pata-pci-macio",
1384	.id_table	= pata_macio_pci_match,
1385	.probe		= pata_macio_pci_attach,
1386	.remove		= pata_macio_pci_detach,
1387#ifdef CONFIG_PM
1388	.suspend	= pata_macio_pci_suspend,
1389	.resume		= pata_macio_pci_resume,
1390#endif
1391	.driver = {
1392		.owner		= THIS_MODULE,
1393	},
1394};
1395MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1396
1397
1398static int __init pata_macio_init(void)
1399{
1400	int rc;
1401
1402	if (!machine_is(powermac))
1403		return -ENODEV;
1404
1405	rc = pci_register_driver(&pata_macio_pci_driver);
1406	if (rc)
1407		return rc;
1408	rc = macio_register_driver(&pata_macio_driver);
1409	if (rc) {
1410		pci_unregister_driver(&pata_macio_pci_driver);
1411		return rc;
1412	}
1413	return 0;
1414}
1415
1416static void __exit pata_macio_exit(void)
1417{
1418	macio_unregister_driver(&pata_macio_driver);
1419	pci_unregister_driver(&pata_macio_pci_driver);
1420}
1421
1422module_init(pata_macio_init);
1423module_exit(pata_macio_exit);
1424
1425MODULE_AUTHOR("Benjamin Herrenschmidt");
1426MODULE_DESCRIPTION("Apple MacIO PATA driver");
1427MODULE_LICENSE("GPL");
1428MODULE_VERSION(DRV_VERSION);
 376	    priv->kind == controller_un_ata6 ||
 377	    priv->kind == controller_k2_ata6) {
 378		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
 379		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
 380	} else
 381		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
 382}
 383
 384static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
 385{
 386	ata_sff_dev_select(ap, device);
 387
 388	/* Apply timings */
 389	pata_macio_apply_timings(ap, device);
 390}
 391
 392static void pata_macio_set_timings(struct ata_port *ap,
 393				   struct ata_device *adev)
 394{
 395	struct pata_macio_priv *priv = ap->private_data;
 396	const struct pata_macio_timing *t;
 397
 398	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
 399		adev->devno,
 400		adev->pio_mode,
 401		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
 402		adev->dma_mode,
 403		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
 404
 405	/* First clear timings */
 406	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
 407
 408	/* Now get the PIO timings */
 409	t = pata_macio_find_timing(priv, adev->pio_mode);
 410	if (t == NULL) {
 411		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
 412			 adev->pio_mode);
 413		t = pata_macio_find_timing(priv, XFER_PIO_0);
 414	}
 415	BUG_ON(t == NULL);
 416
 417	/* PIO timings only ever use the first treg */
 418	priv->treg[adev->devno][0] |= t->reg1;
 419
 420	/* Now get DMA timings */
 421	t = pata_macio_find_timing(priv, adev->dma_mode);
 422	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
 423		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
 424		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
 425	}
 426	BUG_ON(t == NULL);
 427
 428	/* DMA timings can use both tregs */
 429	priv->treg[adev->devno][0] |= t->reg1;
 430	priv->treg[adev->devno][1] |= t->reg2;
 431
 432	dev_dbg(priv->dev, " -> %08x %08x\n",
 433		priv->treg[adev->devno][0],
 434		priv->treg[adev->devno][1]);
 435
 436	/* Apply to hardware */
 437	pata_macio_apply_timings(ap, adev->devno);
 438}
 439
 440/*
 441 * Blast some well known "safe" values to the timing registers at init or
 442 * wakeup from sleep time, before we do real calculation
 443 */
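     /*
      * The "safe" values below appear to be simply the PIO mode 0 timing
      * OR'd with the MWDMA mode 0 timing from the per-cell tables above,
      * plus the UDMA5 word for the second register on Kauai-class cells.
      */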
 444static void pata_macio_default_timings(struct pata_macio_priv *priv)
 445{
 446	unsigned int value, value2 = 0;
 447
  448	switch (priv->kind) {
  449	case controller_sh_ata6:
  450		value = 0x0a820c97;
  451		value2 = 0x00033031;
  452		break;
  453	case controller_un_ata6:
  454	case controller_k2_ata6:
  455		value = 0x08618a92;
  456		value2 = 0x00002921;
  457		break;
  458	case controller_kl_ata4:
  459		value = 0x0008438c;
  460		break;
  461	case controller_kl_ata3:
  462		value = 0x00084526;
  463		break;
  464	case controller_heathrow:
  465	case controller_ohare:
  466	default:
  467		value = 0x00074526;
  468		break;
  469	}
 470	priv->treg[0][0] = priv->treg[1][0] = value;
 471	priv->treg[0][1] = priv->treg[1][1] = value2;
 472}
 473
 474static int pata_macio_cable_detect(struct ata_port *ap)
 475{
 476	struct pata_macio_priv *priv = ap->private_data;
 477
 478	/* Get cable type from device-tree */
 479	if (priv->kind == controller_kl_ata4 ||
 480	    priv->kind == controller_un_ata6 ||
 481	    priv->kind == controller_k2_ata6 ||
 482	    priv->kind == controller_sh_ata6) {
 483		const char* cable = of_get_property(priv->node, "cable-type",
 484						    NULL);
 485		struct device_node *root = of_find_node_by_path("/");
 486		const char *model = of_get_property(root, "model", NULL);
 487
 488		of_node_put(root);
 489
 490		if (cable && !strncmp(cable, "80-", 3)) {
  491			/* Some drives fail to detect the 80c cable in PowerBooks.
  492			 * These machines use a proprietary short IDE cable
  493			 * anyway.
 494			 */
  495			if (model && !strncmp(model, "PowerBook", 9))
 496				return ATA_CBL_PATA40_SHORT;
 497			else
 498				return ATA_CBL_PATA80;
 499		}
 500	}
 501
  502	/* G5s seem to have an incorrect cable type in the device-tree.
  503	 * Let's assume they always have an 80-conductor cable; this seems to
  504	 * always be the case unless the user mucked around.
 505	 */
 506	if (of_device_is_compatible(priv->node, "K2-UATA") ||
 507	    of_device_is_compatible(priv->node, "shasta-ata"))
 508		return ATA_CBL_PATA80;
 509
 510	/* Anything else is 40 connectors */
 511	return ATA_CBL_PATA40;
 512}
 513
 514static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 515{
 516	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 517	struct ata_port *ap = qc->ap;
 518	struct pata_macio_priv *priv = ap->private_data;
 519	struct scatterlist *sg;
 520	struct dbdma_cmd *table;
 521	unsigned int si, pi;
 522
 523	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
 524		   __func__, qc, qc->flags, write, qc->dev->devno);
 525
 526	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 527		return AC_ERR_OK;
 528
 529	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 530
 531	pi = 0;
 532	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 533		u32 addr, sg_len, len;
 534
 535		/* determine if physical DMA addr spans 64K boundary.
 536		 * Note h/w doesn't support 64-bit, so we unconditionally
 537		 * truncate dma_addr_t to u32.
 538		 */
 539		addr = (u32) sg_dma_address(sg);
 540		sg_len = sg_dma_len(sg);
 541
 542		while (sg_len) {
 543			/* table overflow should never happen */
 544			BUG_ON (pi++ >= MAX_DCMDS);
 545
 546			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 547			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
 548			table->req_count = cpu_to_le16(len);
 549			table->phy_addr = cpu_to_le32(addr);
 550			table->cmd_dep = 0;
 551			table->xfer_status = 0;
 552			table->res_count = 0;
 553			addr += len;
 554			sg_len -= len;
 555			++table;
 556		}
 557	}
 558
 559	/* Should never happen according to Tejun */
 560	BUG_ON(!pi);
 561
 562	/* Convert the last command to an input/output */
 563	table--;
 564	table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
 565	table++;
 566
 567	/* Add the stop command to the end of the list */
 568	memset(table, 0, sizeof(struct dbdma_cmd));
 569	table->command = cpu_to_le16(DBDMA_STOP);
 570
 571	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
 572
 573	return AC_ERR_OK;
 574}
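     /* For reference, the command list built above ends up shaped like this
      * (sketch for a write request covering three segments):
      *
      *	OUTPUT_MORE (seg 0) -> OUTPUT_MORE (seg 1) -> OUTPUT_LAST (seg 2)
      *	-> DBDMA_STOP
      *
      * Reads use INPUT_MORE/INPUT_LAST instead, and any segment larger than
      * MAX_DBDMA_SEG is split across several descriptors.
      */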
 575
 576
 577static void pata_macio_freeze(struct ata_port *ap)
 578{
 579	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 580
 581	if (dma_regs) {
 582		unsigned int timeout = 1000000;
 583
 584		/* Make sure DMA controller is stopped */
 585		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
 586		while (--timeout && (readl(&dma_regs->status) & RUN))
 587			udelay(1);
 588	}
 589
 590	ata_sff_freeze(ap);
 591}
 592
 593
 594static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
 595{
 596	struct ata_port *ap = qc->ap;
 597	struct pata_macio_priv *priv = ap->private_data;
 598	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 599	int dev = qc->dev->devno;
 600
 601	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 602
 603	/* Make sure DMA commands updates are visible */
 604	writel(priv->dma_table_dma, &dma_regs->cmdptr);
 605
  606	/* On the KeyLargo 66MHz cell, we need to add 60ns to wrDataSetup on
 607	 * UDMA reads
 608	 */
 609	if (priv->kind == controller_kl_ata4 &&
 610	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
 611		void __iomem *rbase = ap->ioaddr.cmd_addr;
 612		u32 reg = priv->treg[dev][0];
 613
 614		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 615			reg += 0x00800000;
 616		writel(reg, rbase + IDE_TIMING_CONFIG);
 617	}
 618
 619	/* issue r/w command */
 620	ap->ops->sff_exec_command(ap, &qc->tf);
 621}
 622
 623static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
 624{
 625	struct ata_port *ap = qc->ap;
 626	struct pata_macio_priv *priv = ap->private_data;
 627	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 628
 629	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 630
 631	writel((RUN << 16) | RUN, &dma_regs->control);
 632	/* Make sure it gets to the controller right now */
 633	(void)readl(&dma_regs->control);
 634}
 635
 636static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
 637{
 638	struct ata_port *ap = qc->ap;
 639	struct pata_macio_priv *priv = ap->private_data;
 640	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 641	unsigned int timeout = 1000000;
 642
 643	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 644
  645	/* Stop the DMA engine and wait for it to fully halt */
 646	writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
 647	while (--timeout && (readl(&dma_regs->status) & RUN))
 648		udelay(1);
 649}
 650
 651static u8 pata_macio_bmdma_status(struct ata_port *ap)
 652{
 653	struct pata_macio_priv *priv = ap->private_data;
 654	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 655	u32 dstat, rstat = ATA_DMA_INTR;
 656	unsigned long timeout = 0;
 657
 658	dstat = readl(&dma_regs->status);
 659
 660	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
 661
 662	/* We have two things to deal with here:
 663	 *
 664	 * - The dbdma won't stop if the command was started
 665	 * but completed with an error without transferring all
  666	 * of the data. This happens when bad blocks are met during
 667	 * a multi-block transfer.
 668	 *
 669	 * - The dbdma fifo hasn't yet finished flushing to
 670	 * system memory when the disk interrupt occurs.
 671	 */
 672
 673	/* First check for errors */
 674	if ((dstat & (RUN|DEAD)) != RUN)
 675		rstat |= ATA_DMA_ERR;
 676
 677	/* If ACTIVE is cleared, the STOP command has been hit and
 678	 * the transfer is complete. If not, we have to flush the
 679	 * channel.
 680	 */
 681	if ((dstat & ACTIVE) == 0)
 682		return rstat;
 683
 684	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
 685
 686	/* If dbdma didn't execute the STOP command yet, the
 687	 * active bit is still set. We consider that we aren't
 688	 * sharing interrupts (which is hopefully the case with
 689	 * those controllers) and so we just try to flush the
 690	 * channel for pending data in the fifo
 691	 */
 692	udelay(1);
 693	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
 694	for (;;) {
 695		udelay(1);
 696		dstat = readl(&dma_regs->status);
 697		if ((dstat & FLUSH) == 0)
 698			break;
 699		if (++timeout > 1000) {
 700			dev_warn(priv->dev, "timeout flushing DMA\n");
 701			rstat |= ATA_DMA_ERR;
 702			break;
 703		}
 704	}
 705	return rstat;
 706}
 707
 708/* port_start is when we allocate the DMA command list */
 709static int pata_macio_port_start(struct ata_port *ap)
 710{
 711	struct pata_macio_priv *priv = ap->private_data;
 712
 713	if (ap->ioaddr.bmdma_addr == NULL)
 714		return 0;
 715
 716	/* Allocate space for the DBDMA commands.
 717	 *
 718	 * The +2 is +1 for the stop command and +1 to allow for
 719	 * aligning the start address to a multiple of 16 bytes.
 720	 */
 721	priv->dma_table_cpu =
 722		dmam_alloc_coherent(priv->dev,
 723				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
 724				    &priv->dma_table_dma, GFP_KERNEL);
 725	if (priv->dma_table_cpu == NULL) {
 726		dev_err(priv->dev, "Unable to allocate DMA command list\n");
 727		ap->ioaddr.bmdma_addr = NULL;
 728		ap->mwdma_mask = 0;
 729		ap->udma_mask = 0;
 730	}
 731	return 0;
 732}
 733
 734static void pata_macio_irq_clear(struct ata_port *ap)
 735{
 736	struct pata_macio_priv *priv = ap->private_data;
 737
 738	/* Nothing to do here */
 739
 740	dev_dbgdma(priv->dev, "%s\n", __func__);
 741}
 742
 743static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
 744{
 745	dev_dbg(priv->dev, "Enabling & resetting... \n");
 746
 747	if (priv->mediabay)
 748		return;
 749
 750	if (priv->kind == controller_ohare && !resume) {
 751		/* The code below is having trouble on some ohare machines
  752		 * (timing related?). Until I can get my hands on one of these
  753		 * units, I'll keep the old way.
 754		 */
 755		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
 756	} else {
 757		int rc;
 758
  759		/* Reset and enable controller */
 760		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 761					 priv->node, priv->aapl_bus_id, 1);
 762		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
 763				    priv->node, priv->aapl_bus_id, 1);
 764		msleep(10);
 765		/* Only bother waiting if there's a reset control */
 766		if (rc == 0) {
 767			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 768					    priv->node, priv->aapl_bus_id, 0);
 769			msleep(IDE_WAKEUP_DELAY_MS);
 770		}
 771	}
 772
 773	/* If resuming a PCI device, restore the config space here */
 774	if (priv->pdev && resume) {
 775		int rc;
 776
 777		pci_restore_state(priv->pdev);
 778		rc = pcim_enable_device(priv->pdev);
 779		if (rc)
 780			dev_err(&priv->pdev->dev,
 781				"Failed to enable device after resume (%d)\n",
 782				rc);
 783		else
 784			pci_set_master(priv->pdev);
 785	}
 786
  787	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
  788	 * really seem necessary and skipping it speeds up the boot process.
 789	 */
 790	if (priv->kauai_fcr)
 791		writel(KAUAI_FCR_UATA_MAGIC |
 792		       KAUAI_FCR_UATA_RESET_N |
 793		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
 794}
 795
  796/* Hook the standard slave config to fix up some HW-related alignment
 797 * restrictions
 798 */
 799static int pata_macio_slave_config(struct scsi_device *sdev)
 800{
 801	struct ata_port *ap = ata_shost_to_port(sdev->host);
 802	struct pata_macio_priv *priv = ap->private_data;
 803	struct ata_device *dev;
 804	u16 cmd;
 805	int rc;
 806
 807	/* First call original */
 808	rc = ata_scsi_slave_config(sdev);
 809	if (rc)
 810		return rc;
 811
 812	/* This is lifted from sata_nv */
 813	dev = &ap->link.device[sdev->id];
 814
 815	/* OHare has issues with non cache aligned DMA on some chipsets */
 816	if (priv->kind == controller_ohare) {
 817		blk_queue_update_dma_alignment(sdev->request_queue, 31);
 818		blk_queue_update_dma_pad(sdev->request_queue, 31);
 819
 820		/* Tell the world about it */
 821		ata_dev_info(dev, "OHare alignment limits applied\n");
 822		return 0;
 823	}
 824
 825	/* We only have issues with ATAPI */
 826	if (dev->class != ATA_DEV_ATAPI)
 827		return 0;
 828
 829	/* Shasta and K2 seem to have "issues" with reads ... */
 830	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
  831		/* Alright, these are bad; apply restrictions */
 832		blk_queue_update_dma_alignment(sdev->request_queue, 15);
 833		blk_queue_update_dma_pad(sdev->request_queue, 15);
 834
  835		/* We enable MWI and hack the cache line size directly here; these
  836		 * are chipset-specific settings, not normal values. We happen
  837		 * to somewhat know what we are doing here (which is basically
  838		 * to do the same thing Apple does and pray they did not get it wrong :-)
 839		 */
 840		BUG_ON(!priv->pdev);
 841		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
 842		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
 843		pci_write_config_word(priv->pdev, PCI_COMMAND,
 844				      cmd | PCI_COMMAND_INVALIDATE);
 845
 846		/* Tell the world about it */
 847		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
 848	}
 849
 850	return 0;
 851}
 852
 853#ifdef CONFIG_PM_SLEEP
 854static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
 855{
 856	/* First, core libata suspend to do most of the work */
 857	ata_host_suspend(priv->host, mesg);
 858
 859	/* Restore to default timings */
 860	pata_macio_default_timings(priv);
 861
  862	/* Mask the interrupt. Not strictly necessary but the old driver did
  863	 * it and I'd rather not change that here */
 864	disable_irq(priv->irq);
 865
 866	/* The media bay will handle itself just fine */
 867	if (priv->mediabay)
 868		return 0;
 869
 870	/* Kauai has bus control FCRs directly here */
 871	if (priv->kauai_fcr) {
 872		u32 fcr = readl(priv->kauai_fcr);
 873		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
 874		writel(fcr, priv->kauai_fcr);
 875	}
 876
 877	/* For PCI, save state and disable DMA. No need to call
  878	 * pci_set_power_state(); the HW doesn't do D states that
  879	 * way, and the platform code will take care of suspending the
 880	 * ASIC properly
 881	 */
 882	if (priv->pdev) {
 883		pci_save_state(priv->pdev);
 884		pci_disable_device(priv->pdev);
 885	}
 886
 887	/* Disable the bus on older machines and the cell on kauai */
 888	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
 889			    priv->aapl_bus_id, 0);
 890
 891	return 0;
 892}
 893
 894static int pata_macio_do_resume(struct pata_macio_priv *priv)
 895{
 896	/* Reset and re-enable the HW */
 897	pata_macio_reset_hw(priv, 1);
 898
 899	/* Sanitize drive timings */
 900	pata_macio_apply_timings(priv->host->ports[0], 0);
 901
 902	/* We want our IRQ back ! */
 903	enable_irq(priv->irq);
 904
 905	/* Let the libata core take it from there */
 906	ata_host_resume(priv->host);
 907
 908	return 0;
 909}
 910#endif /* CONFIG_PM_SLEEP */
 911
 912static const struct scsi_host_template pata_macio_sht = {
 913	__ATA_BASE_SHT(DRV_NAME),
 914	.sg_tablesize		= MAX_DCMDS,
  915	/* We may not need one this strict */
 916	.dma_boundary		= ATA_DMA_BOUNDARY,
  917	/* Not sure what the real max is, but we know it's less than 64K; let's
  918	 * use 64K minus 256.
 919	 */
 920	.max_segment_size	= MAX_DBDMA_SEG,
 921	.slave_configure	= pata_macio_slave_config,
 922	.sdev_groups		= ata_common_sdev_groups,
 923	.can_queue		= ATA_DEF_QUEUE,
 924	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 925};
 926
 927static struct ata_port_operations pata_macio_ops = {
 928	.inherits		= &ata_bmdma_port_ops,
 929
 930	.freeze			= pata_macio_freeze,
 931	.set_piomode		= pata_macio_set_timings,
 932	.set_dmamode		= pata_macio_set_timings,
 933	.cable_detect		= pata_macio_cable_detect,
 934	.sff_dev_select		= pata_macio_dev_select,
 935	.qc_prep		= pata_macio_qc_prep,
 936	.bmdma_setup		= pata_macio_bmdma_setup,
 937	.bmdma_start		= pata_macio_bmdma_start,
 938	.bmdma_stop		= pata_macio_bmdma_stop,
 939	.bmdma_status		= pata_macio_bmdma_status,
 940	.port_start		= pata_macio_port_start,
 941	.sff_irq_clear		= pata_macio_irq_clear,
 942};
 943
 944static void pata_macio_invariants(struct pata_macio_priv *priv)
 945{
 946	const int *bidp;
 947
 948	/* Identify the type of controller */
 949	if (of_device_is_compatible(priv->node, "shasta-ata")) {
 950		priv->kind = controller_sh_ata6;
  951		priv->timings = pata_macio_shasta_timings;
 952	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
 953		priv->kind = controller_un_ata6;
  954		priv->timings = pata_macio_kauai_timings;
 955	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
 956		priv->kind = controller_k2_ata6;
  957		priv->timings = pata_macio_kauai_timings;
 958	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
 959		if (of_node_name_eq(priv->node, "ata-4")) {
 960			priv->kind = controller_kl_ata4;
 961			priv->timings = pata_macio_kl66_timings;
 962		} else {
 963			priv->kind = controller_kl_ata3;
 964			priv->timings = pata_macio_kl33_timings;
 965		}
 966	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
 967		priv->kind = controller_heathrow;
 968		priv->timings = pata_macio_heathrow_timings;
 969	} else {
 970		priv->kind = controller_ohare;
 971		priv->timings = pata_macio_ohare_timings;
 972	}
 973
 974	/* XXX FIXME --- setup priv->mediabay here */
 975
 976	/* Get Apple bus ID (for clock and ASIC control) */
 977	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
 978	priv->aapl_bus_id =  bidp ? *bidp : 0;
 979
 980	/* Fixup missing Apple bus ID in case of media-bay */
 981	if (priv->mediabay && !bidp)
 982		priv->aapl_bus_id = 1;
 983}
 984
 985static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
 986				 void __iomem * base, void __iomem * dma)
 987{
 988	/* cmd_addr is the base of regs for that port */
 989	ioaddr->cmd_addr	= base;
 990
 991	/* taskfile registers */
 992	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
 993	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
 994	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
 995	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
 996	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
 997	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
 998	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
 999	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
1000	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
1001	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
1002	ioaddr->altstatus_addr	= base + 0x160;
1003	ioaddr->ctl_addr	= base + 0x160;
1004	ioaddr->bmdma_addr	= dma;
1005}
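     /* The macio cell spaces the taskfile registers 16 bytes apart, hence the
      * "<< 4" above: data at base + 0x00, error/feature at base + 0x10, ...,
      * status/command at base + 0x70, with the control/altstatus register off
      * on its own at base + 0x160.
      */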
1006
1007static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1008					 struct ata_port_info *pinfo)
1009{
1010	int i = 0;
1011
1012	pinfo->pio_mask		= 0;
1013	pinfo->mwdma_mask	= 0;
1014	pinfo->udma_mask	= 0;
1015
1016	while (priv->timings[i].mode > 0) {
1017		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
 1018		switch (priv->timings[i].mode & 0xf0) {
1019		case 0x00: /* PIO */
1020			pinfo->pio_mask |= (mask >> 8);
1021			break;
1022		case 0x20: /* MWDMA */
1023			pinfo->mwdma_mask |= mask;
1024			break;
1025		case 0x40: /* UDMA */
1026			pinfo->udma_mask |= mask;
1027			break;
1028		}
1029		i++;
1030	}
1031	dev_dbg(priv->dev, "Supported masks: PIO=%x, MWDMA=%x, UDMA=%x\n",
1032		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1033}
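     /* Worked example, assuming the usual ATA transfer mode numbering where
      * XFER_PIO_4 is 0x0c and XFER_UDMA_5 is 0x45: a table entry for
      * XFER_UDMA_5 sets bit 5 of udma_mask, while one for XFER_PIO_4 sets
      * bit 4 of pio_mask thanks to the ">> 8" above (PIO mode numbers start
      * at 0x08).
      */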
1034
1035static int pata_macio_common_init(struct pata_macio_priv *priv,
1036				  resource_size_t tfregs,
1037				  resource_size_t dmaregs,
1038				  resource_size_t fcregs,
1039				  unsigned long irq)
1040{
1041	struct ata_port_info		pinfo;
1042	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
1043	void __iomem			*dma_regs = NULL;
1044
1045	/* Fill up privates with various invariants collected from the
1046	 * device-tree
1047	 */
1048	pata_macio_invariants(priv);
1049
1050	/* Make sure we have sane initial timings in the cache */
1051	pata_macio_default_timings(priv);
1052
1053	/* Allocate libata host for 1 port */
1054	memset(&pinfo, 0, sizeof(struct ata_port_info));
1055	pmac_macio_calc_timing_masks(priv, &pinfo);
1056	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
1057	pinfo.port_ops		= &pata_macio_ops;
1058	pinfo.private_data	= priv;
1059
1060	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1061	if (priv->host == NULL) {
1062		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1063		return -ENOMEM;
1064	}
1065
1066	/* Setup the private data in host too */
1067	priv->host->private_data = priv;
1068
1069	/* Map base registers */
1070	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1071	if (priv->tfregs == NULL) {
1072		dev_err(priv->dev, "Failed to map ATA ports\n");
1073		return -ENOMEM;
1074	}
1075	priv->host->iomap = &priv->tfregs;
1076
1077	/* Map DMA regs */
1078	if (dmaregs != 0) {
1079		dma_regs = devm_ioremap(priv->dev, dmaregs,
1080					sizeof(struct dbdma_regs));
1081		if (dma_regs == NULL)
1082			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1083	}
1084
1085	/* If chip has local feature control, map those regs too */
1086	if (fcregs != 0) {
1087		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1088		if (priv->kauai_fcr == NULL) {
1089			dev_err(priv->dev, "Failed to map ATA FCR register\n");
1090			return -ENOMEM;
1091		}
1092	}
1093
1094	/* Setup port data structure */
1095	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1096			     priv->tfregs, dma_regs);
1097	priv->host->ports[0]->private_data = priv;
1098
1099	/* hard-reset the controller */
1100	pata_macio_reset_hw(priv, 0);
1101	pata_macio_apply_timings(priv->host->ports[0], 0);
1102
1103	/* Enable bus master if necessary */
1104	if (priv->pdev && dma_regs)
1105		pci_set_master(priv->pdev);
1106
1107	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1108		 macio_ata_names[priv->kind], priv->aapl_bus_id);
1109
1110	/* Start it up */
1111	priv->irq = irq;
1112	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1113				 &pata_macio_sht);
1114}
1115
1116static int pata_macio_attach(struct macio_dev *mdev,
1117			     const struct of_device_id *match)
1118{
1119	struct pata_macio_priv	*priv;
1120	resource_size_t		tfregs, dmaregs = 0;
1121	unsigned long		irq;
1122	int			rc;
1123
1124	/* Check for broken device-trees */
1125	if (macio_resource_count(mdev) == 0) {
1126		dev_err(&mdev->ofdev.dev,
1127			"No addresses for controller\n");
1128		return -ENXIO;
1129	}
1130
1131	/* Enable managed resources */
1132	macio_enable_devres(mdev);
1133
1134	/* Allocate and init private data structure */
1135	priv = devm_kzalloc(&mdev->ofdev.dev,
1136			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1137	if (!priv)
1138		return -ENOMEM;
1139
1140	priv->node = of_node_get(mdev->ofdev.dev.of_node);
1141	priv->mdev = mdev;
1142	priv->dev = &mdev->ofdev.dev;
1143
1144	/* Request memory resource for taskfile registers */
1145	if (macio_request_resource(mdev, 0, "pata-macio")) {
1146		dev_err(&mdev->ofdev.dev,
1147			"Cannot obtain taskfile resource\n");
1148		return -EBUSY;
1149	}
1150	tfregs = macio_resource_start(mdev, 0);
1151
1152	/* Request resources for DMA registers if any */
1153	if (macio_resource_count(mdev) >= 2) {
1154		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1155			dev_err(&mdev->ofdev.dev,
1156				"Cannot obtain DMA resource\n");
1157		else
1158			dmaregs = macio_resource_start(mdev, 1);
1159	}
1160
1161	/*
1162	 * Fixup missing IRQ for some old implementations with broken
1163	 * device-trees.
1164	 *
1165	 * This is a bit bogus, it should be fixed in the device-tree itself,
1166	 * via the existing macio fixups, based on the type of interrupt
1167	 * controller in the machine. However, I have no test HW for this case,
1168	 * and this trick works well enough on those old machines...
1169	 */
1170	if (macio_irq_count(mdev) == 0) {
1171		dev_warn(&mdev->ofdev.dev,
1172			 "No interrupts for controller, using 13\n");
1173		irq = irq_create_mapping(NULL, 13);
1174	} else
1175		irq = macio_irq(mdev, 0);
1176
 1177	/* Prevent media bay callbacks until fully registered */
1178	lock_media_bay(priv->mdev->media_bay);
1179
1180	/* Get register addresses and call common initialization */
1181	rc = pata_macio_common_init(priv,
1182				    tfregs,		/* Taskfile regs */
1183				    dmaregs,		/* DBDMA regs */
1184				    0,			/* Feature control */
1185				    irq);
1186	unlock_media_bay(priv->mdev->media_bay);
1187
1188	return rc;
1189}
1190
1191static int pata_macio_detach(struct macio_dev *mdev)
1192{
1193	struct ata_host *host = macio_get_drvdata(mdev);
1194	struct pata_macio_priv *priv = host->private_data;
1195
1196	lock_media_bay(priv->mdev->media_bay);
1197
1198	/* Make sure the mediabay callback doesn't try to access
1199	 * dead stuff
1200	 */
1201	priv->host->private_data = NULL;
1202
1203	ata_host_detach(host);
1204
1205	unlock_media_bay(priv->mdev->media_bay);
1206
1207	return 0;
1208}
1209
1210#ifdef CONFIG_PM_SLEEP
1211static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1212{
1213	struct ata_host *host = macio_get_drvdata(mdev);
1214
1215	return pata_macio_do_suspend(host->private_data, mesg);
1216}
1217
1218static int pata_macio_resume(struct macio_dev *mdev)
1219{
1220	struct ata_host *host = macio_get_drvdata(mdev);
1221
1222	return pata_macio_do_resume(host->private_data);
1223}
1224#endif /* CONFIG_PM_SLEEP */
1225
1226#ifdef CONFIG_PMAC_MEDIABAY
1227static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1228{
1229	struct ata_host *host = macio_get_drvdata(mdev);
1230	struct ata_port *ap;
1231	struct ata_eh_info *ehi;
1232	struct ata_device *dev;
1233	unsigned long flags;
1234
1235	if (!host || !host->private_data)
1236		return;
1237	ap = host->ports[0];
1238	spin_lock_irqsave(ap->lock, flags);
1239	ehi = &ap->link.eh_info;
1240	if (mb_state == MB_CD) {
1241		ata_ehi_push_desc(ehi, "mediabay plug");
1242		ata_ehi_hotplugged(ehi);
1243		ata_port_freeze(ap);
1244	} else {
1245		ata_ehi_push_desc(ehi, "mediabay unplug");
1246		ata_for_each_dev(dev, &ap->link, ALL)
1247			dev->flags |= ATA_DFLAG_DETACH;
1248		ata_port_abort(ap);
1249	}
1250	spin_unlock_irqrestore(ap->lock, flags);
1251
1252}
1253#endif /* CONFIG_PMAC_MEDIABAY */
1254
1255
1256static int pata_macio_pci_attach(struct pci_dev *pdev,
1257				 const struct pci_device_id *id)
1258{
1259	struct pata_macio_priv	*priv;
1260	struct device_node	*np;
1261	resource_size_t		rbase;
1262
1263	/* We cannot use a MacIO controller without its OF device node */
1264	np = pci_device_to_OF_node(pdev);
1265	if (np == NULL) {
1266		dev_err(&pdev->dev,
1267			"Cannot find OF device node for controller\n");
1268		return -ENODEV;
1269	}
1270
1271	/* Check that it can be enabled */
1272	if (pcim_enable_device(pdev)) {
1273		dev_err(&pdev->dev,
1274			"Cannot enable controller PCI device\n");
1275		return -ENXIO;
1276	}
1277
1278	/* Allocate and init private data structure */
1279	priv = devm_kzalloc(&pdev->dev,
1280			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1281	if (!priv)
1282		return -ENOMEM;
1283
1284	priv->node = of_node_get(np);
1285	priv->pdev = pdev;
1286	priv->dev = &pdev->dev;
1287
1288	/* Get MMIO regions */
1289	if (pci_request_regions(pdev, "pata-macio")) {
1290		dev_err(&pdev->dev,
1291			"Cannot obtain PCI resources\n");
1292		return -EBUSY;
1293	}
1294
1295	/* Get register addresses and call common initialization */
1296	rbase = pci_resource_start(pdev, 0);
1297	if (pata_macio_common_init(priv,
1298				   rbase + 0x2000,	/* Taskfile regs */
1299				   rbase + 0x1000,	/* DBDMA regs */
1300				   rbase,		/* Feature control */
1301				   pdev->irq))
1302		return -ENXIO;
1303
1304	return 0;
1305}
1306
1307static void pata_macio_pci_detach(struct pci_dev *pdev)
1308{
1309	struct ata_host *host = pci_get_drvdata(pdev);
1310
1311	ata_host_detach(host);
1312}
1313
1314#ifdef CONFIG_PM_SLEEP
1315static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1316{
1317	struct ata_host *host = pci_get_drvdata(pdev);
1318
1319	return pata_macio_do_suspend(host->private_data, mesg);
1320}
1321
1322static int pata_macio_pci_resume(struct pci_dev *pdev)
1323{
1324	struct ata_host *host = pci_get_drvdata(pdev);
1325
1326	return pata_macio_do_resume(host->private_data);
1327}
1328#endif /* CONFIG_PM_SLEEP */
1329
1330static const struct of_device_id pata_macio_match[] =
1331{
1332	{ .name = "IDE", },
1333	{ .name = "ATA", },
1334	{ .type = "ide", },
1335	{ .type = "ata", },
1336	{ /* sentinel */ }
1337};
1338MODULE_DEVICE_TABLE(of, pata_macio_match);
1339
1340static struct macio_driver pata_macio_driver =
1341{
1342	.driver = {
1343		.name 		= "pata-macio",
1344		.owner		= THIS_MODULE,
1345		.of_match_table	= pata_macio_match,
1346	},
1347	.probe		= pata_macio_attach,
1348	.remove		= pata_macio_detach,
1349#ifdef CONFIG_PM_SLEEP
1350	.suspend	= pata_macio_suspend,
1351	.resume		= pata_macio_resume,
1352#endif
1353#ifdef CONFIG_PMAC_MEDIABAY
1354	.mediabay_event	= pata_macio_mb_event,
1355#endif
1356};
1357
1358static const struct pci_device_id pata_macio_pci_match[] = {
1359	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
1360	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
1361	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
1362	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
1363	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
1364	{},
1365};
1366
1367static struct pci_driver pata_macio_pci_driver = {
1368	.name		= "pata-pci-macio",
1369	.id_table	= pata_macio_pci_match,
1370	.probe		= pata_macio_pci_attach,
1371	.remove		= pata_macio_pci_detach,
1372#ifdef CONFIG_PM_SLEEP
1373	.suspend	= pata_macio_pci_suspend,
1374	.resume		= pata_macio_pci_resume,
1375#endif
1376	.driver = {
1377		.owner		= THIS_MODULE,
1378	},
1379};
1380MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1381
1382
1383static int __init pata_macio_init(void)
1384{
1385	int rc;
1386
1387	if (!machine_is(powermac))
1388		return -ENODEV;
1389
1390	rc = pci_register_driver(&pata_macio_pci_driver);
1391	if (rc)
1392		return rc;
1393	rc = macio_register_driver(&pata_macio_driver);
1394	if (rc) {
1395		pci_unregister_driver(&pata_macio_pci_driver);
1396		return rc;
1397	}
1398	return 0;
1399}
1400
1401static void __exit pata_macio_exit(void)
1402{
1403	macio_unregister_driver(&pata_macio_driver);
1404	pci_unregister_driver(&pata_macio_pci_driver);
1405}
1406
1407module_init(pata_macio_init);
1408module_exit(pata_macio_exit);
1409
1410MODULE_AUTHOR("Benjamin Herrenschmidt");
1411MODULE_DESCRIPTION("Apple MacIO PATA driver");
1412MODULE_LICENSE("GPL");
1413MODULE_VERSION(DRV_VERSION);