   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Libata based driver for Apple "macio" family of PATA controllers
   4 *
   5 * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
   6 *                     <benh@kernel.crashing.org>
   7 *
   8 * Some bits and pieces from drivers/ide/ppc/pmac.c
   9 *
  10 */
  11
  12#undef DEBUG
  13#undef DEBUG_DMA
  14
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/blkdev.h>
  19#include <linux/ata.h>
  20#include <linux/libata.h>
  21#include <linux/adb.h>
  22#include <linux/pmu.h>
  23#include <linux/scatterlist.h>
  24#include <linux/of.h>
  25#include <linux/gfp.h>
  26#include <linux/pci.h>
  27
  28#include <scsi/scsi.h>
  29#include <scsi/scsi_host.h>
  30#include <scsi/scsi_device.h>
  31
  32#include <asm/macio.h>
  33#include <asm/io.h>
  34#include <asm/dbdma.h>
  35#include <asm/machdep.h>
  36#include <asm/pmac_feature.h>
  37#include <asm/mediabay.h>
  38
  39#ifdef DEBUG_DMA
  40#define dev_dbgdma(dev, format, arg...)		\
  41	dev_printk(KERN_DEBUG , dev , format , ## arg)
  42#else
  43#define dev_dbgdma(dev, format, arg...)		\
  44	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
  45#endif
  46
  47#define DRV_NAME	"pata_macio"
  48#define DRV_VERSION	"0.9"
  49
  50/* Models of macio ATA controller */
  51enum {
  52	controller_ohare,	/* OHare based */
  53	controller_heathrow,	/* Heathrow/Paddington */
  54	controller_kl_ata3,	/* KeyLargo ATA-3 */
  55	controller_kl_ata4,	/* KeyLargo ATA-4 */
  56	controller_un_ata6,	/* UniNorth2 ATA-6 */
  57	controller_k2_ata6,	/* K2 ATA-6 */
  58	controller_sh_ata6,	/* Shasta ATA-6 */
  59};
  60
  61static const char* macio_ata_names[] = {
  62	"OHare ATA",		/* OHare based */
  63	"Heathrow ATA",		/* Heathrow/Paddington */
  64	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
  65	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
  66	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
  67	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
  68	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
  69};
  70
  71/*
  72 * Extra registers, both 32-bit little-endian
  73 */
  74#define IDE_TIMING_CONFIG	0x200
  75#define IDE_INTERRUPT		0x300
  76
  77/* Kauai (U2) ATA has different register setup */
  78#define IDE_KAUAI_PIO_CONFIG	0x200
  79#define IDE_KAUAI_ULTRA_CONFIG	0x210
  80#define IDE_KAUAI_POLL_CONFIG	0x220
  81
  82/*
  83 * Timing configuration register definitions
  84 */
  85
  86/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
  87#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
  88#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
  89#define IDE_SYSCLK_NS		30	/* 33Mhz cell */
  90#define IDE_SYSCLK_66_NS	15	/* 66Mhz cell */
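/*
 * Worked example (illustrative only): a 120ns requirement rounds up to
 * SYSCLK_TICKS(120) = (120 + 29) / 30 = 4 ticks on the 33MHz cell, and to
 * SYSCLK_TICKS_66(120) = (120 + 14) / 15 = 8 ticks on the 66MHz cell.
 */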
  91
  92/* 133MHz cell, found in Shasta.
  93 * See comments about the 100MHz Uninorth 2 cell below...
  94 * Note that PIO_MASK and MDMA_MASK seem to overlap; that's just
  95 * weird and I don't know why, at this stage.
  96 */
  97#define TR_133_PIOREG_PIO_MASK		0xff000fff
  98#define TR_133_PIOREG_MDMA_MASK		0x00fff800
  99#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
 100#define TR_133_UDMAREG_UDMA_EN		0x00000001
 101
 102/* 100MHz cell, found in Uninorth 2 and K2. It appears as a PCI device
 103 * (106b/0033) on the Uninorth or K2 internal PCI bus and its clock is
 104 * controlled like gem or fw. It appears to be an evolution of the KeyLargo
 105 * ATA4 with the timing register extended to two 32-bit registers (one
 106 * for PIO & MWDMA and one for UDMA) and a similar DBDMA channel.
 107 * It has its own local feature control register as well.
 108 *
 109 * After scratching my head over the timing values, at least for PIO
 110 * and MDMA, I think I've figured out the format of the timing register,
 111 * though I use pre-calculated tables for UDMA as usual...
 112 */
 113#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
 114#define TR_100_PIO_ADDRSETUP_SHIFT	24
 115#define TR_100_MDMA_MASK		0x00fff000
 116#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
 117#define TR_100_MDMA_RECOVERY_SHIFT	18
 118#define TR_100_MDMA_ACCESS_MASK		0x0003f000
 119#define TR_100_MDMA_ACCESS_SHIFT	12
 120#define TR_100_PIO_MASK			0xff000fff
 121#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
 122#define TR_100_PIO_RECOVERY_SHIFT	6
 123#define TR_100_PIO_ACCESS_MASK		0x0000003f
 124#define TR_100_PIO_ACCESS_SHIFT		0
 125
 126#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
 127#define TR_100_UDMAREG_UDMA_EN		0x00000001
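/*
 * Illustrative sketch (not part of this driver): given access and recovery
 * times in clock ticks, a 100MHz PIO timing word could be assembled from
 * the fields above roughly like this:
 *
 *	u32 t = ((recovery << TR_100_PIO_RECOVERY_SHIFT) &
 *		 TR_100_PIO_RECOVERY_MASK) |
 *		((access << TR_100_PIO_ACCESS_SHIFT) &
 *		 TR_100_PIO_ACCESS_MASK);
 *
 * The driver itself sticks to the pre-calculated tables further down.
 */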
 128
 129
 130/* 66MHz cell, found in KeyLargo. Can do ultra modes 0 to 2 on a
 131 * 40-conductor cable and up to mode 4 on an 80-conductor one.
 132 * Clock unit is 15ns (66MHz).
 133 *
 134 * 3 values can be programmed:
 135 *  - Write data setup, which appears to match the cycle time. They
 136 *    also call it DIOW setup.
 137 *  - Ready to pause time (from the spec)
 138 *  - Address setup. That one is weird. I don't see where exactly
 139 *    it fits in UDMA cycles; I got its name from an obscure piece
 140 *    of commented out code in Darwin. They leave it at 0 and we do as
 141 *    well, despite a comment that would lead one to think it has a
 142 *    min value of 45ns.
 143 * Apple also adds 60ns to the write data setup (or cycle time ?) on
 144 * reads.
 145 */
 146#define TR_66_UDMA_MASK			0xfff00000
 147#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
 148#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
 149#define TR_66_PIO_ADDRSETUP_SHIFT	29
 150#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
 151#define TR_66_UDMA_RDY2PAUS_SHIFT	25
 152#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
 153#define TR_66_UDMA_WRDATASETUP_SHIFT	21
 154#define TR_66_MDMA_MASK			0x000ffc00
 155#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
 156#define TR_66_MDMA_RECOVERY_SHIFT	15
 157#define TR_66_MDMA_ACCESS_MASK		0x00007c00
 158#define TR_66_MDMA_ACCESS_SHIFT		10
 159#define TR_66_PIO_MASK			0xe00003ff
 160#define TR_66_PIO_RECOVERY_MASK		0x000003e0
 161#define TR_66_PIO_RECOVERY_SHIFT	5
 162#define TR_66_PIO_ACCESS_MASK		0x0000001f
 163#define TR_66_PIO_ACCESS_SHIFT		0
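/*
 * Illustrative sketch (not part of this driver): a 66MHz UDMA timing word
 * would combine these fields with the enable bit, e.g.
 *
 *	u32 t = TR_66_UDMA_EN |
 *		((rdy2paus << TR_66_UDMA_RDY2PAUS_SHIFT) &
 *		 TR_66_UDMA_RDY2PAUS_MASK) |
 *		((wrsetup << TR_66_UDMA_WRDATASETUP_SHIFT) &
 *		 TR_66_UDMA_WRDATASETUP_MASK);
 *
 * The pata_macio_kl66_timings[] table below carries pre-computed values of
 * this form.
 */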
 164
 165/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
 166 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
 167 *
 168 * The access time and recovery time can be programmed. Some older
 169 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
 170 * the same here for safety against broken old hardware ;)
 171 * The HalfTick bit, when set, adds half a clock (15ns) to the access
 172 * time and removes one from recovery. It's not supported on the KeyLargo
 173 * implementation AFAIK. The E bit appears to be set for PIO mode 0 and
 174 * is used to reach long timings used in this mode.
 175 */
 176#define TR_33_MDMA_MASK			0x003ff800
 177#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
 178#define TR_33_MDMA_RECOVERY_SHIFT	16
 179#define TR_33_MDMA_ACCESS_MASK		0x0000f800
 180#define TR_33_MDMA_ACCESS_SHIFT		11
 181#define TR_33_MDMA_HALFTICK		0x00200000
 182#define TR_33_PIO_MASK			0x000007ff
 183#define TR_33_PIO_E			0x00000400
 184#define TR_33_PIO_RECOVERY_MASK		0x000003e0
 185#define TR_33_PIO_RECOVERY_SHIFT	5
 186#define TR_33_PIO_ACCESS_MASK		0x0000001f
 187#define TR_33_PIO_ACCESS_SHIFT		0
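/*
 * Worked example (illustrative only): with the 30ns clock of this cell,
 * an access field of 5 ticks corresponds to 150ns and a recovery field of
 * 10 ticks to 300ns.
 */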
 188
 189/*
 190 * Interrupt register definitions. Only present on newer cells
 191 * (Keylargo and later afaik) so we don't use it.
 192 */
 193#define IDE_INTR_DMA			0x80000000
 194#define IDE_INTR_DEVICE			0x40000000
 195
 196/*
 197 * FCR Register on Kauai. Not sure what bit 0x4 is  ...
 198 */
 199#define KAUAI_FCR_UATA_MAGIC		0x00000004
 200#define KAUAI_FCR_UATA_RESET_N		0x00000002
 201#define KAUAI_FCR_UATA_ENABLE		0x00000001
 202
 203
 204/* Allow up to 256 DBDMA commands per xfer */
 205#define MAX_DCMDS		256
 206
 207/* Don't let a DMA segment go all the way to 64K */
 208#define MAX_DBDMA_SEG		0xff00
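/*
 * Worked example (illustrative only): if a scatterlist segment were a full
 * 64KiB, pata_macio_qc_prep() below would split it into one 0xff00-byte
 * DBDMA command plus one 0x0100-byte command. With max_segment_size set to
 * MAX_DBDMA_SEG in the scsi host template this shouldn't happen in
 * practice, so each segment normally maps to a single command.
 */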
 209
 210
 211/*
 212 * Wait 1s for disk to answer on IDE bus after a hard reset
 213 * of the device (via GPIO/FCR).
 214 *
 215 * Some devices seem to "pollute" the bus even after dropping
 216 * the BSY bit (typically some combo drives slave on the UDMA
 217 * bus) after a hard reset. Since we hard reset all drives on
 218 * KeyLargo ATA66, we have to keep that delay around. I may end
 219 * up not hard resetting anymore on these and keep the delay only
 220 * for older interfaces instead (we have to reset when coming
 221 * from MacOS...) --BenH.
 222 */
 223#define IDE_WAKEUP_DELAY_MS	1000
 224
 225struct pata_macio_timing;
 226
 227struct pata_macio_priv {
 228	int				kind;
 229	int				aapl_bus_id;
 230	int				mediabay : 1;
 231	struct device_node		*node;
 232	struct macio_dev		*mdev;
 233	struct pci_dev			*pdev;
 234	struct device			*dev;
 235	int				irq;
 236	u32				treg[2][2];
 237	void __iomem			*tfregs;
 238	void __iomem			*kauai_fcr;
 239	struct dbdma_cmd *		dma_table_cpu;
 240	dma_addr_t			dma_table_dma;
 241	struct ata_host			*host;
 242	const struct pata_macio_timing	*timings;
 243};
 244
 245/* Previous variants of this driver used to calculate timings
 246 * for various variants of the chip and use tables for others.
 247 *
 248 * Not only was this confusing, but in addition, it isn't clear
 249 * whether our calculation code was correct. It didn't entirely
 250 * match the Darwin code or whatever documentation I could find
 251 * on these cells.
 252 *
 253 * I decided to rely entirely on a table instead for this version
 254 * of the driver. Also, because I don't really care about derated
 255 * modes and really old HW other than making it work, I'm not going
 256 * to calculate / snoop timing values for anything other than the
 257 * standard modes.
 258 */
 259struct pata_macio_timing {
 260	int	mode;
 261	u32	reg1;	/* Bits to set in first timing reg */
 262	u32	reg2;	/* Bits to set in second timing reg */
 263};
 264
 265static const struct pata_macio_timing pata_macio_ohare_timings[] = {
 266	{ XFER_PIO_0,		0x00000526,	0, },
 267	{ XFER_PIO_1,		0x00000085,	0, },
 268	{ XFER_PIO_2,		0x00000025,	0, },
 269	{ XFER_PIO_3,		0x00000025,	0, },
 270	{ XFER_PIO_4,		0x00000025,	0, },
 271	{ XFER_MW_DMA_0,	0x00074000,	0, },
 272	{ XFER_MW_DMA_1,	0x00221000,	0, },
 273	{ XFER_MW_DMA_2,	0x00211000,	0, },
 274	{ -1, 0, 0 }
 275};
 276
 277static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
 278	{ XFER_PIO_0,		0x00000526,	0, },
 279	{ XFER_PIO_1,		0x00000085,	0, },
 280	{ XFER_PIO_2,		0x00000025,	0, },
 281	{ XFER_PIO_3,		0x00000025,	0, },
 282	{ XFER_PIO_4,		0x00000025,	0, },
 283	{ XFER_MW_DMA_0,	0x00074000,	0, },
 284	{ XFER_MW_DMA_1,	0x00221000,	0, },
 285	{ XFER_MW_DMA_2,	0x00211000,	0, },
 286	{ -1, 0, 0 }
 287};
 288
 289static const struct pata_macio_timing pata_macio_kl33_timings[] = {
 290	{ XFER_PIO_0,		0x00000526,	0, },
 291	{ XFER_PIO_1,		0x00000085,	0, },
 292	{ XFER_PIO_2,		0x00000025,	0, },
 293	{ XFER_PIO_3,		0x00000025,	0, },
 294	{ XFER_PIO_4,		0x00000025,	0, },
 295	{ XFER_MW_DMA_0,	0x00084000,	0, },
 296	{ XFER_MW_DMA_1,	0x00021800,	0, },
 297	{ XFER_MW_DMA_2,	0x00011800,	0, },
 298	{ -1, 0, 0 }
 299};
 300
 301static const struct pata_macio_timing pata_macio_kl66_timings[] = {
 302	{ XFER_PIO_0,		0x0000038c,	0, },
 303	{ XFER_PIO_1,		0x0000020a,	0, },
 304	{ XFER_PIO_2,		0x00000127,	0, },
 305	{ XFER_PIO_3,		0x000000c6,	0, },
 306	{ XFER_PIO_4,		0x00000065,	0, },
 307	{ XFER_MW_DMA_0,	0x00084000,	0, },
 308	{ XFER_MW_DMA_1,	0x00029800,	0, },
 309	{ XFER_MW_DMA_2,	0x00019400,	0, },
 310	{ XFER_UDMA_0,		0x19100000,	0, },
 311	{ XFER_UDMA_1,		0x14d00000,	0, },
 312	{ XFER_UDMA_2,		0x10900000,	0, },
 313	{ XFER_UDMA_3,		0x0c700000,	0, },
 314	{ XFER_UDMA_4,		0x0c500000,	0, },
 315	{ -1, 0, 0 }
 316};
 317
 318static const struct pata_macio_timing pata_macio_kauai_timings[] = {
 319	{ XFER_PIO_0,		0x08000a92,	0, },
 320	{ XFER_PIO_1,		0x0800060f,	0, },
 321	{ XFER_PIO_2,		0x0800038b,	0, },
 322	{ XFER_PIO_3,		0x05000249,	0, },
 323	{ XFER_PIO_4,		0x04000148,	0, },
 324	{ XFER_MW_DMA_0,	0x00618000,	0, },
 325	{ XFER_MW_DMA_1,	0x00209000,	0, },
 326	{ XFER_MW_DMA_2,	0x00148000,	0, },
 327	{ XFER_UDMA_0,		         0,	0x000070c1, },
 328	{ XFER_UDMA_1,		         0,	0x00005d81, },
 329	{ XFER_UDMA_2,		         0,	0x00004a61, },
 330	{ XFER_UDMA_3,		         0,	0x00003a51, },
 331	{ XFER_UDMA_4,		         0,	0x00002a31, },
 332	{ XFER_UDMA_5,		         0,	0x00002921, },
 333	{ -1, 0, 0 }
 334};
 335
 336static const struct pata_macio_timing pata_macio_shasta_timings[] = {
 337	{ XFER_PIO_0,		0x0a000c97,	0, },
 338	{ XFER_PIO_1,		0x07000712,	0, },
 339	{ XFER_PIO_2,		0x040003cd,	0, },
 340	{ XFER_PIO_3,		0x0500028b,	0, },
 341	{ XFER_PIO_4,		0x0400010a,	0, },
 342	{ XFER_MW_DMA_0,	0x00820800,	0, },
 343	{ XFER_MW_DMA_1,	0x0028b000,	0, },
 344	{ XFER_MW_DMA_2,	0x001ca000,	0, },
 345	{ XFER_UDMA_0,		         0,	0x00035901, },
 346	{ XFER_UDMA_1,		         0,	0x000348b1, },
 347	{ XFER_UDMA_2,		         0,	0x00033881, },
 348	{ XFER_UDMA_3,		         0,	0x00033861, },
 349	{ XFER_UDMA_4,		         0,	0x00033841, },
 350	{ XFER_UDMA_5,		         0,	0x00033031, },
 351	{ XFER_UDMA_6,		         0,	0x00033021, },
 352	{ -1, 0, 0 }
 353};
 354
 355static const struct pata_macio_timing *pata_macio_find_timing(
 356					    struct pata_macio_priv *priv,
 357					    int mode)
 358{
 359	int i;
 360
 361	for (i = 0; priv->timings[i].mode > 0; i++) {
 362		if (priv->timings[i].mode == mode)
 363			return &priv->timings[i];
 364	}
 365	return NULL;
 366}
 367
 368
 369static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
 370{
 371	struct pata_macio_priv *priv = ap->private_data;
 372	void __iomem *rbase = ap->ioaddr.cmd_addr;
 373
 374	if (priv->kind == controller_sh_ata6 ||
 375	    priv->kind == controller_un_ata6 ||
 376	    priv->kind == controller_k2_ata6) {
 377		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
 378		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
 379	} else
 380		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
 381}
 382
 383static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
 384{
 385	ata_sff_dev_select(ap, device);
 386
 387	/* Apply timings */
 388	pata_macio_apply_timings(ap, device);
 389}
 390
 391static void pata_macio_set_timings(struct ata_port *ap,
 392				   struct ata_device *adev)
 393{
 394	struct pata_macio_priv *priv = ap->private_data;
 395	const struct pata_macio_timing *t;
 396
 397	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
 398		adev->devno,
 399		adev->pio_mode,
 400		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
 401		adev->dma_mode,
 402		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
 403
 404	/* First clear timings */
 405	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
 406
 407	/* Now get the PIO timings */
 408	t = pata_macio_find_timing(priv, adev->pio_mode);
 409	if (t == NULL) {
 410		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
 411			 adev->pio_mode);
 412		t = pata_macio_find_timing(priv, XFER_PIO_0);
 413	}
 414	BUG_ON(t == NULL);
 415
 416	/* PIO timings only ever use the first treg */
 417	priv->treg[adev->devno][0] |= t->reg1;
 418
 419	/* Now get DMA timings */
 420	t = pata_macio_find_timing(priv, adev->dma_mode);
 421	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
 422		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
 423		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
 424	}
 425	BUG_ON(t == NULL);
 426
 427	/* DMA timings can use both tregs */
 428	priv->treg[adev->devno][0] |= t->reg1;
 429	priv->treg[adev->devno][1] |= t->reg2;
 430
 431	dev_dbg(priv->dev, " -> %08x %08x\n",
 432		priv->treg[adev->devno][0],
 433		priv->treg[adev->devno][1]);
 434
 435	/* Apply to hardware */
 436	pata_macio_apply_timings(ap, adev->devno);
 437}
 438
 439/*
 440 * Blast some well-known "safe" values into the timing registers at init
 441 * or wakeup-from-sleep time, before we do the real calculation.
 442 */
 443static void pata_macio_default_timings(struct pata_macio_priv *priv)
 444{
 445	unsigned int value, value2 = 0;
 446
 447	switch(priv->kind) {
 448		case controller_sh_ata6:
 449			value = 0x0a820c97;
 450			value2 = 0x00033031;
 451			break;
 452		case controller_un_ata6:
 453		case controller_k2_ata6:
 454			value = 0x08618a92;
 455			value2 = 0x00002921;
 456			break;
 457		case controller_kl_ata4:
 458			value = 0x0008438c;
 459			break;
 460		case controller_kl_ata3:
 461			value = 0x00084526;
 462			break;
 463		case controller_heathrow:
 464		case controller_ohare:
 465		default:
 466			value = 0x00074526;
 467			break;
 468	}
 469	priv->treg[0][0] = priv->treg[1][0] = value;
 470	priv->treg[0][1] = priv->treg[1][1] = value2;
 471}
 472
 473static int pata_macio_cable_detect(struct ata_port *ap)
 474{
 475	struct pata_macio_priv *priv = ap->private_data;
 476
 477	/* Get cable type from device-tree */
 478	if (priv->kind == controller_kl_ata4 ||
 479	    priv->kind == controller_un_ata6 ||
 480	    priv->kind == controller_k2_ata6 ||
 481	    priv->kind == controller_sh_ata6) {
 482		const char* cable = of_get_property(priv->node, "cable-type",
 483						    NULL);
 484		struct device_node *root = of_find_node_by_path("/");
 485		const char *model = of_get_property(root, "model", NULL);
 486
 487		of_node_put(root);
 488
 489		if (cable && !strncmp(cable, "80-", 3)) {
 490			/* Some drives fail to detect the 80c cable in
 491			 * PowerBooks. These machines use a proprietary
 492			 * short IDE cable anyway.
 493			 */
 494			if (!strncmp(model, "PowerBook", 9))
 495				return ATA_CBL_PATA40_SHORT;
 496			else
 497				return ATA_CBL_PATA80;
 498		}
 499	}
 500
 501	/* G5s seem to have an incorrect cable type in the device-tree.
 502	 * Let's assume they always have an 80-conductor cable; this seems
 503	 * to always be the case unless the user mucked around.
 504	 */
 505	if (of_device_is_compatible(priv->node, "K2-UATA") ||
 506	    of_device_is_compatible(priv->node, "shasta-ata"))
 507		return ATA_CBL_PATA80;
 508
 509	/* Anything else is a 40-conductor cable */
 510	return ATA_CBL_PATA40;
 511}
 512
 513static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 514{
 515	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 516	struct ata_port *ap = qc->ap;
 517	struct pata_macio_priv *priv = ap->private_data;
 518	struct scatterlist *sg;
 519	struct dbdma_cmd *table;
 520	unsigned int si, pi;
 521
 522	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
 523		   __func__, qc, qc->flags, write, qc->dev->devno);
 524
 525	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 526		return AC_ERR_OK;
 527
 528	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 529
 530	pi = 0;
 531	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 532		u32 addr, sg_len, len;
 533
 534		/* determine if physical DMA addr spans 64K boundary.
 535		 * Note h/w doesn't support 64-bit, so we unconditionally
 536		 * truncate dma_addr_t to u32.
 537		 */
 538		addr = (u32) sg_dma_address(sg);
 539		sg_len = sg_dma_len(sg);
 540
 541		while (sg_len) {
 542			/* table overflow should never happen */
 543			BUG_ON (pi++ >= MAX_DCMDS);
 544
 545			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 546			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
 547			table->req_count = cpu_to_le16(len);
 548			table->phy_addr = cpu_to_le32(addr);
 549			table->cmd_dep = 0;
 550			table->xfer_status = 0;
 551			table->res_count = 0;
 552			addr += len;
 553			sg_len -= len;
 554			++table;
 555		}
 556	}
 557
 558	/* Should never happen according to Tejun */
 559	BUG_ON(!pi);
 560
 561	/* Convert the last command to an input/output */
 562	table--;
 563	table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
 564	table++;
 565
 566	/* Add the stop command to the end of the list */
 567	memset(table, 0, sizeof(struct dbdma_cmd));
 568	table->command = cpu_to_le16(DBDMA_STOP);
 569
 570	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
 571
 572	return AC_ERR_OK;
 573}
 574
 575
 576static void pata_macio_freeze(struct ata_port *ap)
 577{
 578	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 579
 580	if (dma_regs) {
 581		unsigned int timeout = 1000000;
 582
 583		/* Make sure DMA controller is stopped */
 584		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
 585		while (--timeout && (readl(&dma_regs->status) & RUN))
 586			udelay(1);
 587	}
 588
 589	ata_sff_freeze(ap);
 590}
 591
 592
 593static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
 594{
 595	struct ata_port *ap = qc->ap;
 596	struct pata_macio_priv *priv = ap->private_data;
 597	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 598	int dev = qc->dev->devno;
 599
 600	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 601
 602	/* Make sure DMA command list updates are visible */
 603	writel(priv->dma_table_dma, &dma_regs->cmdptr);
 604
 605	/* On the KeyLargo 66MHz cell, we need to add 60ns to wrDataSetup
 606	 * on UDMA reads
 607	 */
 608	if (priv->kind == controller_kl_ata4 &&
 609	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
 610		void __iomem *rbase = ap->ioaddr.cmd_addr;
 611		u32 reg = priv->treg[dev][0];
 612
 613		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 614			reg += 0x00800000;
 615		writel(reg, rbase + IDE_TIMING_CONFIG);
 616	}
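	/* Worked note (illustrative): 0x00800000 lands in the 66MHz cell's
	 * write-data-setup field (shift 21), i.e. it adds 4 extra 15ns
	 * ticks, which is the 60ns mentioned above.
	 */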
 617
 618	/* issue r/w command */
 619	ap->ops->sff_exec_command(ap, &qc->tf);
 620}
 621
 622static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
 623{
 624	struct ata_port *ap = qc->ap;
 625	struct pata_macio_priv *priv = ap->private_data;
 626	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 627
 628	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 629
 630	writel((RUN << 16) | RUN, &dma_regs->control);
 631	/* Make sure it gets to the controller right now */
 632	(void)readl(&dma_regs->control);
 633}
 634
 635static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
 636{
 637	struct ata_port *ap = qc->ap;
 638	struct pata_macio_priv *priv = ap->private_data;
 639	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 640	unsigned int timeout = 1000000;
 641
 642	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 643
 644	/* Stop the DMA engine and wait for it to fully halt */
 645	writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
 646	while (--timeout && (readl(&dma_regs->status) & RUN))
 647		udelay(1);
 648}
 649
 650static u8 pata_macio_bmdma_status(struct ata_port *ap)
 651{
 652	struct pata_macio_priv *priv = ap->private_data;
 653	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 654	u32 dstat, rstat = ATA_DMA_INTR;
 655	unsigned long timeout = 0;
 656
 657	dstat = readl(&dma_regs->status);
 658
 659	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
 660
 661	/* We have two things to deal with here:
 662	 *
 663	 * - The dbdma won't stop if the command was started
 664	 * but completed with an error without transferring all
 665	 * the data. This happens when bad blocks are encountered
 666	 * during a multi-block transfer.
 667	 *
 668	 * - The dbdma fifo hasn't yet finished flushing to
 669	 * system memory when the disk interrupt occurs.
 670	 */
 671
 672	/* First check for errors */
 673	if ((dstat & (RUN|DEAD)) != RUN)
 674		rstat |= ATA_DMA_ERR;
 675
 676	/* If ACTIVE is cleared, the STOP command has been hit and
 677	 * the transfer is complete. If not, we have to flush the
 678	 * channel.
 679	 */
 680	if ((dstat & ACTIVE) == 0)
 681		return rstat;
 682
 683	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
 684
 685	/* If dbdma didn't execute the STOP command yet, the
 686	 * active bit is still set. We consider that we aren't
 687	 * sharing interrupts (which is hopefully the case with
 688	 * those controllers) and so we just try to flush the
 689	 * channel for pending data in the fifo
 690	 */
 691	udelay(1);
 692	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
 693	for (;;) {
 694		udelay(1);
 695		dstat = readl(&dma_regs->status);
 696		if ((dstat & FLUSH) == 0)
 697			break;
 698		if (++timeout > 1000) {
 699			dev_warn(priv->dev, "timeout flushing DMA\n");
 700			rstat |= ATA_DMA_ERR;
 701			break;
 702		}
 703	}
 704	return rstat;
 705}
 706
 707/* port_start is when we allocate the DMA command list */
 708static int pata_macio_port_start(struct ata_port *ap)
 709{
 710	struct pata_macio_priv *priv = ap->private_data;
 711
 712	if (ap->ioaddr.bmdma_addr == NULL)
 713		return 0;
 714
 715	/* Allocate space for the DBDMA commands.
 716	 *
 717	 * The +2 is +1 for the stop command and +1 to allow for
 718	 * aligning the start address to a multiple of 16 bytes.
 719	 */
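	/* For example (illustrative, assuming a 16-byte struct dbdma_cmd):
	 * this allocates (256 + 2) * 16 = 4128 bytes of coherent memory.
	 */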
 720	priv->dma_table_cpu =
 721		dmam_alloc_coherent(priv->dev,
 722				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
 723				    &priv->dma_table_dma, GFP_KERNEL);
 724	if (priv->dma_table_cpu == NULL) {
 725		dev_err(priv->dev, "Unable to allocate DMA command list\n");
 726		ap->ioaddr.bmdma_addr = NULL;
 727		ap->mwdma_mask = 0;
 728		ap->udma_mask = 0;
 729	}
 730	return 0;
 731}
 732
 733static void pata_macio_irq_clear(struct ata_port *ap)
 734{
 735	struct pata_macio_priv *priv = ap->private_data;
 736
 737	/* Nothing to do here */
 738
 739	dev_dbgdma(priv->dev, "%s\n", __func__);
 740}
 741
 742static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
 743{
 744	dev_dbg(priv->dev, "Enabling & resetting... \n");
 745
 746	if (priv->mediabay)
 747		return;
 748
 749	if (priv->kind == controller_ohare && !resume) {
 750		/* The code below is having trouble on some OHare machines
 751		 * (timing related?). Until I can get my hands on one of these
 752		 * units, I keep the old way.
 753		 */
 754		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
 755	} else {
 756		int rc;
 757
 758 		/* Reset and enable controller */
 759		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 760					 priv->node, priv->aapl_bus_id, 1);
 761		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
 762				    priv->node, priv->aapl_bus_id, 1);
 763		msleep(10);
 764		/* Only bother waiting if there's a reset control */
 765		if (rc == 0) {
 766			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 767					    priv->node, priv->aapl_bus_id, 0);
 768			msleep(IDE_WAKEUP_DELAY_MS);
 769		}
 770	}
 771
 772	/* If resuming a PCI device, restore the config space here */
 773	if (priv->pdev && resume) {
 774		int rc;
 775
 776		pci_restore_state(priv->pdev);
 777		rc = pcim_enable_device(priv->pdev);
 778		if (rc)
 779			dev_err(&priv->pdev->dev,
 780				"Failed to enable device after resume (%d)\n",
 781				rc);
 782		else
 783			pci_set_master(priv->pdev);
 784	}
 785
 786	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
 787	 * really seem necessary and skipping it speeds up the boot process.
 788	 */
 789	if (priv->kauai_fcr)
 790		writel(KAUAI_FCR_UATA_MAGIC |
 791		       KAUAI_FCR_UATA_RESET_N |
 792		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
 793}
 794
 795/* Hook the standard slave config to fix up some HW-related alignment
 796 * restrictions.
 797 */
 798static int pata_macio_slave_config(struct scsi_device *sdev)
 799{
 800	struct ata_port *ap = ata_shost_to_port(sdev->host);
 801	struct pata_macio_priv *priv = ap->private_data;
 802	struct ata_device *dev;
 803	u16 cmd;
 804	int rc;
 805
 806	/* First call original */
 807	rc = ata_scsi_slave_config(sdev);
 808	if (rc)
 809		return rc;
 810
 811	/* This is lifted from sata_nv */
 812	dev = &ap->link.device[sdev->id];
 813
 814	/* OHare has issues with non cache aligned DMA on some chipsets */
 815	if (priv->kind == controller_ohare) {
 816		blk_queue_update_dma_alignment(sdev->request_queue, 31);
 817		blk_queue_update_dma_pad(sdev->request_queue, 31);
 818
 819		/* Tell the world about it */
 820		ata_dev_info(dev, "OHare alignment limits applied\n");
 821		return 0;
 822	}
 823
 824	/* We only have issues with ATAPI */
 825	if (dev->class != ATA_DEV_ATAPI)
 826		return 0;
 827
 828	/* Shasta and K2 seem to have "issues" with reads ... */
 829	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
 830		/* Alright, these are bad; apply restrictions */
 831		blk_queue_update_dma_alignment(sdev->request_queue, 15);
 832		blk_queue_update_dma_pad(sdev->request_queue, 15);
 833
 834		/* We enable MWI and hack the cache line size directly here;
 835		 * these are specific to this chipset and not normal values.
 836		 * We somewhat know what we are doing here (which is basically
 837		 * to do what Apple does and pray they did not get it wrong :-)
 838		 */
 839		BUG_ON(!priv->pdev);
 840		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
 841		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
 842		pci_write_config_word(priv->pdev, PCI_COMMAND,
 843				      cmd | PCI_COMMAND_INVALIDATE);
 844
 845		/* Tell the world about it */
 846		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
 847	}
 848
 849	return 0;
 850}
 851
 852#ifdef CONFIG_PM_SLEEP
 853static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
 854{
 855	/* First, core libata suspend to do most of the work */
 856	ata_host_suspend(priv->host, mesg);
 857
 858	/* Restore to default timings */
 859	pata_macio_default_timings(priv);
 860
 861	/* Mask the interrupt. Not strictly necessary, but the old driver
 862	 * did it and I'd rather not change that here */
 863	disable_irq(priv->irq);
 864
 865	/* The media bay will handle itself just fine */
 866	if (priv->mediabay)
 867		return 0;
 868
 869	/* Kauai has bus control FCRs directly here */
 870	if (priv->kauai_fcr) {
 871		u32 fcr = readl(priv->kauai_fcr);
 872		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
 873		writel(fcr, priv->kauai_fcr);
 874	}
 875
 876	/* For PCI, save state and disable DMA. No need to call
 877	 * pci_set_power_state(); the HW doesn't do D-states that
 878	 * way, and the platform code will take care of suspending
 879	 * the ASIC properly.
 880	 */
 881	if (priv->pdev) {
 882		pci_save_state(priv->pdev);
 883		pci_disable_device(priv->pdev);
 884	}
 885
 886	/* Disable the bus on older machines and the cell on kauai */
 887	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
 888			    priv->aapl_bus_id, 0);
 889
 890	return 0;
 891}
 892
 893static int pata_macio_do_resume(struct pata_macio_priv *priv)
 894{
 895	/* Reset and re-enable the HW */
 896	pata_macio_reset_hw(priv, 1);
 897
 898	/* Sanitize drive timings */
 899	pata_macio_apply_timings(priv->host->ports[0], 0);
 900
 901	/* We want our IRQ back ! */
 902	enable_irq(priv->irq);
 903
 904	/* Let the libata core take it from there */
 905	ata_host_resume(priv->host);
 906
 907	return 0;
 908}
 909#endif /* CONFIG_PM_SLEEP */
 910
 911static struct scsi_host_template pata_macio_sht = {
 912	__ATA_BASE_SHT(DRV_NAME),
 913	.sg_tablesize		= MAX_DCMDS,
 914	/* We may not need that strict one */
 915	.dma_boundary		= ATA_DMA_BOUNDARY,
 916	/* Not sure what the real max is but we know it's less than 64K, let's
 917	 * use 64K minus 256
 918	 */
 919	.max_segment_size	= MAX_DBDMA_SEG,
 920	.slave_configure	= pata_macio_slave_config,
 921	.sdev_groups		= ata_common_sdev_groups,
 922	.can_queue		= ATA_DEF_QUEUE,
 923	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 924};
 925
 926static struct ata_port_operations pata_macio_ops = {
 927	.inherits		= &ata_bmdma_port_ops,
 928
 929	.freeze			= pata_macio_freeze,
 930	.set_piomode		= pata_macio_set_timings,
 931	.set_dmamode		= pata_macio_set_timings,
 932	.cable_detect		= pata_macio_cable_detect,
 933	.sff_dev_select		= pata_macio_dev_select,
 934	.qc_prep		= pata_macio_qc_prep,
 935	.bmdma_setup		= pata_macio_bmdma_setup,
 936	.bmdma_start		= pata_macio_bmdma_start,
 937	.bmdma_stop		= pata_macio_bmdma_stop,
 938	.bmdma_status		= pata_macio_bmdma_status,
 939	.port_start		= pata_macio_port_start,
 940	.sff_irq_clear		= pata_macio_irq_clear,
 941};
 942
 943static void pata_macio_invariants(struct pata_macio_priv *priv)
 944{
 945	const int *bidp;
 946
 947	/* Identify the type of controller */
 948	if (of_device_is_compatible(priv->node, "shasta-ata")) {
 949		priv->kind = controller_sh_ata6;
 950	        priv->timings = pata_macio_shasta_timings;
 951	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
 952		priv->kind = controller_un_ata6;
 953	        priv->timings = pata_macio_kauai_timings;
 954	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
 955		priv->kind = controller_k2_ata6;
 956	        priv->timings = pata_macio_kauai_timings;
 957	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
 958		if (of_node_name_eq(priv->node, "ata-4")) {
 959			priv->kind = controller_kl_ata4;
 960			priv->timings = pata_macio_kl66_timings;
 961		} else {
 962			priv->kind = controller_kl_ata3;
 963			priv->timings = pata_macio_kl33_timings;
 964		}
 965	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
 966		priv->kind = controller_heathrow;
 967		priv->timings = pata_macio_heathrow_timings;
 968	} else {
 969		priv->kind = controller_ohare;
 970		priv->timings = pata_macio_ohare_timings;
 971	}
 972
 973	/* XXX FIXME --- setup priv->mediabay here */
 974
 975	/* Get Apple bus ID (for clock and ASIC control) */
 976	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
 977	priv->aapl_bus_id =  bidp ? *bidp : 0;
 978
 979	/* Fixup missing Apple bus ID in case of media-bay */
 980	if (priv->mediabay && !bidp)
 981		priv->aapl_bus_id = 1;
 982}
 983
 984static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
 985				 void __iomem * base, void __iomem * dma)
 986{
 987	/* cmd_addr is the base of regs for that port */
 988	ioaddr->cmd_addr	= base;
 989
 990	/* taskfile registers */
 991	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
 992	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
 993	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
 994	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
 995	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
 996	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
 997	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
 998	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
 999	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
1000	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
1001	ioaddr->altstatus_addr	= base + 0x160;
1002	ioaddr->ctl_addr	= base + 0x160;
1003	ioaddr->bmdma_addr	= dma;
1004}
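/*
 * Note (illustrative): the << 4 above reflects the macio register layout,
 * where consecutive taskfile registers are 16 bytes apart; e.g. the data
 * register sits at cmd_addr + 0x00 and the error/feature register at
 * cmd_addr + 0x10.
 */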
1005
1006static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1007					 struct ata_port_info *pinfo)
1008{
1009	int i = 0;
1010
1011	pinfo->pio_mask		= 0;
1012	pinfo->mwdma_mask	= 0;
1013	pinfo->udma_mask	= 0;
1014
1015	while (priv->timings[i].mode > 0) {
1016		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
1017		switch(priv->timings[i].mode & 0xf0) {
1018		case 0x00: /* PIO */
1019			pinfo->pio_mask |= (mask >> 8);
1020			break;
1021		case 0x20: /* MWDMA */
1022			pinfo->mwdma_mask |= mask;
1023			break;
1024		case 0x40: /* UDMA */
1025			pinfo->udma_mask |= mask;
1026			break;
1027		}
1028		i++;
1029	}
1030	dev_dbg(priv->dev, "Supported masks: PIO=%x, MWDMA=%x, UDMA=%x\n",
1031		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1032}
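/*
 * Worked example (illustrative only): XFER_UDMA_4 is 0x44, so the switch
 * above picks the UDMA class (0x40) and sets bit 4 of udma_mask, while
 * XFER_PIO_4 is 0x0c, giving 1 << 12 which the >> 8 turns into bit 4 of
 * pio_mask.
 */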
1033
1034static int pata_macio_common_init(struct pata_macio_priv *priv,
1035				  resource_size_t tfregs,
1036				  resource_size_t dmaregs,
1037				  resource_size_t fcregs,
1038				  unsigned long irq)
1039{
1040	struct ata_port_info		pinfo;
1041	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
1042	void __iomem			*dma_regs = NULL;
1043
1044	/* Fill up privates with various invariants collected from the
1045	 * device-tree
1046	 */
1047	pata_macio_invariants(priv);
1048
1049	/* Make sure we have sane initial timings in the cache */
1050	pata_macio_default_timings(priv);
1051
1052	/* Allocate libata host for 1 port */
1053	memset(&pinfo, 0, sizeof(struct ata_port_info));
1054	pmac_macio_calc_timing_masks(priv, &pinfo);
1055	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
1056	pinfo.port_ops		= &pata_macio_ops;
1057	pinfo.private_data	= priv;
1058
1059	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1060	if (priv->host == NULL) {
1061		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1062		return -ENOMEM;
1063	}
1064
1065	/* Setup the private data in host too */
1066	priv->host->private_data = priv;
1067
1068	/* Map base registers */
1069	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1070	if (priv->tfregs == NULL) {
1071		dev_err(priv->dev, "Failed to map ATA ports\n");
1072		return -ENOMEM;
1073	}
1074	priv->host->iomap = &priv->tfregs;
1075
1076	/* Map DMA regs */
1077	if (dmaregs != 0) {
1078		dma_regs = devm_ioremap(priv->dev, dmaregs,
1079					sizeof(struct dbdma_regs));
1080		if (dma_regs == NULL)
1081			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1082	}
1083
1084	/* If chip has local feature control, map those regs too */
1085	if (fcregs != 0) {
1086		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1087		if (priv->kauai_fcr == NULL) {
1088			dev_err(priv->dev, "Failed to map ATA FCR register\n");
1089			return -ENOMEM;
1090		}
1091	}
1092
1093	/* Setup port data structure */
1094	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1095			     priv->tfregs, dma_regs);
1096	priv->host->ports[0]->private_data = priv;
1097
1098	/* hard-reset the controller */
1099	pata_macio_reset_hw(priv, 0);
1100	pata_macio_apply_timings(priv->host->ports[0], 0);
1101
1102	/* Enable bus master if necessary */
1103	if (priv->pdev && dma_regs)
1104		pci_set_master(priv->pdev);
1105
1106	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1107		 macio_ata_names[priv->kind], priv->aapl_bus_id);
1108
1109	/* Start it up */
1110	priv->irq = irq;
1111	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1112				 &pata_macio_sht);
1113}
1114
1115static int pata_macio_attach(struct macio_dev *mdev,
1116			     const struct of_device_id *match)
1117{
1118	struct pata_macio_priv	*priv;
1119	resource_size_t		tfregs, dmaregs = 0;
1120	unsigned long		irq;
1121	int			rc;
1122
1123	/* Check for broken device-trees */
1124	if (macio_resource_count(mdev) == 0) {
1125		dev_err(&mdev->ofdev.dev,
1126			"No addresses for controller\n");
1127		return -ENXIO;
1128	}
1129
1130	/* Enable managed resources */
1131	macio_enable_devres(mdev);
1132
1133	/* Allocate and init private data structure */
1134	priv = devm_kzalloc(&mdev->ofdev.dev,
1135			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1136	if (!priv)
1137		return -ENOMEM;
1138
1139	priv->node = of_node_get(mdev->ofdev.dev.of_node);
1140	priv->mdev = mdev;
1141	priv->dev = &mdev->ofdev.dev;
1142
1143	/* Request memory resource for taskfile registers */
1144	if (macio_request_resource(mdev, 0, "pata-macio")) {
1145		dev_err(&mdev->ofdev.dev,
1146			"Cannot obtain taskfile resource\n");
1147		return -EBUSY;
1148	}
1149	tfregs = macio_resource_start(mdev, 0);
1150
1151	/* Request resources for DMA registers if any */
1152	if (macio_resource_count(mdev) >= 2) {
1153		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1154			dev_err(&mdev->ofdev.dev,
1155				"Cannot obtain DMA resource\n");
1156		else
1157			dmaregs = macio_resource_start(mdev, 1);
1158	}
1159
1160	/*
1161	 * Fixup missing IRQ for some old implementations with broken
1162	 * device-trees.
1163	 *
1164	 * This is a bit bogus, it should be fixed in the device-tree itself,
1165	 * via the existing macio fixups, based on the type of interrupt
1166	 * controller in the machine. However, I have no test HW for this case,
1167	 * and this trick works well enough on those old machines...
1168	 */
1169	if (macio_irq_count(mdev) == 0) {
1170		dev_warn(&mdev->ofdev.dev,
1171			 "No interrupts for controller, using 13\n");
1172		irq = irq_create_mapping(NULL, 13);
1173	} else
1174		irq = macio_irq(mdev, 0);
1175
1176	/* Prevent media bay callbacks until fully registered */
1177	lock_media_bay(priv->mdev->media_bay);
1178
1179	/* Get register addresses and call common initialization */
1180	rc = pata_macio_common_init(priv,
1181				    tfregs,		/* Taskfile regs */
1182				    dmaregs,		/* DBDMA regs */
1183				    0,			/* Feature control */
1184				    irq);
1185	unlock_media_bay(priv->mdev->media_bay);
1186
1187	return rc;
1188}
1189
1190static int pata_macio_detach(struct macio_dev *mdev)
1191{
1192	struct ata_host *host = macio_get_drvdata(mdev);
1193	struct pata_macio_priv *priv = host->private_data;
1194
1195	lock_media_bay(priv->mdev->media_bay);
1196
1197	/* Make sure the mediabay callback doesn't try to access
1198	 * dead stuff
1199	 */
1200	priv->host->private_data = NULL;
1201
1202	ata_host_detach(host);
1203
1204	unlock_media_bay(priv->mdev->media_bay);
1205
1206	return 0;
1207}
1208
1209#ifdef CONFIG_PM_SLEEP
1210static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1211{
1212	struct ata_host *host = macio_get_drvdata(mdev);
1213
1214	return pata_macio_do_suspend(host->private_data, mesg);
1215}
1216
1217static int pata_macio_resume(struct macio_dev *mdev)
1218{
1219	struct ata_host *host = macio_get_drvdata(mdev);
1220
1221	return pata_macio_do_resume(host->private_data);
1222}
1223#endif /* CONFIG_PM_SLEEP */
1224
1225#ifdef CONFIG_PMAC_MEDIABAY
1226static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1227{
1228	struct ata_host *host = macio_get_drvdata(mdev);
1229	struct ata_port *ap;
1230	struct ata_eh_info *ehi;
1231	struct ata_device *dev;
1232	unsigned long flags;
1233
1234	if (!host || !host->private_data)
1235		return;
1236	ap = host->ports[0];
1237	spin_lock_irqsave(ap->lock, flags);
1238	ehi = &ap->link.eh_info;
1239	if (mb_state == MB_CD) {
1240		ata_ehi_push_desc(ehi, "mediabay plug");
1241		ata_ehi_hotplugged(ehi);
1242		ata_port_freeze(ap);
1243	} else {
1244		ata_ehi_push_desc(ehi, "mediabay unplug");
1245		ata_for_each_dev(dev, &ap->link, ALL)
1246			dev->flags |= ATA_DFLAG_DETACH;
1247		ata_port_abort(ap);
1248	}
1249	spin_unlock_irqrestore(ap->lock, flags);
1250
1251}
1252#endif /* CONFIG_PMAC_MEDIABAY */
1253
1254
1255static int pata_macio_pci_attach(struct pci_dev *pdev,
1256				 const struct pci_device_id *id)
1257{
1258	struct pata_macio_priv	*priv;
1259	struct device_node	*np;
1260	resource_size_t		rbase;
1261
1262	/* We cannot use a MacIO controller without its OF device node */
1263	np = pci_device_to_OF_node(pdev);
1264	if (np == NULL) {
1265		dev_err(&pdev->dev,
1266			"Cannot find OF device node for controller\n");
1267		return -ENODEV;
1268	}
1269
1270	/* Check that it can be enabled */
1271	if (pcim_enable_device(pdev)) {
1272		dev_err(&pdev->dev,
1273			"Cannot enable controller PCI device\n");
1274		return -ENXIO;
1275	}
1276
1277	/* Allocate and init private data structure */
1278	priv = devm_kzalloc(&pdev->dev,
1279			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1280	if (!priv)
1281		return -ENOMEM;
1282
1283	priv->node = of_node_get(np);
1284	priv->pdev = pdev;
1285	priv->dev = &pdev->dev;
1286
1287	/* Get MMIO regions */
1288	if (pci_request_regions(pdev, "pata-macio")) {
1289		dev_err(&pdev->dev,
1290			"Cannot obtain PCI resources\n");
1291		return -EBUSY;
1292	}
1293
1294	/* Get register addresses and call common initialization */
1295	rbase = pci_resource_start(pdev, 0);
1296	if (pata_macio_common_init(priv,
1297				   rbase + 0x2000,	/* Taskfile regs */
1298				   rbase + 0x1000,	/* DBDMA regs */
1299				   rbase,		/* Feature control */
1300				   pdev->irq))
1301		return -ENXIO;
1302
1303	return 0;
1304}
1305
1306static void pata_macio_pci_detach(struct pci_dev *pdev)
1307{
1308	struct ata_host *host = pci_get_drvdata(pdev);
1309
1310	ata_host_detach(host);
1311}
1312
1313#ifdef CONFIG_PM_SLEEP
1314static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1315{
1316	struct ata_host *host = pci_get_drvdata(pdev);
1317
1318	return pata_macio_do_suspend(host->private_data, mesg);
1319}
1320
1321static int pata_macio_pci_resume(struct pci_dev *pdev)
1322{
1323	struct ata_host *host = pci_get_drvdata(pdev);
1324
1325	return pata_macio_do_resume(host->private_data);
1326}
1327#endif /* CONFIG_PM_SLEEP */
1328
1329static const struct of_device_id pata_macio_match[] =
1330{
1331	{ .name = "IDE", },
1332	{ .name = "ATA", },
1333	{ .type = "ide", },
1334	{ .type = "ata", },
1335	{ /* sentinel */ }
1336};
1337MODULE_DEVICE_TABLE(of, pata_macio_match);
1338
1339static struct macio_driver pata_macio_driver =
1340{
1341	.driver = {
1342		.name 		= "pata-macio",
1343		.owner		= THIS_MODULE,
1344		.of_match_table	= pata_macio_match,
1345	},
1346	.probe		= pata_macio_attach,
1347	.remove		= pata_macio_detach,
1348#ifdef CONFIG_PM_SLEEP
1349	.suspend	= pata_macio_suspend,
1350	.resume		= pata_macio_resume,
1351#endif
1352#ifdef CONFIG_PMAC_MEDIABAY
1353	.mediabay_event	= pata_macio_mb_event,
1354#endif
1355};
1356
1357static const struct pci_device_id pata_macio_pci_match[] = {
1358	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
1359	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
1360	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
1361	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
1362	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
1363	{},
1364};
1365
1366static struct pci_driver pata_macio_pci_driver = {
1367	.name		= "pata-pci-macio",
1368	.id_table	= pata_macio_pci_match,
1369	.probe		= pata_macio_pci_attach,
1370	.remove		= pata_macio_pci_detach,
1371#ifdef CONFIG_PM_SLEEP
1372	.suspend	= pata_macio_pci_suspend,
1373	.resume		= pata_macio_pci_resume,
1374#endif
1375	.driver = {
1376		.owner		= THIS_MODULE,
1377	},
1378};
1379MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1380
1381
1382static int __init pata_macio_init(void)
1383{
1384	int rc;
1385
1386	if (!machine_is(powermac))
1387		return -ENODEV;
1388
1389	rc = pci_register_driver(&pata_macio_pci_driver);
1390	if (rc)
1391		return rc;
1392	rc = macio_register_driver(&pata_macio_driver);
1393	if (rc) {
1394		pci_unregister_driver(&pata_macio_pci_driver);
1395		return rc;
1396	}
1397	return 0;
1398}
1399
1400static void __exit pata_macio_exit(void)
1401{
1402	macio_unregister_driver(&pata_macio_driver);
1403	pci_unregister_driver(&pata_macio_pci_driver);
1404}
1405
1406module_init(pata_macio_init);
1407module_exit(pata_macio_exit);
1408
1409MODULE_AUTHOR("Benjamin Herrenschmidt");
1410MODULE_DESCRIPTION("Apple MacIO PATA driver");
1411MODULE_LICENSE("GPL");
1412MODULE_VERSION(DRV_VERSION);
 377		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
 378	} else
 379		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
 380}
 381
 382static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
 383{
 384	ata_sff_dev_select(ap, device);
 385
 386	/* Apply timings */
 387	pata_macio_apply_timings(ap, device);
 388}
 389
 390static void pata_macio_set_timings(struct ata_port *ap,
 391				   struct ata_device *adev)
 392{
 393	struct pata_macio_priv *priv = ap->private_data;
 394	const struct pata_macio_timing *t;
 395
 396	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
 397		adev->devno,
 398		adev->pio_mode,
 399		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
 400		adev->dma_mode,
 401		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
 402
 403	/* First clear timings */
 404	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
 405
 406	/* Now get the PIO timings */
 407	t = pata_macio_find_timing(priv, adev->pio_mode);
 408	if (t == NULL) {
 409		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
 410			 adev->pio_mode);
 411		t = pata_macio_find_timing(priv, XFER_PIO_0);
 412	}
 413	BUG_ON(t == NULL);
 414
 415	/* PIO timings only ever use the first treg */
 416	priv->treg[adev->devno][0] |= t->reg1;
 417
 418	/* Now get DMA timings */
 419	t = pata_macio_find_timing(priv, adev->dma_mode);
 420	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
 421		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
 422		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
 423	}
 424	BUG_ON(t == NULL);
 425
 426	/* DMA timings can use both tregs */
 427	priv->treg[adev->devno][0] |= t->reg1;
 428	priv->treg[adev->devno][1] |= t->reg2;
 429
 430	dev_dbg(priv->dev, " -> %08x %08x\n",
 431		priv->treg[adev->devno][0],
 432		priv->treg[adev->devno][1]);
 433
 434	/* Apply to hardware */
 435	pata_macio_apply_timings(ap, adev->devno);
 436}
 437
 438/*
 439 * Blast some well known "safe" values to the timing registers at init or
 440 * wakeup from sleep time, before we do real calculation
 441 */
 442static void pata_macio_default_timings(struct pata_macio_priv *priv)
 443{
 444	unsigned int value, value2 = 0;
 445
 446	switch(priv->kind) {
 447		case controller_sh_ata6:
 448			value = 0x0a820c97;
 449			value2 = 0x00033031;
 450			break;
 451		case controller_un_ata6:
 452		case controller_k2_ata6:
 453			value = 0x08618a92;
 454			value2 = 0x00002921;
 455			break;
 456		case controller_kl_ata4:
 457			value = 0x0008438c;
 458			break;
 459		case controller_kl_ata3:
 460			value = 0x00084526;
 461			break;
 462		case controller_heathrow:
 463		case controller_ohare:
 464		default:
 465			value = 0x00074526;
 466			break;
 467	}
 468	priv->treg[0][0] = priv->treg[1][0] = value;
 469	priv->treg[0][1] = priv->treg[1][1] = value2;
 470}
 471
 472static int pata_macio_cable_detect(struct ata_port *ap)
 473{
 474	struct pata_macio_priv *priv = ap->private_data;
 475
 476	/* Get cable type from device-tree */
 477	if (priv->kind == controller_kl_ata4 ||
 478	    priv->kind == controller_un_ata6 ||
 479	    priv->kind == controller_k2_ata6 ||
 480	    priv->kind == controller_sh_ata6) {
 481		const char* cable = of_get_property(priv->node, "cable-type",
 482						    NULL);
 483		struct device_node *root = of_find_node_by_path("/");
 484		const char *model = of_get_property(root, "model", NULL);
  485
 486		if (cable && !strncmp(cable, "80-", 3)) {
  487			/* Some drives fail to detect the 80c cable in
  488			 * PowerBooks. These machines use a proprietary
  489			 * short IDE cable anyway.
  490			 */
  491			if (model && !strncmp(model, "PowerBook", 9))
 492				return ATA_CBL_PATA40_SHORT;
 493			else
 494				return ATA_CBL_PATA80;
 495		}
 496	}
 497
  498	/* G5s seem to have an incorrect cable type in the device-tree.
  499	 * Let's assume they always have an 80 conductor cable; this seems
  500	 * to always be the case unless the user mucked around.
  501	 */
 502	if (of_device_is_compatible(priv->node, "K2-UATA") ||
 503	    of_device_is_compatible(priv->node, "shasta-ata"))
 504		return ATA_CBL_PATA80;
 505
  506	/* Anything else is 40 conductors */
 507	return ATA_CBL_PATA40;
 508}
 509
 510static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 511{
 512	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 513	struct ata_port *ap = qc->ap;
 514	struct pata_macio_priv *priv = ap->private_data;
 515	struct scatterlist *sg;
 516	struct dbdma_cmd *table;
 517	unsigned int si, pi;
 518
 519	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
 520		   __func__, qc, qc->flags, write, qc->dev->devno);
 521
 522	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 523		return;
 524
 525	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 526
 527	pi = 0;
 528	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 529		u32 addr, sg_len, len;
 530
  531		/* Note: the h/w doesn't support 64-bit addressing, so we
  532		 * unconditionally truncate dma_addr_t to u32. Each sg
  533		 * element is split below into MAX_DBDMA_SEG sized chunks.
  534		 */
 535		addr = (u32) sg_dma_address(sg);
 536		sg_len = sg_dma_len(sg);
 537
 538		while (sg_len) {
 539			/* table overflow should never happen */
  540			BUG_ON(pi++ >= MAX_DCMDS);
 541
 542			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 543			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
 544			table->req_count = cpu_to_le16(len);
 545			table->phy_addr = cpu_to_le32(addr);
 546			table->cmd_dep = 0;
 547			table->xfer_status = 0;
 548			table->res_count = 0;
 549			addr += len;
 550			sg_len -= len;
 551			++table;
 552		}
 553	}
 554
 555	/* Should never happen according to Tejun */
 556	BUG_ON(!pi);
 557
 558	/* Convert the last command to an input/output */
 559	table--;
 560	table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
 561	table++;
 562
 563	/* Add the stop command to the end of the list */
 564	memset(table, 0, sizeof(struct dbdma_cmd));
 565	table->command = cpu_to_le16(DBDMA_STOP);
 566
  567	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
  568}
 569
 570
 571static void pata_macio_freeze(struct ata_port *ap)
 572{
 573	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 574
 575	if (dma_regs) {
 576		unsigned int timeout = 1000000;
 577
 578		/* Make sure DMA controller is stopped */
 579		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
 580		while (--timeout && (readl(&dma_regs->status) & RUN))
 581			udelay(1);
 582	}
 583
 584	ata_sff_freeze(ap);
 585}
 586
 587
 588static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
 589{
 590	struct ata_port *ap = qc->ap;
 591	struct pata_macio_priv *priv = ap->private_data;
 592	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 593	int dev = qc->dev->devno;
 594
 595	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 596
  597	/* Make sure DMA command list updates are visible */
 598	writel(priv->dma_table_dma, &dma_regs->cmdptr);
 599
  600	/* On the KeyLargo 66MHz cell, we need to add 60ns to wrDataSetup on
 601	 * UDMA reads
 602	 */
 603	if (priv->kind == controller_kl_ata4 &&
 604	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
 605		void __iomem *rbase = ap->ioaddr.cmd_addr;
 606		u32 reg = priv->treg[dev][0];
 607
 608		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 609			reg += 0x00800000;
 610		writel(reg, rbase + IDE_TIMING_CONFIG);
 611	}
 612
 613	/* issue r/w command */
 614	ap->ops->sff_exec_command(ap, &qc->tf);
 615}
 616
 617static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
 618{
 619	struct ata_port *ap = qc->ap;
 620	struct pata_macio_priv *priv = ap->private_data;
 621	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 622
 623	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 624
 625	writel((RUN << 16) | RUN, &dma_regs->control);
 626	/* Make sure it gets to the controller right now */
 627	(void)readl(&dma_regs->control);
 628}
 629
 630static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
 631{
 632	struct ata_port *ap = qc->ap;
 633	struct pata_macio_priv *priv = ap->private_data;
 634	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 635	unsigned int timeout = 1000000;
 636
 637	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 638
  639	/* Stop the DMA engine and wait for it to fully halt */
  640	writel((RUN|WAKE|DEAD) << 16, &dma_regs->control);
 641	while (--timeout && (readl(&dma_regs->status) & RUN))
 642		udelay(1);
 643}
 644
 645static u8 pata_macio_bmdma_status(struct ata_port *ap)
 646{
 647	struct pata_macio_priv *priv = ap->private_data;
 648	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 649	u32 dstat, rstat = ATA_DMA_INTR;
 650	unsigned long timeout = 0;
 651
 652	dstat = readl(&dma_regs->status);
 653
 654	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
 655
 656	/* We have two things to deal with here:
 657	 *
 658	 * - The dbdma won't stop if the command was started
 659	 * but completed with an error without transferring all
  660	 * the data. This happens when bad blocks are met during
 661	 * a multi-block transfer.
 662	 *
 663	 * - The dbdma fifo hasn't yet finished flushing to
  664	 * system memory when the disk interrupt occurs.
 665	 *
 666	 */
 667
 668	/* First check for errors */
 669	if ((dstat & (RUN|DEAD)) != RUN)
 670		rstat |= ATA_DMA_ERR;
 671
 672	/* If ACTIVE is cleared, the STOP command has been hit and
 673	 * the transfer is complete. If not, we have to flush the
 674	 * channel.
 675	 */
 676	if ((dstat & ACTIVE) == 0)
 677		return rstat;
 678
 679	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
 680
 681	/* If dbdma didn't execute the STOP command yet, the
 682	 * active bit is still set. We consider that we aren't
 683	 * sharing interrupts (which is hopefully the case with
 684	 * those controllers) and so we just try to flush the
 685	 * channel for pending data in the fifo
 686	 */
 687	udelay(1);
 688	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
 689	for (;;) {
 690		udelay(1);
 691		dstat = readl(&dma_regs->status);
 692		if ((dstat & FLUSH) == 0)
 693			break;
 694		if (++timeout > 1000) {
 695			dev_warn(priv->dev, "timeout flushing DMA\n");
 696			rstat |= ATA_DMA_ERR;
 697			break;
 698		}
 699	}
 700	return rstat;
 701}
 702
 703/* port_start is when we allocate the DMA command list */
 704static int pata_macio_port_start(struct ata_port *ap)
 705{
 706	struct pata_macio_priv *priv = ap->private_data;
 707
 708	if (ap->ioaddr.bmdma_addr == NULL)
 709		return 0;
 710
 711	/* Allocate space for the DBDMA commands.
 712	 *
 713	 * The +2 is +1 for the stop command and +1 to allow for
 714	 * aligning the start address to a multiple of 16 bytes.
 715	 */
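	/* Added note: dmam_alloc_coherent() is device-managed, so this
	 * buffer is released automatically when the device goes away;
	 * no explicit free is needed elsewhere in the driver.
	 */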
 716	priv->dma_table_cpu =
 717		dmam_alloc_coherent(priv->dev,
 718				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
 719				    &priv->dma_table_dma, GFP_KERNEL);
 720	if (priv->dma_table_cpu == NULL) {
 721		dev_err(priv->dev, "Unable to allocate DMA command list\n");
 722		ap->ioaddr.bmdma_addr = NULL;
 723		ap->mwdma_mask = 0;
 724		ap->udma_mask = 0;
 725	}
 726	return 0;
 727}
 728
 729static void pata_macio_irq_clear(struct ata_port *ap)
 730{
 731	struct pata_macio_priv *priv = ap->private_data;
 732
 733	/* Nothing to do here */
 734
 735	dev_dbgdma(priv->dev, "%s\n", __func__);
 736}
 737
 738static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
 739{
  740	dev_dbg(priv->dev, "Enabling & resetting...\n");
 741
 742	if (priv->mediabay)
 743		return;
 744
 745	if (priv->kind == controller_ohare && !resume) {
  746		/* The code below has trouble on some OHare machines
  747		 * (timing related?). Until I can put my hands on one of
  748		 * these units, I'll keep the old way.
 749		 */
 750		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
 751	} else {
 752		int rc;
 753
  754		/* Reset and enable controller */
 755		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 756					 priv->node, priv->aapl_bus_id, 1);
 757		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
 758				    priv->node, priv->aapl_bus_id, 1);
 759		msleep(10);
 760		/* Only bother waiting if there's a reset control */
 761		if (rc == 0) {
 762			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 763					    priv->node, priv->aapl_bus_id, 0);
 764			msleep(IDE_WAKEUP_DELAY_MS);
 765		}
 766	}
 767
 768	/* If resuming a PCI device, restore the config space here */
 769	if (priv->pdev && resume) {
 770		int rc;
 771
 772		pci_restore_state(priv->pdev);
 773		rc = pcim_enable_device(priv->pdev);
 774		if (rc)
 775			dev_err(&priv->pdev->dev,
 776				"Failed to enable device after resume (%d)\n",
 777				rc);
 778		else
 779			pci_set_master(priv->pdev);
 780	}
 781
  782	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
  783	 * really seem necessary and skipping it speeds up the boot process.
 784	 */
 785	if (priv->kauai_fcr)
 786		writel(KAUAI_FCR_UATA_MAGIC |
 787		       KAUAI_FCR_UATA_RESET_N |
 788		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
 789}
 790
  791/* Hook the standard slave config to fix up some HW-related alignment
  792 * restrictions
 793 */
 794static int pata_macio_slave_config(struct scsi_device *sdev)
 795{
 796	struct ata_port *ap = ata_shost_to_port(sdev->host);
 797	struct pata_macio_priv *priv = ap->private_data;
 798	struct ata_device *dev;
 799	u16 cmd;
 800	int rc;
 801
 802	/* First call original */
 803	rc = ata_scsi_slave_config(sdev);
 804	if (rc)
 805		return rc;
 806
 807	/* This is lifted from sata_nv */
 808	dev = &ap->link.device[sdev->id];
 809
  810	/* OHare has issues with non-cache-aligned DMA on some chipsets */
 811	if (priv->kind == controller_ohare) {
 812		blk_queue_update_dma_alignment(sdev->request_queue, 31);
 813		blk_queue_update_dma_pad(sdev->request_queue, 31);
 814
 815		/* Tell the world about it */
 816		ata_dev_info(dev, "OHare alignment limits applied\n");
 817		return 0;
 818	}
 819
 820	/* We only have issues with ATAPI */
 821	if (dev->class != ATA_DEV_ATAPI)
 822		return 0;
 823
 824	/* Shasta and K2 seem to have "issues" with reads ... */
 825	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
  826		/* Alright, these are bad, apply restrictions */
 827		blk_queue_update_dma_alignment(sdev->request_queue, 15);
 828		blk_queue_update_dma_pad(sdev->request_queue, 15);
 829
  830		/* We enable MWI and hack the cache line size directly here;
  831		 * these are not normal values and are specific to this chipset.
  832		 * We somewhat know what we are doing here (which is basically
  833		 * to do the same thing Apple does and pray they did not get it wrong :-)
 834		 */
 835		BUG_ON(!priv->pdev);
 836		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
 837		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
 838		pci_write_config_word(priv->pdev, PCI_COMMAND,
 839				      cmd | PCI_COMMAND_INVALIDATE);
 840
 841		/* Tell the world about it */
 842		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
 843	}
 844
 845	return 0;
 846}
 847
 848#ifdef CONFIG_PM_SLEEP
 849static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
 850{
 851	int rc;
 852
 853	/* First, core libata suspend to do most of the work */
 854	rc = ata_host_suspend(priv->host, mesg);
 855	if (rc)
 856		return rc;
 857
 858	/* Restore to default timings */
 859	pata_macio_default_timings(priv);
 860
  861	/* Mask the interrupt. Not strictly necessary, but the old driver did
  862	 * it and I'd rather not change that here */
 863	disable_irq(priv->irq);
 864
 865	/* The media bay will handle itself just fine */
 866	if (priv->mediabay)
 867		return 0;
 868
 869	/* Kauai has bus control FCRs directly here */
 870	if (priv->kauai_fcr) {
 871		u32 fcr = readl(priv->kauai_fcr);
 872		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
 873		writel(fcr, priv->kauai_fcr);
 874	}
 875
 876	/* For PCI, save state and disable DMA. No need to call
 877	 * pci_set_power_state(), the HW doesn't do D states that
 878	 * way, the platform code will take care of suspending the
 879	 * ASIC properly
 880	 */
 881	if (priv->pdev) {
 882		pci_save_state(priv->pdev);
 883		pci_disable_device(priv->pdev);
 884	}
 885
 886	/* Disable the bus on older machines and the cell on kauai */
 887	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
 888			    priv->aapl_bus_id, 0);
 889
 890	return 0;
 891}
 892
 893static int pata_macio_do_resume(struct pata_macio_priv *priv)
 894{
 895	/* Reset and re-enable the HW */
 896	pata_macio_reset_hw(priv, 1);
 897
 898	/* Sanitize drive timings */
 899	pata_macio_apply_timings(priv->host->ports[0], 0);
 900
  901	/* We want our IRQ back! */
 902	enable_irq(priv->irq);
 903
 904	/* Let the libata core take it from there */
 905	ata_host_resume(priv->host);
 906
 907	return 0;
 908}
 909#endif /* CONFIG_PM_SLEEP */
 910
 911static struct scsi_host_template pata_macio_sht = {
 912	ATA_BASE_SHT(DRV_NAME),
 913	.sg_tablesize		= MAX_DCMDS,
  914	/* We may not need a boundary this strict */
  915	.dma_boundary		= ATA_DMA_BOUNDARY,
  916	.slave_configure	= pata_macio_slave_config,
  917};
 918
 919static struct ata_port_operations pata_macio_ops = {
 920	.inherits		= &ata_bmdma_port_ops,
 921
 922	.freeze			= pata_macio_freeze,
 923	.set_piomode		= pata_macio_set_timings,
 924	.set_dmamode		= pata_macio_set_timings,
 925	.cable_detect		= pata_macio_cable_detect,
 926	.sff_dev_select		= pata_macio_dev_select,
 927	.qc_prep		= pata_macio_qc_prep,
 928	.bmdma_setup		= pata_macio_bmdma_setup,
 929	.bmdma_start		= pata_macio_bmdma_start,
 930	.bmdma_stop		= pata_macio_bmdma_stop,
 931	.bmdma_status		= pata_macio_bmdma_status,
 932	.port_start		= pata_macio_port_start,
 933	.sff_irq_clear		= pata_macio_irq_clear,
 934};
 935
 936static void pata_macio_invariants(struct pata_macio_priv *priv)
 937{
 938	const int *bidp;
 939
 940	/* Identify the type of controller */
 941	if (of_device_is_compatible(priv->node, "shasta-ata")) {
 942		priv->kind = controller_sh_ata6;
  943		priv->timings = pata_macio_shasta_timings;
 944	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
 945		priv->kind = controller_un_ata6;
  946		priv->timings = pata_macio_kauai_timings;
 947	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
 948		priv->kind = controller_k2_ata6;
  949		priv->timings = pata_macio_kauai_timings;
 950	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
 951		if (strcmp(priv->node->name, "ata-4") == 0) {
 952			priv->kind = controller_kl_ata4;
 953			priv->timings = pata_macio_kl66_timings;
 954		} else {
 955			priv->kind = controller_kl_ata3;
 956			priv->timings = pata_macio_kl33_timings;
 957		}
 958	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
 959		priv->kind = controller_heathrow;
 960		priv->timings = pata_macio_heathrow_timings;
 961	} else {
 962		priv->kind = controller_ohare;
 963		priv->timings = pata_macio_ohare_timings;
 964	}
 965
 966	/* XXX FIXME --- setup priv->mediabay here */
 967
 968	/* Get Apple bus ID (for clock and ASIC control) */
 969	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
  970	priv->aapl_bus_id = bidp ? *bidp : 0;
 971
  972	/* Fix up missing Apple bus ID in the media-bay case */
  973	if (priv->mediabay && bidp == NULL)
 974		priv->aapl_bus_id = 1;
 975}
 976
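/*
 * Added note: on these cells the taskfile registers are spaced 0x10
 * bytes apart in the MMIO window (hence the << 4 below), with the
 * control/alt-status register living at offset 0x160.
 */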
 977static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
  978				 void __iomem *base, void __iomem *dma)
 979{
 980	/* cmd_addr is the base of regs for that port */
 981	ioaddr->cmd_addr	= base;
 982
 983	/* taskfile registers */
 984	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
 985	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
 986	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
 987	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
 988	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
 989	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
 990	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
 991	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
 992	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
 993	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
 994	ioaddr->altstatus_addr	= base + 0x160;
 995	ioaddr->ctl_addr	= base + 0x160;
 996	ioaddr->bmdma_addr	= dma;
 997}
 998
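/*
 * Build the libata PIO/MWDMA/UDMA masks from the timing table. The
 * XFER_* mode values encode the transfer class in their high nibble
 * (0x0x PIO, 0x2x MWDMA, 0x4x UDMA), and since XFER_PIO_0 is 0x08 the
 * PIO mask needs the extra >> 8 shift to start at bit 0.
 * (Explanatory comment added; mode values as defined in <linux/ata.h>.)
 */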
 999static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1000					 struct ata_port_info *pinfo)
1001{
1002	int i = 0;
1003
1004	pinfo->pio_mask		= 0;
1005	pinfo->mwdma_mask	= 0;
1006	pinfo->udma_mask	= 0;
1007
1008	while (priv->timings[i].mode > 0) {
1009		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
1010		switch(priv->timings[i].mode & 0xf0) {
1011		case 0x00: /* PIO */
1012			pinfo->pio_mask |= (mask >> 8);
1013			break;
1014		case 0x20: /* MWDMA */
1015			pinfo->mwdma_mask |= mask;
1016			break;
1017		case 0x40: /* UDMA */
1018			pinfo->udma_mask |= mask;
1019			break;
1020		}
1021		i++;
1022	}
1023	dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
1024		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1025}
1026
1027static int pata_macio_common_init(struct pata_macio_priv *priv,
1028				  resource_size_t tfregs,
1029				  resource_size_t dmaregs,
1030				  resource_size_t fcregs,
1031				  unsigned long irq)
1032{
1033	struct ata_port_info		pinfo;
1034	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
1035	void __iomem			*dma_regs = NULL;
1036
1037	/* Fill up privates with various invariants collected from the
1038	 * device-tree
1039	 */
1040	pata_macio_invariants(priv);
1041
1042	/* Make sure we have sane initial timings in the cache */
1043	pata_macio_default_timings(priv);
1044
1045	/* Not sure what the real max is but we know it's less than 64K, let's
1046	 * use 64K minus 256
1047	 */
1048	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
1049
1050	/* Allocate libata host for 1 port */
1051	memset(&pinfo, 0, sizeof(struct ata_port_info));
1052	pmac_macio_calc_timing_masks(priv, &pinfo);
1053	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
1054	pinfo.port_ops		= &pata_macio_ops;
1055	pinfo.private_data	= priv;
1056
1057	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1058	if (priv->host == NULL) {
1059		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1060		return -ENOMEM;
1061	}
1062
1063	/* Setup the private data in host too */
1064	priv->host->private_data = priv;
1065
1066	/* Map base registers */
1067	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1068	if (priv->tfregs == NULL) {
1069		dev_err(priv->dev, "Failed to map ATA ports\n");
1070		return -ENOMEM;
1071	}
1072	priv->host->iomap = &priv->tfregs;
1073
1074	/* Map DMA regs */
1075	if (dmaregs != 0) {
1076		dma_regs = devm_ioremap(priv->dev, dmaregs,
1077					sizeof(struct dbdma_regs));
1078		if (dma_regs == NULL)
1079			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1080	}
1081
1082	/* If chip has local feature control, map those regs too */
1083	if (fcregs != 0) {
1084		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1085		if (priv->kauai_fcr == NULL) {
1086			dev_err(priv->dev, "Failed to map ATA FCR register\n");
1087			return -ENOMEM;
1088		}
1089	}
1090
1091	/* Setup port data structure */
1092	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1093			     priv->tfregs, dma_regs);
1094	priv->host->ports[0]->private_data = priv;
1095
1096	/* hard-reset the controller */
1097	pata_macio_reset_hw(priv, 0);
1098	pata_macio_apply_timings(priv->host->ports[0], 0);
1099
1100	/* Enable bus master if necessary */
1101	if (priv->pdev && dma_regs)
1102		pci_set_master(priv->pdev);
1103
1104	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1105		 macio_ata_names[priv->kind], priv->aapl_bus_id);
1106
1107	/* Start it up */
1108	priv->irq = irq;
1109	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1110				 &pata_macio_sht);
1111}
1112
1113static int pata_macio_attach(struct macio_dev *mdev,
1114			     const struct of_device_id *match)
1115{
1116	struct pata_macio_priv	*priv;
1117	resource_size_t		tfregs, dmaregs = 0;
1118	unsigned long		irq;
1119	int			rc;
1120
1121	/* Check for broken device-trees */
1122	if (macio_resource_count(mdev) == 0) {
1123		dev_err(&mdev->ofdev.dev,
1124			"No addresses for controller\n");
1125		return -ENXIO;
1126	}
1127
1128	/* Enable managed resources */
1129	macio_enable_devres(mdev);
1130
1131	/* Allocate and init private data structure */
1132	priv = devm_kzalloc(&mdev->ofdev.dev,
1133			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1134	if (priv == NULL) {
1135		dev_err(&mdev->ofdev.dev,
1136			"Failed to allocate private memory\n");
1137		return -ENOMEM;
1138	}
1139	priv->node = of_node_get(mdev->ofdev.dev.of_node);
1140	priv->mdev = mdev;
1141	priv->dev = &mdev->ofdev.dev;
1142
1143	/* Request memory resource for taskfile registers */
1144	if (macio_request_resource(mdev, 0, "pata-macio")) {
1145		dev_err(&mdev->ofdev.dev,
1146			"Cannot obtain taskfile resource\n");
1147		return -EBUSY;
1148	}
1149	tfregs = macio_resource_start(mdev, 0);
1150
1151	/* Request resources for DMA registers if any */
1152	if (macio_resource_count(mdev) >= 2) {
1153		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1154			dev_err(&mdev->ofdev.dev,
1155				"Cannot obtain DMA resource\n");
1156		else
1157			dmaregs = macio_resource_start(mdev, 1);
1158	}
1159
1160	/*
1161	 * Fixup missing IRQ for some old implementations with broken
1162	 * device-trees.
1163	 *
1164	 * This is a bit bogus, it should be fixed in the device-tree itself,
1165	 * via the existing macio fixups, based on the type of interrupt
1166	 * controller in the machine. However, I have no test HW for this case,
1167	 * and this trick works well enough on those old machines...
1168	 */
1169	if (macio_irq_count(mdev) == 0) {
1170		dev_warn(&mdev->ofdev.dev,
1171			 "No interrupts for controller, using 13\n");
1172		irq = irq_create_mapping(NULL, 13);
1173	} else
1174		irq = macio_irq(mdev, 0);
1175
 1176	/* Prevent media bay callbacks until fully registered */
1177	lock_media_bay(priv->mdev->media_bay);
1178
1179	/* Get register addresses and call common initialization */
1180	rc = pata_macio_common_init(priv,
1181				    tfregs,		/* Taskfile regs */
1182				    dmaregs,		/* DBDMA regs */
1183				    0,			/* Feature control */
1184				    irq);
1185	unlock_media_bay(priv->mdev->media_bay);
1186
1187	return rc;
1188}
1189
1190static int pata_macio_detach(struct macio_dev *mdev)
1191{
1192	struct ata_host *host = macio_get_drvdata(mdev);
1193	struct pata_macio_priv *priv = host->private_data;
1194
1195	lock_media_bay(priv->mdev->media_bay);
1196
1197	/* Make sure the mediabay callback doesn't try to access
1198	 * dead stuff
1199	 */
1200	priv->host->private_data = NULL;
1201
1202	ata_host_detach(host);
1203
1204	unlock_media_bay(priv->mdev->media_bay);
1205
1206	return 0;
1207}
1208
1209#ifdef CONFIG_PM_SLEEP
1210static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1211{
1212	struct ata_host *host = macio_get_drvdata(mdev);
1213
1214	return pata_macio_do_suspend(host->private_data, mesg);
1215}
1216
1217static int pata_macio_resume(struct macio_dev *mdev)
1218{
1219	struct ata_host *host = macio_get_drvdata(mdev);
1220
1221	return pata_macio_do_resume(host->private_data);
1222}
1223#endif /* CONFIG_PM_SLEEP */
1224
1225#ifdef CONFIG_PMAC_MEDIABAY
1226static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1227{
1228	struct ata_host *host = macio_get_drvdata(mdev);
1229	struct ata_port *ap;
1230	struct ata_eh_info *ehi;
1231	struct ata_device *dev;
1232	unsigned long flags;
1233
1234	if (!host || !host->private_data)
1235		return;
1236	ap = host->ports[0];
1237	spin_lock_irqsave(ap->lock, flags);
1238	ehi = &ap->link.eh_info;
1239	if (mb_state == MB_CD) {
1240		ata_ehi_push_desc(ehi, "mediabay plug");
1241		ata_ehi_hotplugged(ehi);
1242		ata_port_freeze(ap);
1243	} else {
1244		ata_ehi_push_desc(ehi, "mediabay unplug");
1245		ata_for_each_dev(dev, &ap->link, ALL)
1246			dev->flags |= ATA_DFLAG_DETACH;
1247		ata_port_abort(ap);
1248	}
1249	spin_unlock_irqrestore(ap->lock, flags);
1250
1251}
1252#endif /* CONFIG_PMAC_MEDIABAY */
1253
1254
1255static int pata_macio_pci_attach(struct pci_dev *pdev,
1256				 const struct pci_device_id *id)
1257{
1258	struct pata_macio_priv	*priv;
1259	struct device_node	*np;
1260	resource_size_t		rbase;
1261
1262	/* We cannot use a MacIO controller without its OF device node */
1263	np = pci_device_to_OF_node(pdev);
1264	if (np == NULL) {
1265		dev_err(&pdev->dev,
1266			"Cannot find OF device node for controller\n");
1267		return -ENODEV;
1268	}
1269
1270	/* Check that it can be enabled */
1271	if (pcim_enable_device(pdev)) {
1272		dev_err(&pdev->dev,
1273			"Cannot enable controller PCI device\n");
1274		return -ENXIO;
1275	}
1276
1277	/* Allocate and init private data structure */
1278	priv = devm_kzalloc(&pdev->dev,
1279			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1280	if (priv == NULL) {
1281		dev_err(&pdev->dev,
1282			"Failed to allocate private memory\n");
1283		return -ENOMEM;
1284	}
1285	priv->node = of_node_get(np);
1286	priv->pdev = pdev;
1287	priv->dev = &pdev->dev;
1288
1289	/* Get MMIO regions */
1290	if (pci_request_regions(pdev, "pata-macio")) {
1291		dev_err(&pdev->dev,
1292			"Cannot obtain PCI resources\n");
1293		return -EBUSY;
1294	}
1295
1296	/* Get register addresses and call common initialization */
1297	rbase = pci_resource_start(pdev, 0);
1298	if (pata_macio_common_init(priv,
1299				   rbase + 0x2000,	/* Taskfile regs */
1300				   rbase + 0x1000,	/* DBDMA regs */
1301				   rbase,		/* Feature control */
1302				   pdev->irq))
1303		return -ENXIO;
1304
1305	return 0;
1306}
1307
1308static void pata_macio_pci_detach(struct pci_dev *pdev)
1309{
1310	struct ata_host *host = pci_get_drvdata(pdev);
1311
1312	ata_host_detach(host);
1313}
1314
1315#ifdef CONFIG_PM_SLEEP
1316static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1317{
1318	struct ata_host *host = pci_get_drvdata(pdev);
1319
1320	return pata_macio_do_suspend(host->private_data, mesg);
1321}
1322
1323static int pata_macio_pci_resume(struct pci_dev *pdev)
1324{
1325	struct ata_host *host = pci_get_drvdata(pdev);
1326
1327	return pata_macio_do_resume(host->private_data);
1328}
1329#endif /* CONFIG_PM_SLEEP */
1330
 1331static const struct of_device_id pata_macio_match[] =
1332{
1333	{
1334	.name 		= "IDE",
1335	},
1336	{
1337	.name 		= "ATA",
1338	},
1339	{
1340	.type		= "ide",
1341	},
1342	{
1343	.type		= "ata",
1344	},
1345	{},
1346};
1347MODULE_DEVICE_TABLE(of, pata_macio_match);
1348
1349static struct macio_driver pata_macio_driver =
1350{
1351	.driver = {
1352		.name 		= "pata-macio",
1353		.owner		= THIS_MODULE,
1354		.of_match_table	= pata_macio_match,
1355	},
1356	.probe		= pata_macio_attach,
1357	.remove		= pata_macio_detach,
1358#ifdef CONFIG_PM_SLEEP
1359	.suspend	= pata_macio_suspend,
1360	.resume		= pata_macio_resume,
1361#endif
1362#ifdef CONFIG_PMAC_MEDIABAY
1363	.mediabay_event	= pata_macio_mb_event,
1364#endif
1365};
1366
1367static const struct pci_device_id pata_macio_pci_match[] = {
1368	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
1369	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
1370	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
1371	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
1372	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
1373	{},
1374};
1375
1376static struct pci_driver pata_macio_pci_driver = {
1377	.name		= "pata-pci-macio",
1378	.id_table	= pata_macio_pci_match,
1379	.probe		= pata_macio_pci_attach,
1380	.remove		= pata_macio_pci_detach,
1381#ifdef CONFIG_PM_SLEEP
1382	.suspend	= pata_macio_pci_suspend,
1383	.resume		= pata_macio_pci_resume,
1384#endif
1385	.driver = {
1386		.owner		= THIS_MODULE,
1387	},
1388};
1389MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1390
1391
1392static int __init pata_macio_init(void)
1393{
1394	int rc;
1395
1396	if (!machine_is(powermac))
1397		return -ENODEV;
1398
1399	rc = pci_register_driver(&pata_macio_pci_driver);
1400	if (rc)
1401		return rc;
1402	rc = macio_register_driver(&pata_macio_driver);
1403	if (rc) {
1404		pci_unregister_driver(&pata_macio_pci_driver);
1405		return rc;
1406	}
1407	return 0;
1408}
1409
1410static void __exit pata_macio_exit(void)
1411{
1412	macio_unregister_driver(&pata_macio_driver);
1413	pci_unregister_driver(&pata_macio_pci_driver);
1414}
1415
1416module_init(pata_macio_init);
1417module_exit(pata_macio_exit);
1418
1419MODULE_AUTHOR("Benjamin Herrenschmidt");
1420MODULE_DESCRIPTION("Apple MacIO PATA driver");
1421MODULE_LICENSE("GPL");
1422MODULE_VERSION(DRV_VERSION);