   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Libata based driver for Apple "macio" family of PATA controllers
   4 *
   5 * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
   6 *                     <benh@kernel.crashing.org>
   7 *
   8 * Some bits and pieces from drivers/ide/ppc/pmac.c
   9 *
  10 */
  11
  12#undef DEBUG
  13#undef DEBUG_DMA
  14
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/blkdev.h>
  19#include <linux/ata.h>
  20#include <linux/libata.h>
  21#include <linux/adb.h>
  22#include <linux/pmu.h>
  23#include <linux/scatterlist.h>
  24#include <linux/of.h>
  25#include <linux/gfp.h>
  26#include <linux/pci.h>
  27
  28#include <scsi/scsi.h>
  29#include <scsi/scsi_host.h>
  30#include <scsi/scsi_device.h>
  31
  32#include <asm/macio.h>
  33#include <asm/io.h>
  34#include <asm/dbdma.h>
  35#include <asm/machdep.h>
  36#include <asm/pmac_feature.h>
  37#include <asm/mediabay.h>
  38
  39#ifdef DEBUG_DMA
  40#define dev_dbgdma(dev, format, arg...)		\
  41	dev_printk(KERN_DEBUG , dev , format , ## arg)
  42#else
  43#define dev_dbgdma(dev, format, arg...)		\
  44	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
  45#endif
  46
  47#define DRV_NAME	"pata_macio"
  48#define DRV_VERSION	"0.9"
  49
  50/* Models of macio ATA controller */
  51enum {
  52	controller_ohare,	/* OHare based */
  53	controller_heathrow,	/* Heathrow/Paddington */
  54	controller_kl_ata3,	/* KeyLargo ATA-3 */
  55	controller_kl_ata4,	/* KeyLargo ATA-4 */
  56	controller_un_ata6,	/* UniNorth2 ATA-6 */
  57	controller_k2_ata6,	/* K2 ATA-6 */
  58	controller_sh_ata6,	/* Shasta ATA-6 */
  59};
  60
  61static const char* macio_ata_names[] = {
  62	"OHare ATA",		/* OHare based */
  63	"Heathrow ATA",		/* Heathrow/Paddington */
  64	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
  65	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
  66	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
  67	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
  68	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
  69};
  70
  71/*
  72 * Extra registers, both 32-bit little-endian
  73 */
  74#define IDE_TIMING_CONFIG	0x200
  75#define IDE_INTERRUPT		0x300
  76
  77/* Kauai (U2) ATA has different register setup */
  78#define IDE_KAUAI_PIO_CONFIG	0x200
  79#define IDE_KAUAI_ULTRA_CONFIG	0x210
  80#define IDE_KAUAI_POLL_CONFIG	0x220
  81
  82/*
  83 * Timing configuration register definitions
  84 */
  85
  86/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
  87#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
  88#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
  89#define IDE_SYSCLK_NS		30	/* 33Mhz cell */
  90#define IDE_SYSCLK_66_NS	15	/* 66Mhz cell */
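/* Worked example (illustrative only): a 120ns interval rounds up to
 * SYSCLK_TICKS(120) = (120 + 30 - 1) / 30 = 4 ticks on the 33Mhz cell,
 * and to SYSCLK_TICKS_66(120) = (120 + 15 - 1) / 15 = 8 ticks on the
 * 66Mhz cell.
 */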
  91
   92/* 133Mhz cell, found in Shasta.
   93 * See the comments about the 100 Mhz Uninorth 2 cell below...
   94 * Note that PIO_MASK and MDMA_MASK seem to overlap; that's just
   95 * weird and I don't know why at this stage.
  96 */
  97#define TR_133_PIOREG_PIO_MASK		0xff000fff
  98#define TR_133_PIOREG_MDMA_MASK		0x00fff800
  99#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
 100#define TR_133_UDMAREG_UDMA_EN		0x00000001
 101
  102/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a PCI device
  103 * (106b/0033) on the Uninorth or K2 internal PCI bus and its clock is
  104 * controlled like gem or fw. It appears to be an evolution of the KeyLargo
  105 * ATA4 with the timing register extended to two 32-bit registers (one
  106 * for PIO & MWDMA and one for UDMA) and a similar DBDMA channel.
  107 * It has its own local feature control register as well.
  108 *
  109 * After scratching my head over the timing values, at least for PIO
  110 * and MDMA, I think I've figured out the format of the timing register,
  111 * though I use pre-calculated tables for UDMA as usual...
  112 */
 113#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
 114#define TR_100_PIO_ADDRSETUP_SHIFT	24
 115#define TR_100_MDMA_MASK		0x00fff000
 116#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
 117#define TR_100_MDMA_RECOVERY_SHIFT	18
 118#define TR_100_MDMA_ACCESS_MASK		0x0003f000
 119#define TR_100_MDMA_ACCESS_SHIFT	12
 120#define TR_100_PIO_MASK			0xff000fff
 121#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
 122#define TR_100_PIO_RECOVERY_SHIFT	6
 123#define TR_100_PIO_ACCESS_MASK		0x0000003f
 124#define TR_100_PIO_ACCESS_SHIFT		0
 125
 126#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
 127#define TR_100_UDMAREG_UDMA_EN		0x00000001
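/* Illustrative use of the 100Mhz PIO fields above (a sketch, not a
 * validated timing value): an access time of 5 clocks and a recovery
 * time of 4 clocks would be encoded roughly as
 *
 *	u32 t = ((5 << TR_100_PIO_ACCESS_SHIFT) & TR_100_PIO_ACCESS_MASK) |
 *		((4 << TR_100_PIO_RECOVERY_SHIFT) & TR_100_PIO_RECOVERY_MASK);
 *
 * The real values used by this driver come from the pre-calculated
 * pata_macio_kauai_timings[] table below.
 */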
 128
 129
  130/* 66Mhz cell, found in KeyLargo. Can do Ultra modes 0 to 2 on a
  131 * 40-conductor cable and up to 4 on an 80-conductor one.
  132 * Clock unit is 15ns (66Mhz)
  133 *
  134 * Three values can be programmed:
  135 *  - Write data setup, which appears to match the cycle time. They
  136 *    also call it DIOW setup.
  137 *  - Ready to pause time (from spec)
  138 *  - Address setup. That one is weird. I don't see where exactly
  139 *    it fits in UDMA cycles; I got its name from an obscure piece
  140 *    of commented-out code in Darwin. They leave it at 0, we do as
  141 *    well, despite a comment that would lead one to think it has a
  142 *    min value of 45ns.
  143 * Apple also adds 60ns to the write data setup (or cycle time?) on
  144 * reads.
  145 */
 146#define TR_66_UDMA_MASK			0xfff00000
 147#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
 148#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
 149#define TR_66_PIO_ADDRSETUP_SHIFT	29
 150#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
 151#define TR_66_UDMA_RDY2PAUS_SHIFT	25
 152#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
 153#define TR_66_UDMA_WRDATASETUP_SHIFT	21
 154#define TR_66_MDMA_MASK			0x000ffc00
 155#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
 156#define TR_66_MDMA_RECOVERY_SHIFT	15
 157#define TR_66_MDMA_ACCESS_MASK		0x00007c00
 158#define TR_66_MDMA_ACCESS_SHIFT		10
 159#define TR_66_PIO_MASK			0xe00003ff
 160#define TR_66_PIO_RECOVERY_MASK		0x000003e0
 161#define TR_66_PIO_RECOVERY_SHIFT	5
 162#define TR_66_PIO_ACCESS_MASK		0x0000001f
 163#define TR_66_PIO_ACCESS_SHIFT		0
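/* Of these 66Mhz definitions, the driver only tests TR_66_UDMA_EN
 * directly: pata_macio_bmdma_setup() checks it to add the extra 60ns of
 * write data setup on UDMA reads mentioned above (0x00800000 is 4 extra
 * 15ns clocks in the wrDataSetup field).
 */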
 164
 165/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
 166 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
 167 *
  168 * The access time and recovery time can be programmed. Some older
  169 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
  170 * the same here for safety against broken old hardware ;)
  171 * The HalfTick bit, when set, adds half a clock (15ns) to the access
  172 * time and removes one from recovery. It's not supported on the KeyLargo
  173 * implementation AFAIK. The E bit appears to be set for PIO mode 0 and
  174 * is used to reach the long timings used in this mode.
 175 */
 176#define TR_33_MDMA_MASK			0x003ff800
 177#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
 178#define TR_33_MDMA_RECOVERY_SHIFT	16
 179#define TR_33_MDMA_ACCESS_MASK		0x0000f800
 180#define TR_33_MDMA_ACCESS_SHIFT		11
 181#define TR_33_MDMA_HALFTICK		0x00200000
 182#define TR_33_PIO_MASK			0x000007ff
 183#define TR_33_PIO_E			0x00000400
 184#define TR_33_PIO_RECOVERY_MASK		0x000003e0
 185#define TR_33_PIO_RECOVERY_SHIFT	5
 186#define TR_33_PIO_ACCESS_MASK		0x0000001f
 187#define TR_33_PIO_ACCESS_SHIFT		0
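/* Worked example (illustrative only): the 150ns cycle limit mentioned
 * above corresponds to SYSCLK_TICKS(150) = (150 + 30 - 1) / 30 = 5
 * clocks of the 33Mhz cell.
 */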
 188
 189/*
 190 * Interrupt register definitions. Only present on newer cells
  191 * (KeyLargo and later, AFAIK), so we don't use it.
 192 */
 193#define IDE_INTR_DMA			0x80000000
 194#define IDE_INTR_DEVICE			0x40000000
 195
 196/*
 197 * FCR Register on Kauai. Not sure what bit 0x4 is  ...
 198 */
 199#define KAUAI_FCR_UATA_MAGIC		0x00000004
 200#define KAUAI_FCR_UATA_RESET_N		0x00000002
 201#define KAUAI_FCR_UATA_ENABLE		0x00000001
 202
 203
 204/* Allow up to 256 DBDMA commands per xfer */
 205#define MAX_DCMDS		256
 206
 207/* Don't let a DMA segment go all the way to 64K */
 208#define MAX_DBDMA_SEG		0xff00
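/* 0xff00 is 65280 bytes, i.e. 256 bytes short of 64K, which also matches
 * the max_segment_size advertised in pata_macio_sht below.
 */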
 209
 210
 211/*
 212 * Wait 1s for disk to answer on IDE bus after a hard reset
 213 * of the device (via GPIO/FCR).
 214 *
 215 * Some devices seem to "pollute" the bus even after dropping
  216 * the BSY bit (typically some combo drives used as slave on the
  217 * UDMA bus) after a hard reset. Since we hard reset all drives on
 218 * KeyLargo ATA66, we have to keep that delay around. I may end
 219 * up not hard resetting anymore on these and keep the delay only
 220 * for older interfaces instead (we have to reset when coming
 221 * from MacOS...) --BenH.
 222 */
 223#define IDE_WAKEUP_DELAY_MS	1000
 224
 225struct pata_macio_timing;
 226
 227struct pata_macio_priv {
 228	int				kind;
 229	int				aapl_bus_id;
 230	int				mediabay : 1;
 231	struct device_node		*node;
 232	struct macio_dev		*mdev;
 233	struct pci_dev			*pdev;
 234	struct device			*dev;
 235	int				irq;
 236	u32				treg[2][2];
 237	void __iomem			*tfregs;
 238	void __iomem			*kauai_fcr;
 239	struct dbdma_cmd *		dma_table_cpu;
 240	dma_addr_t			dma_table_dma;
 241	struct ata_host			*host;
 242	const struct pata_macio_timing	*timings;
 243};
 244
  245/* Previous versions of this driver calculated timings for some
  246 * variants of the chip and used tables for others.
  247 *
  248 * Not only was this confusing, but in addition, it isn't clear
  249 * whether our calculation code was correct. It didn't entirely
  250 * match the Darwin code or whatever documentation I could find
  251 * on these cells.
  252 *
  253 * I decided to rely entirely on tables instead for this version
  254 * of the driver. Also, because I don't really care about derated
  255 * modes and really old HW other than making them work, I'm not going
  256 * to calculate / snoop timing values for anything other than the
  257 * standard modes.
  258 */
 259struct pata_macio_timing {
 260	int	mode;
 261	u32	reg1;	/* Bits to set in first timing reg */
 262	u32	reg2;	/* Bits to set in second timing reg */
 263};
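/* Each table row is looked up by pata_macio_find_timing() below; its
 * reg1/reg2 bits are ORed into the cached priv->treg[devno][0..1] words
 * by pata_macio_set_timings(), and pata_macio_apply_timings() then
 * writes those words to the cell's timing registers.
 */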
 264
 265static const struct pata_macio_timing pata_macio_ohare_timings[] = {
 266	{ XFER_PIO_0,		0x00000526,	0, },
 267	{ XFER_PIO_1,		0x00000085,	0, },
 268	{ XFER_PIO_2,		0x00000025,	0, },
 269	{ XFER_PIO_3,		0x00000025,	0, },
 270	{ XFER_PIO_4,		0x00000025,	0, },
 271	{ XFER_MW_DMA_0,	0x00074000,	0, },
 272	{ XFER_MW_DMA_1,	0x00221000,	0, },
 273	{ XFER_MW_DMA_2,	0x00211000,	0, },
 274	{ -1, 0, 0 }
 275};
 276
 277static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
 278	{ XFER_PIO_0,		0x00000526,	0, },
 279	{ XFER_PIO_1,		0x00000085,	0, },
 280	{ XFER_PIO_2,		0x00000025,	0, },
 281	{ XFER_PIO_3,		0x00000025,	0, },
 282	{ XFER_PIO_4,		0x00000025,	0, },
 283	{ XFER_MW_DMA_0,	0x00074000,	0, },
 284	{ XFER_MW_DMA_1,	0x00221000,	0, },
 285	{ XFER_MW_DMA_2,	0x00211000,	0, },
 286	{ -1, 0, 0 }
 287};
 288
 289static const struct pata_macio_timing pata_macio_kl33_timings[] = {
 290	{ XFER_PIO_0,		0x00000526,	0, },
 291	{ XFER_PIO_1,		0x00000085,	0, },
 292	{ XFER_PIO_2,		0x00000025,	0, },
 293	{ XFER_PIO_3,		0x00000025,	0, },
 294	{ XFER_PIO_4,		0x00000025,	0, },
 295	{ XFER_MW_DMA_0,	0x00084000,	0, },
 296	{ XFER_MW_DMA_1,	0x00021800,	0, },
 297	{ XFER_MW_DMA_2,	0x00011800,	0, },
 298	{ -1, 0, 0 }
 299};
 300
 301static const struct pata_macio_timing pata_macio_kl66_timings[] = {
 302	{ XFER_PIO_0,		0x0000038c,	0, },
 303	{ XFER_PIO_1,		0x0000020a,	0, },
 304	{ XFER_PIO_2,		0x00000127,	0, },
 305	{ XFER_PIO_3,		0x000000c6,	0, },
 306	{ XFER_PIO_4,		0x00000065,	0, },
 307	{ XFER_MW_DMA_0,	0x00084000,	0, },
 308	{ XFER_MW_DMA_1,	0x00029800,	0, },
 309	{ XFER_MW_DMA_2,	0x00019400,	0, },
 310	{ XFER_UDMA_0,		0x19100000,	0, },
 311	{ XFER_UDMA_1,		0x14d00000,	0, },
 312	{ XFER_UDMA_2,		0x10900000,	0, },
 313	{ XFER_UDMA_3,		0x0c700000,	0, },
 314	{ XFER_UDMA_4,		0x0c500000,	0, },
 315	{ -1, 0, 0 }
 316};
 317
 318static const struct pata_macio_timing pata_macio_kauai_timings[] = {
 319	{ XFER_PIO_0,		0x08000a92,	0, },
 320	{ XFER_PIO_1,		0x0800060f,	0, },
 321	{ XFER_PIO_2,		0x0800038b,	0, },
 322	{ XFER_PIO_3,		0x05000249,	0, },
 323	{ XFER_PIO_4,		0x04000148,	0, },
 324	{ XFER_MW_DMA_0,	0x00618000,	0, },
 325	{ XFER_MW_DMA_1,	0x00209000,	0, },
 326	{ XFER_MW_DMA_2,	0x00148000,	0, },
 327	{ XFER_UDMA_0,		         0,	0x000070c1, },
 328	{ XFER_UDMA_1,		         0,	0x00005d81, },
 329	{ XFER_UDMA_2,		         0,	0x00004a61, },
 330	{ XFER_UDMA_3,		         0,	0x00003a51, },
 331	{ XFER_UDMA_4,		         0,	0x00002a31, },
 332	{ XFER_UDMA_5,		         0,	0x00002921, },
 333	{ -1, 0, 0 }
 334};
 335
 336static const struct pata_macio_timing pata_macio_shasta_timings[] = {
 337	{ XFER_PIO_0,		0x0a000c97,	0, },
 338	{ XFER_PIO_1,		0x07000712,	0, },
 339	{ XFER_PIO_2,		0x040003cd,	0, },
 340	{ XFER_PIO_3,		0x0500028b,	0, },
 341	{ XFER_PIO_4,		0x0400010a,	0, },
 342	{ XFER_MW_DMA_0,	0x00820800,	0, },
 343	{ XFER_MW_DMA_1,	0x0028b000,	0, },
 344	{ XFER_MW_DMA_2,	0x001ca000,	0, },
 345	{ XFER_UDMA_0,		         0,	0x00035901, },
 346	{ XFER_UDMA_1,		         0,	0x000348b1, },
 347	{ XFER_UDMA_2,		         0,	0x00033881, },
 348	{ XFER_UDMA_3,		         0,	0x00033861, },
 349	{ XFER_UDMA_4,		         0,	0x00033841, },
 350	{ XFER_UDMA_5,		         0,	0x00033031, },
 351	{ XFER_UDMA_6,		         0,	0x00033021, },
 352	{ -1, 0, 0 }
 353};
 354
 355static const struct pata_macio_timing *pata_macio_find_timing(
 356					    struct pata_macio_priv *priv,
 357					    int mode)
 358{
 359	int i;
 360
 361	for (i = 0; priv->timings[i].mode > 0; i++) {
 362		if (priv->timings[i].mode == mode)
 363			return &priv->timings[i];
 364	}
 365	return NULL;
 366}
 367
 368
 369static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
 370{
 371	struct pata_macio_priv *priv = ap->private_data;
 372	void __iomem *rbase = ap->ioaddr.cmd_addr;
 373
 374	if (priv->kind == controller_sh_ata6 ||
 375	    priv->kind == controller_un_ata6 ||
 376	    priv->kind == controller_k2_ata6) {
 377		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
 378		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
 379	} else
 380		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
 381}
 382
 383static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
 384{
 385	ata_sff_dev_select(ap, device);
 386
 387	/* Apply timings */
 388	pata_macio_apply_timings(ap, device);
 389}
 390
 391static void pata_macio_set_timings(struct ata_port *ap,
 392				   struct ata_device *adev)
 393{
 394	struct pata_macio_priv *priv = ap->private_data;
 395	const struct pata_macio_timing *t;
 396
 397	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
 398		adev->devno,
 399		adev->pio_mode,
 400		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
 401		adev->dma_mode,
 402		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
 403
 404	/* First clear timings */
 405	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
 406
 407	/* Now get the PIO timings */
 408	t = pata_macio_find_timing(priv, adev->pio_mode);
 409	if (t == NULL) {
 410		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
 411			 adev->pio_mode);
 412		t = pata_macio_find_timing(priv, XFER_PIO_0);
 413	}
 414	BUG_ON(t == NULL);
 415
 416	/* PIO timings only ever use the first treg */
 417	priv->treg[adev->devno][0] |= t->reg1;
 418
 419	/* Now get DMA timings */
 420	t = pata_macio_find_timing(priv, adev->dma_mode);
 421	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
 422		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
 423		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
 424	}
 425	BUG_ON(t == NULL);
 426
 427	/* DMA timings can use both tregs */
 428	priv->treg[adev->devno][0] |= t->reg1;
 429	priv->treg[adev->devno][1] |= t->reg2;
 430
 431	dev_dbg(priv->dev, " -> %08x %08x\n",
 432		priv->treg[adev->devno][0],
 433		priv->treg[adev->devno][1]);
 434
 435	/* Apply to hardware */
 436	pata_macio_apply_timings(ap, adev->devno);
 437}
 438
 439/*
  440 * Blast some well-known "safe" values into the timing registers at init
  441 * or wakeup-from-sleep time, before we do the real calculation
 442 */
 443static void pata_macio_default_timings(struct pata_macio_priv *priv)
 444{
 445	unsigned int value, value2 = 0;
 446
 447	switch(priv->kind) {
 448		case controller_sh_ata6:
 449			value = 0x0a820c97;
 450			value2 = 0x00033031;
 451			break;
 452		case controller_un_ata6:
 453		case controller_k2_ata6:
 454			value = 0x08618a92;
 455			value2 = 0x00002921;
 456			break;
 457		case controller_kl_ata4:
 458			value = 0x0008438c;
 459			break;
 460		case controller_kl_ata3:
 461			value = 0x00084526;
 462			break;
 463		case controller_heathrow:
 464		case controller_ohare:
 465		default:
 466			value = 0x00074526;
 467			break;
 468	}
 469	priv->treg[0][0] = priv->treg[1][0] = value;
 470	priv->treg[0][1] = priv->treg[1][1] = value2;
 471}
 472
 473static int pata_macio_cable_detect(struct ata_port *ap)
 474{
 475	struct pata_macio_priv *priv = ap->private_data;
 476
 477	/* Get cable type from device-tree */
 478	if (priv->kind == controller_kl_ata4 ||
 479	    priv->kind == controller_un_ata6 ||
 480	    priv->kind == controller_k2_ata6 ||
 481	    priv->kind == controller_sh_ata6) {
 482		const char* cable = of_get_property(priv->node, "cable-type",
 483						    NULL);
 484		struct device_node *root = of_find_node_by_path("/");
 485		const char *model = of_get_property(root, "model", NULL);
 486
 487		of_node_put(root);
 488
 489		if (cable && !strncmp(cable, "80-", 3)) {
  490			/* Some drives fail to detect the 80-conductor cable in
  491			 * PowerBooks. These machines use a proprietary short
  492			 * IDE cable anyway.
  493			 */
  494			if (model && !strncmp(model, "PowerBook", 9))
 495				return ATA_CBL_PATA40_SHORT;
 496			else
 497				return ATA_CBL_PATA80;
 498		}
 499	}
 500
  501	/* G5s seem to have an incorrect cable type in the device-tree.
  502	 * Let's assume they always have an 80-conductor cable; this seems
  503	 * to always be the case unless the user mucked around.
  504	 */
 505	if (of_device_is_compatible(priv->node, "K2-UATA") ||
 506	    of_device_is_compatible(priv->node, "shasta-ata"))
 507		return ATA_CBL_PATA80;
 508
  509	/* Anything else is a 40-conductor cable */
 510	return ATA_CBL_PATA40;
 511}
 512
 513static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 514{
 515	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 516	struct ata_port *ap = qc->ap;
 517	struct pata_macio_priv *priv = ap->private_data;
 518	struct scatterlist *sg;
 519	struct dbdma_cmd *table;
 520	unsigned int si, pi;
 521
 522	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
 523		   __func__, qc, qc->flags, write, qc->dev->devno);
 524
 525	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 526		return;
 527
 528	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 529
 530	pi = 0;
 531	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 532		u32 addr, sg_len, len;
 533
 534		/* determine if physical DMA addr spans 64K boundary.
 535		 * Note h/w doesn't support 64-bit, so we unconditionally
 536		 * truncate dma_addr_t to u32.
 537		 */
 538		addr = (u32) sg_dma_address(sg);
 539		sg_len = sg_dma_len(sg);
 540
 541		while (sg_len) {
 542			/* table overflow should never happen */
 543			BUG_ON (pi++ >= MAX_DCMDS);
 544
 545			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 546			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
 547			table->req_count = cpu_to_le16(len);
 548			table->phy_addr = cpu_to_le32(addr);
 549			table->cmd_dep = 0;
 550			table->xfer_status = 0;
 551			table->res_count = 0;
 552			addr += len;
 553			sg_len -= len;
 554			++table;
 555		}
 556	}
 557
 558	/* Should never happen according to Tejun */
 559	BUG_ON(!pi);
 560
  561	/* Convert the last data command to an INPUT_LAST/OUTPUT_LAST */
 562	table--;
 563	table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
 564	table++;
 565
 566	/* Add the stop command to the end of the list */
 567	memset(table, 0, sizeof(struct dbdma_cmd));
 568	table->command = cpu_to_le16(DBDMA_STOP);
 569
 570	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
 571}
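/* Illustrative DBDMA list built by pata_macio_qc_prep() for a write with
 * a single 0x1f000-byte DMA segment (sizes are hypothetical):
 *
 *	OUTPUT_MORE	req_count=0xff00	phy_addr=seg
 *	OUTPUT_LAST	req_count=0xf100	phy_addr=seg + 0xff00
 *	DBDMA_STOP
 *
 * Each segment is split into chunks of at most MAX_DBDMA_SEG bytes, the
 * last data command is converted to its *_LAST variant, and a stop
 * command terminates the list.
 */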
 572
 573
 574static void pata_macio_freeze(struct ata_port *ap)
 575{
 576	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 577
 578	if (dma_regs) {
 579		unsigned int timeout = 1000000;
 580
 581		/* Make sure DMA controller is stopped */
 582		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
 583		while (--timeout && (readl(&dma_regs->status) & RUN))
 584			udelay(1);
 585	}
 586
 587	ata_sff_freeze(ap);
 588}
 589
 590
 591static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
 592{
 593	struct ata_port *ap = qc->ap;
 594	struct pata_macio_priv *priv = ap->private_data;
 595	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 596	int dev = qc->dev->devno;
 597
 598	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 599
  600	/* Make sure DMA command list updates are visible */
 601	writel(priv->dma_table_dma, &dma_regs->cmdptr);
 602
 603	/* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
 604	 * UDMA reads
 605	 */
 606	if (priv->kind == controller_kl_ata4 &&
 607	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
 608		void __iomem *rbase = ap->ioaddr.cmd_addr;
 609		u32 reg = priv->treg[dev][0];
 610
 611		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 612			reg += 0x00800000;
 613		writel(reg, rbase + IDE_TIMING_CONFIG);
 614	}
 615
 616	/* issue r/w command */
 617	ap->ops->sff_exec_command(ap, &qc->tf);
 618}
 619
 620static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
 621{
 622	struct ata_port *ap = qc->ap;
 623	struct pata_macio_priv *priv = ap->private_data;
 624	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 625
 626	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 627
 628	writel((RUN << 16) | RUN, &dma_regs->control);
 629	/* Make sure it gets to the controller right now */
 630	(void)readl(&dma_regs->control);
 631}
 632
 633static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
 634{
 635	struct ata_port *ap = qc->ap;
 636	struct pata_macio_priv *priv = ap->private_data;
 637	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 638	unsigned int timeout = 1000000;
 639
 640	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 641
  642	/* Stop the DMA engine and wait for it to fully halt */
 643	writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
 644	while (--timeout && (readl(&dma_regs->status) & RUN))
 645		udelay(1);
 646}
 647
 648static u8 pata_macio_bmdma_status(struct ata_port *ap)
 649{
 650	struct pata_macio_priv *priv = ap->private_data;
 651	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 652	u32 dstat, rstat = ATA_DMA_INTR;
 653	unsigned long timeout = 0;
 654
 655	dstat = readl(&dma_regs->status);
 656
 657	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
 658
 659	/* We have two things to deal with here:
 660	 *
 661	 * - The dbdma won't stop if the command was started
 662	 * but completed with an error without transferring all
  663	 * the data. This happens when bad blocks are encountered
  664	 * during a multi-block transfer.
  665	 *
  666	 * - The dbdma fifo hasn't yet finished flushing
  667	 * to system memory when the disk interrupt occurs.
 668	 *
 669	 */
 670
 671	/* First check for errors */
 672	if ((dstat & (RUN|DEAD)) != RUN)
 673		rstat |= ATA_DMA_ERR;
 674
 675	/* If ACTIVE is cleared, the STOP command has been hit and
 676	 * the transfer is complete. If not, we have to flush the
 677	 * channel.
 678	 */
 679	if ((dstat & ACTIVE) == 0)
 680		return rstat;
 681
 682	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
 683
  684	/* If the dbdma hasn't executed the STOP command yet, the
 685	 * active bit is still set. We consider that we aren't
 686	 * sharing interrupts (which is hopefully the case with
 687	 * those controllers) and so we just try to flush the
 688	 * channel for pending data in the fifo
 689	 */
 690	udelay(1);
 691	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
 692	for (;;) {
 693		udelay(1);
 694		dstat = readl(&dma_regs->status);
 695		if ((dstat & FLUSH) == 0)
 696			break;
 697		if (++timeout > 1000) {
 698			dev_warn(priv->dev, "timeout flushing DMA\n");
 699			rstat |= ATA_DMA_ERR;
 700			break;
 701		}
 702	}
 703	return rstat;
 704}
 705
 706/* port_start is when we allocate the DMA command list */
 707static int pata_macio_port_start(struct ata_port *ap)
 708{
 709	struct pata_macio_priv *priv = ap->private_data;
 710
 711	if (ap->ioaddr.bmdma_addr == NULL)
 712		return 0;
 713
 714	/* Allocate space for the DBDMA commands.
 715	 *
 716	 * The +2 is +1 for the stop command and +1 to allow for
 717	 * aligning the start address to a multiple of 16 bytes.
 718	 */
 719	priv->dma_table_cpu =
 720		dmam_alloc_coherent(priv->dev,
 721				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
 722				    &priv->dma_table_dma, GFP_KERNEL);
 723	if (priv->dma_table_cpu == NULL) {
 724		dev_err(priv->dev, "Unable to allocate DMA command list\n");
 725		ap->ioaddr.bmdma_addr = NULL;
 726		ap->mwdma_mask = 0;
 727		ap->udma_mask = 0;
 728	}
 729	return 0;
 730}
 731
 732static void pata_macio_irq_clear(struct ata_port *ap)
 733{
 734	struct pata_macio_priv *priv = ap->private_data;
 735
 736	/* Nothing to do here */
 737
 738	dev_dbgdma(priv->dev, "%s\n", __func__);
 739}
 740
 741static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
 742{
 743	dev_dbg(priv->dev, "Enabling & resetting... \n");
 744
 745	if (priv->mediabay)
 746		return;
 747
 748	if (priv->kind == controller_ohare && !resume) {
  749		/* The code below is having trouble on some OHare machines
  750		 * (timing related?). Until I can get my hands on one of these
  751		 * units, I'll keep the old way.
 752		 */
 753		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
 754	} else {
 755		int rc;
 756
 757 		/* Reset and enable controller */
 758		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 759					 priv->node, priv->aapl_bus_id, 1);
 760		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
 761				    priv->node, priv->aapl_bus_id, 1);
 762		msleep(10);
 763		/* Only bother waiting if there's a reset control */
 764		if (rc == 0) {
 765			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 766					    priv->node, priv->aapl_bus_id, 0);
 767			msleep(IDE_WAKEUP_DELAY_MS);
 768		}
 769	}
 770
 771	/* If resuming a PCI device, restore the config space here */
 772	if (priv->pdev && resume) {
 773		int rc;
 774
 775		pci_restore_state(priv->pdev);
 776		rc = pcim_enable_device(priv->pdev);
 777		if (rc)
 778			dev_err(&priv->pdev->dev,
 779				"Failed to enable device after resume (%d)\n",
 780				rc);
 781		else
 782			pci_set_master(priv->pdev);
 783	}
 784
  785	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
  786	 * really seem necessary and skipping it speeds up the boot process.
 787	 */
 788	if (priv->kauai_fcr)
 789		writel(KAUAI_FCR_UATA_MAGIC |
 790		       KAUAI_FCR_UATA_RESET_N |
 791		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
 792}
 793
  794/* Hook the standard slave config to fix up some HW-related alignment
 795 * restrictions
 796 */
 797static int pata_macio_slave_config(struct scsi_device *sdev)
 798{
 799	struct ata_port *ap = ata_shost_to_port(sdev->host);
 800	struct pata_macio_priv *priv = ap->private_data;
 801	struct ata_device *dev;
 802	u16 cmd;
 803	int rc;
 804
 805	/* First call original */
 806	rc = ata_scsi_slave_config(sdev);
 807	if (rc)
 808		return rc;
 809
 810	/* This is lifted from sata_nv */
 811	dev = &ap->link.device[sdev->id];
 812
  813	/* OHare has issues with non-cache-aligned DMA on some chipsets */
 814	if (priv->kind == controller_ohare) {
 815		blk_queue_update_dma_alignment(sdev->request_queue, 31);
 816		blk_queue_update_dma_pad(sdev->request_queue, 31);
 817
 818		/* Tell the world about it */
 819		ata_dev_info(dev, "OHare alignment limits applied\n");
 820		return 0;
 821	}
 822
 823	/* We only have issues with ATAPI */
 824	if (dev->class != ATA_DEV_ATAPI)
 825		return 0;
 826
 827	/* Shasta and K2 seem to have "issues" with reads ... */
 828	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
  829		/* Alright, these are bad; apply restrictions */
 830		blk_queue_update_dma_alignment(sdev->request_queue, 15);
 831		blk_queue_update_dma_pad(sdev->request_queue, 15);
 832
  833		/* We enable MWI and hack the cache line size directly here. This
  834		 * is specific to this chipset and not normal values; we happen
  835		 * to somewhat know what we are doing here (which is basically
  836		 * to do the same thing Apple does and pray they did not get it wrong :-)
 837		 */
 838		BUG_ON(!priv->pdev);
 839		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
 840		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
 841		pci_write_config_word(priv->pdev, PCI_COMMAND,
 842				      cmd | PCI_COMMAND_INVALIDATE);
 843
 844		/* Tell the world about it */
 845		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
 846	}
 847
 848	return 0;
 849}
 850
 851#ifdef CONFIG_PM_SLEEP
 852static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
 853{
 854	int rc;
 855
 856	/* First, core libata suspend to do most of the work */
 857	rc = ata_host_suspend(priv->host, mesg);
 858	if (rc)
 859		return rc;
 860
 861	/* Restore to default timings */
 862	pata_macio_default_timings(priv);
 863
  864	/* Mask the interrupt. Not strictly necessary, but the old driver did
  865	 * it and I'd rather not change that here */
 866	disable_irq(priv->irq);
 867
 868	/* The media bay will handle itself just fine */
 869	if (priv->mediabay)
 870		return 0;
 871
 872	/* Kauai has bus control FCRs directly here */
 873	if (priv->kauai_fcr) {
 874		u32 fcr = readl(priv->kauai_fcr);
 875		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
 876		writel(fcr, priv->kauai_fcr);
 877	}
 878
  879	/* For PCI, save state and disable DMA. No need to call
  880	 * pci_set_power_state(); the HW doesn't do D states that
  881	 * way, and the platform code will take care of suspending the
  882	 * ASIC properly
 883	 */
 884	if (priv->pdev) {
 885		pci_save_state(priv->pdev);
 886		pci_disable_device(priv->pdev);
 887	}
 888
 889	/* Disable the bus on older machines and the cell on kauai */
 890	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
 891			    priv->aapl_bus_id, 0);
 892
 893	return 0;
 894}
 895
 896static int pata_macio_do_resume(struct pata_macio_priv *priv)
 897{
 898	/* Reset and re-enable the HW */
 899	pata_macio_reset_hw(priv, 1);
 900
 901	/* Sanitize drive timings */
 902	pata_macio_apply_timings(priv->host->ports[0], 0);
 903
 904	/* We want our IRQ back ! */
 905	enable_irq(priv->irq);
 906
 907	/* Let the libata core take it from there */
 908	ata_host_resume(priv->host);
 909
 910	return 0;
 911}
 912#endif /* CONFIG_PM_SLEEP */
 913
 914static struct scsi_host_template pata_macio_sht = {
 915	ATA_BASE_SHT(DRV_NAME),
 916	.sg_tablesize		= MAX_DCMDS,
  917	/* We may not need one that strict */
 918	.dma_boundary		= ATA_DMA_BOUNDARY,
  919	/* Not sure what the real max is, but we know it's less than 64K; let's
  920	 * use 64K minus 256
 921	 */
 922	.max_segment_size	= MAX_DBDMA_SEG,
 923	.slave_configure	= pata_macio_slave_config,
 924};
 925
 926static struct ata_port_operations pata_macio_ops = {
 927	.inherits		= &ata_bmdma_port_ops,
 928
 929	.freeze			= pata_macio_freeze,
 930	.set_piomode		= pata_macio_set_timings,
 931	.set_dmamode		= pata_macio_set_timings,
 932	.cable_detect		= pata_macio_cable_detect,
 933	.sff_dev_select		= pata_macio_dev_select,
 934	.qc_prep		= pata_macio_qc_prep,
 935	.bmdma_setup		= pata_macio_bmdma_setup,
 936	.bmdma_start		= pata_macio_bmdma_start,
 937	.bmdma_stop		= pata_macio_bmdma_stop,
 938	.bmdma_status		= pata_macio_bmdma_status,
 939	.port_start		= pata_macio_port_start,
 940	.sff_irq_clear		= pata_macio_irq_clear,
 941};
 942
 943static void pata_macio_invariants(struct pata_macio_priv *priv)
 944{
 945	const int *bidp;
 946
 947	/* Identify the type of controller */
 948	if (of_device_is_compatible(priv->node, "shasta-ata")) {
 949		priv->kind = controller_sh_ata6;
 950	        priv->timings = pata_macio_shasta_timings;
 951	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
 952		priv->kind = controller_un_ata6;
 953	        priv->timings = pata_macio_kauai_timings;
 954	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
 955		priv->kind = controller_k2_ata6;
 956	        priv->timings = pata_macio_kauai_timings;
 957	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
 958		if (of_node_name_eq(priv->node, "ata-4")) {
 959			priv->kind = controller_kl_ata4;
 960			priv->timings = pata_macio_kl66_timings;
 961		} else {
 962			priv->kind = controller_kl_ata3;
 963			priv->timings = pata_macio_kl33_timings;
 964		}
 965	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
 966		priv->kind = controller_heathrow;
 967		priv->timings = pata_macio_heathrow_timings;
 968	} else {
 969		priv->kind = controller_ohare;
 970		priv->timings = pata_macio_ohare_timings;
 971	}
 972
 973	/* XXX FIXME --- setup priv->mediabay here */
 974
 975	/* Get Apple bus ID (for clock and ASIC control) */
 976	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
 977	priv->aapl_bus_id =  bidp ? *bidp : 0;
 978
 979	/* Fixup missing Apple bus ID in case of media-bay */
 980	if (priv->mediabay && bidp == 0)
 981		priv->aapl_bus_id = 1;
 982}
 983
 984static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
 985				 void __iomem * base, void __iomem * dma)
 986{
 987	/* cmd_addr is the base of regs for that port */
 988	ioaddr->cmd_addr	= base;
 989
 990	/* taskfile registers */
 991	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
 992	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
 993	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
 994	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
 995	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
 996	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
 997	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
 998	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
 999	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
1000	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
1001	ioaddr->altstatus_addr	= base + 0x160;
1002	ioaddr->ctl_addr	= base + 0x160;
1003	ioaddr->bmdma_addr	= dma;
1004}
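/* The shift by 4 above reflects the macio register layout: taskfile
 * registers are spaced 16 bytes apart, so the data register sits at
 * cmd_addr + 0x00 and the status register at cmd_addr + 0x70, while the
 * control/altstatus register lives at the fixed offset 0x160.
 */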
1005
1006static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1007					 struct ata_port_info *pinfo)
1008{
1009	int i = 0;
1010
1011	pinfo->pio_mask		= 0;
1012	pinfo->mwdma_mask	= 0;
1013	pinfo->udma_mask	= 0;
1014
1015	while (priv->timings[i].mode > 0) {
1016		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
1017		switch(priv->timings[i].mode & 0xf0) {
1018		case 0x00: /* PIO */
1019			pinfo->pio_mask |= (mask >> 8);
1020			break;
1021		case 0x20: /* MWDMA */
1022			pinfo->mwdma_mask |= mask;
1023			break;
1024		case 0x40: /* UDMA */
1025			pinfo->udma_mask |= mask;
1026			break;
1027		}
1028		i++;
1029	}
1030	dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
1031		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1032}
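/* Worked example (XFER_* values from include/linux/ata.h): XFER_MW_DMA_2
 * is 0x22, so the switch above sees 0x20 (MWDMA) and sets bit 2 of
 * mwdma_mask; XFER_PIO_4 is 0x0c, giving mask 1 << 12, which the >> 8
 * turns into bit 4 of pio_mask.
 */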
1033
1034static int pata_macio_common_init(struct pata_macio_priv *priv,
1035				  resource_size_t tfregs,
1036				  resource_size_t dmaregs,
1037				  resource_size_t fcregs,
1038				  unsigned long irq)
1039{
1040	struct ata_port_info		pinfo;
1041	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
1042	void __iomem			*dma_regs = NULL;
1043
1044	/* Fill up privates with various invariants collected from the
1045	 * device-tree
1046	 */
1047	pata_macio_invariants(priv);
1048
1049	/* Make sure we have sane initial timings in the cache */
1050	pata_macio_default_timings(priv);
1051
1052	/* Allocate libata host for 1 port */
1053	memset(&pinfo, 0, sizeof(struct ata_port_info));
1054	pmac_macio_calc_timing_masks(priv, &pinfo);
1055	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
1056	pinfo.port_ops		= &pata_macio_ops;
1057	pinfo.private_data	= priv;
1058
1059	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1060	if (priv->host == NULL) {
1061		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1062		return -ENOMEM;
1063	}
1064
1065	/* Setup the private data in host too */
1066	priv->host->private_data = priv;
1067
1068	/* Map base registers */
1069	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1070	if (priv->tfregs == NULL) {
1071		dev_err(priv->dev, "Failed to map ATA ports\n");
1072		return -ENOMEM;
1073	}
1074	priv->host->iomap = &priv->tfregs;
1075
1076	/* Map DMA regs */
1077	if (dmaregs != 0) {
1078		dma_regs = devm_ioremap(priv->dev, dmaregs,
1079					sizeof(struct dbdma_regs));
1080		if (dma_regs == NULL)
1081			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1082	}
1083
1084	/* If chip has local feature control, map those regs too */
1085	if (fcregs != 0) {
1086		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1087		if (priv->kauai_fcr == NULL) {
1088			dev_err(priv->dev, "Failed to map ATA FCR register\n");
1089			return -ENOMEM;
1090		}
1091	}
1092
1093	/* Setup port data structure */
1094	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1095			     priv->tfregs, dma_regs);
1096	priv->host->ports[0]->private_data = priv;
1097
1098	/* hard-reset the controller */
1099	pata_macio_reset_hw(priv, 0);
1100	pata_macio_apply_timings(priv->host->ports[0], 0);
1101
1102	/* Enable bus master if necessary */
1103	if (priv->pdev && dma_regs)
1104		pci_set_master(priv->pdev);
1105
1106	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1107		 macio_ata_names[priv->kind], priv->aapl_bus_id);
1108
1109	/* Start it up */
1110	priv->irq = irq;
1111	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1112				 &pata_macio_sht);
1113}
1114
1115static int pata_macio_attach(struct macio_dev *mdev,
1116			     const struct of_device_id *match)
1117{
1118	struct pata_macio_priv	*priv;
1119	resource_size_t		tfregs, dmaregs = 0;
1120	unsigned long		irq;
1121	int			rc;
1122
1123	/* Check for broken device-trees */
1124	if (macio_resource_count(mdev) == 0) {
1125		dev_err(&mdev->ofdev.dev,
1126			"No addresses for controller\n");
1127		return -ENXIO;
1128	}
1129
1130	/* Enable managed resources */
1131	macio_enable_devres(mdev);
1132
1133	/* Allocate and init private data structure */
1134	priv = devm_kzalloc(&mdev->ofdev.dev,
1135			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1136	if (!priv)
1137		return -ENOMEM;
1138
1139	priv->node = of_node_get(mdev->ofdev.dev.of_node);
1140	priv->mdev = mdev;
1141	priv->dev = &mdev->ofdev.dev;
1142
1143	/* Request memory resource for taskfile registers */
1144	if (macio_request_resource(mdev, 0, "pata-macio")) {
1145		dev_err(&mdev->ofdev.dev,
1146			"Cannot obtain taskfile resource\n");
1147		return -EBUSY;
1148	}
1149	tfregs = macio_resource_start(mdev, 0);
1150
1151	/* Request resources for DMA registers if any */
1152	if (macio_resource_count(mdev) >= 2) {
1153		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1154			dev_err(&mdev->ofdev.dev,
1155				"Cannot obtain DMA resource\n");
1156		else
1157			dmaregs = macio_resource_start(mdev, 1);
1158	}
1159
1160	/*
1161	 * Fixup missing IRQ for some old implementations with broken
1162	 * device-trees.
1163	 *
1164	 * This is a bit bogus, it should be fixed in the device-tree itself,
1165	 * via the existing macio fixups, based on the type of interrupt
1166	 * controller in the machine. However, I have no test HW for this case,
1167	 * and this trick works well enough on those old machines...
1168	 */
1169	if (macio_irq_count(mdev) == 0) {
1170		dev_warn(&mdev->ofdev.dev,
1171			 "No interrupts for controller, using 13\n");
1172		irq = irq_create_mapping(NULL, 13);
1173	} else
1174		irq = macio_irq(mdev, 0);
1175
 1176	/* Prevent media bay callbacks until fully registered */
1177	lock_media_bay(priv->mdev->media_bay);
1178
1179	/* Get register addresses and call common initialization */
1180	rc = pata_macio_common_init(priv,
1181				    tfregs,		/* Taskfile regs */
1182				    dmaregs,		/* DBDMA regs */
1183				    0,			/* Feature control */
1184				    irq);
1185	unlock_media_bay(priv->mdev->media_bay);
1186
1187	return rc;
1188}
1189
1190static int pata_macio_detach(struct macio_dev *mdev)
1191{
1192	struct ata_host *host = macio_get_drvdata(mdev);
1193	struct pata_macio_priv *priv = host->private_data;
1194
1195	lock_media_bay(priv->mdev->media_bay);
1196
1197	/* Make sure the mediabay callback doesn't try to access
1198	 * dead stuff
1199	 */
1200	priv->host->private_data = NULL;
1201
1202	ata_host_detach(host);
1203
1204	unlock_media_bay(priv->mdev->media_bay);
1205
1206	return 0;
1207}
1208
1209#ifdef CONFIG_PM_SLEEP
1210static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1211{
1212	struct ata_host *host = macio_get_drvdata(mdev);
1213
1214	return pata_macio_do_suspend(host->private_data, mesg);
1215}
1216
1217static int pata_macio_resume(struct macio_dev *mdev)
1218{
1219	struct ata_host *host = macio_get_drvdata(mdev);
1220
1221	return pata_macio_do_resume(host->private_data);
1222}
1223#endif /* CONFIG_PM_SLEEP */
1224
1225#ifdef CONFIG_PMAC_MEDIABAY
1226static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1227{
1228	struct ata_host *host = macio_get_drvdata(mdev);
1229	struct ata_port *ap;
1230	struct ata_eh_info *ehi;
1231	struct ata_device *dev;
1232	unsigned long flags;
1233
1234	if (!host || !host->private_data)
1235		return;
1236	ap = host->ports[0];
1237	spin_lock_irqsave(ap->lock, flags);
1238	ehi = &ap->link.eh_info;
1239	if (mb_state == MB_CD) {
1240		ata_ehi_push_desc(ehi, "mediabay plug");
1241		ata_ehi_hotplugged(ehi);
1242		ata_port_freeze(ap);
1243	} else {
1244		ata_ehi_push_desc(ehi, "mediabay unplug");
1245		ata_for_each_dev(dev, &ap->link, ALL)
1246			dev->flags |= ATA_DFLAG_DETACH;
1247		ata_port_abort(ap);
1248	}
1249	spin_unlock_irqrestore(ap->lock, flags);
1250
1251}
1252#endif /* CONFIG_PMAC_MEDIABAY */
1253
1254
1255static int pata_macio_pci_attach(struct pci_dev *pdev,
1256				 const struct pci_device_id *id)
1257{
1258	struct pata_macio_priv	*priv;
1259	struct device_node	*np;
1260	resource_size_t		rbase;
1261
1262	/* We cannot use a MacIO controller without its OF device node */
1263	np = pci_device_to_OF_node(pdev);
1264	if (np == NULL) {
1265		dev_err(&pdev->dev,
1266			"Cannot find OF device node for controller\n");
1267		return -ENODEV;
1268	}
1269
1270	/* Check that it can be enabled */
1271	if (pcim_enable_device(pdev)) {
1272		dev_err(&pdev->dev,
1273			"Cannot enable controller PCI device\n");
1274		return -ENXIO;
1275	}
1276
1277	/* Allocate and init private data structure */
1278	priv = devm_kzalloc(&pdev->dev,
1279			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1280	if (!priv)
1281		return -ENOMEM;
1282
1283	priv->node = of_node_get(np);
1284	priv->pdev = pdev;
1285	priv->dev = &pdev->dev;
1286
1287	/* Get MMIO regions */
1288	if (pci_request_regions(pdev, "pata-macio")) {
1289		dev_err(&pdev->dev,
1290			"Cannot obtain PCI resources\n");
1291		return -EBUSY;
1292	}
1293
1294	/* Get register addresses and call common initialization */
1295	rbase = pci_resource_start(pdev, 0);
1296	if (pata_macio_common_init(priv,
1297				   rbase + 0x2000,	/* Taskfile regs */
1298				   rbase + 0x1000,	/* DBDMA regs */
1299				   rbase,		/* Feature control */
1300				   pdev->irq))
1301		return -ENXIO;
1302
1303	return 0;
1304}
1305
1306static void pata_macio_pci_detach(struct pci_dev *pdev)
1307{
1308	struct ata_host *host = pci_get_drvdata(pdev);
1309
1310	ata_host_detach(host);
1311}
1312
1313#ifdef CONFIG_PM_SLEEP
1314static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1315{
1316	struct ata_host *host = pci_get_drvdata(pdev);
1317
1318	return pata_macio_do_suspend(host->private_data, mesg);
1319}
1320
1321static int pata_macio_pci_resume(struct pci_dev *pdev)
1322{
1323	struct ata_host *host = pci_get_drvdata(pdev);
1324
1325	return pata_macio_do_resume(host->private_data);
1326}
1327#endif /* CONFIG_PM_SLEEP */
1328
1329static const struct of_device_id pata_macio_match[] =
1330{
1331	{
1332	.name 		= "IDE",
1333	},
1334	{
1335	.name 		= "ATA",
1336	},
1337	{
1338	.type		= "ide",
1339	},
1340	{
1341	.type		= "ata",
1342	},
1343	{},
1344};
1345MODULE_DEVICE_TABLE(of, pata_macio_match);
1346
1347static struct macio_driver pata_macio_driver =
1348{
1349	.driver = {
1350		.name 		= "pata-macio",
1351		.owner		= THIS_MODULE,
1352		.of_match_table	= pata_macio_match,
1353	},
1354	.probe		= pata_macio_attach,
1355	.remove		= pata_macio_detach,
1356#ifdef CONFIG_PM_SLEEP
1357	.suspend	= pata_macio_suspend,
1358	.resume		= pata_macio_resume,
1359#endif
1360#ifdef CONFIG_PMAC_MEDIABAY
1361	.mediabay_event	= pata_macio_mb_event,
1362#endif
1363};
1364
1365static const struct pci_device_id pata_macio_pci_match[] = {
1366	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
1367	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
1368	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
1369	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
1370	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
1371	{},
1372};
1373
1374static struct pci_driver pata_macio_pci_driver = {
1375	.name		= "pata-pci-macio",
1376	.id_table	= pata_macio_pci_match,
1377	.probe		= pata_macio_pci_attach,
1378	.remove		= pata_macio_pci_detach,
1379#ifdef CONFIG_PM_SLEEP
1380	.suspend	= pata_macio_pci_suspend,
1381	.resume		= pata_macio_pci_resume,
1382#endif
1383	.driver = {
1384		.owner		= THIS_MODULE,
1385	},
1386};
1387MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1388
1389
1390static int __init pata_macio_init(void)
1391{
1392	int rc;
1393
1394	if (!machine_is(powermac))
1395		return -ENODEV;
1396
1397	rc = pci_register_driver(&pata_macio_pci_driver);
1398	if (rc)
1399		return rc;
1400	rc = macio_register_driver(&pata_macio_driver);
1401	if (rc) {
1402		pci_unregister_driver(&pata_macio_pci_driver);
1403		return rc;
1404	}
1405	return 0;
1406}
1407
1408static void __exit pata_macio_exit(void)
1409{
1410	macio_unregister_driver(&pata_macio_driver);
1411	pci_unregister_driver(&pata_macio_pci_driver);
1412}
1413
1414module_init(pata_macio_init);
1415module_exit(pata_macio_exit);
1416
1417MODULE_AUTHOR("Benjamin Herrenschmidt");
1418MODULE_DESCRIPTION("Apple MacIO PATA driver");
1419MODULE_LICENSE("GPL");
1420MODULE_VERSION(DRV_VERSION);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Libata based driver for Apple "macio" family of PATA controllers
   4 *
   5 * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
   6 *                     <benh@kernel.crashing.org>
   7 *
   8 * Some bits and pieces from drivers/ide/ppc/pmac.c
   9 *
  10 */
  11
  12#undef DEBUG
  13#undef DEBUG_DMA
  14
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/blkdev.h>
  19#include <linux/ata.h>
  20#include <linux/libata.h>
  21#include <linux/adb.h>
  22#include <linux/pmu.h>
  23#include <linux/scatterlist.h>
  24#include <linux/of.h>
  25#include <linux/gfp.h>
  26#include <linux/pci.h>
  27
  28#include <scsi/scsi.h>
  29#include <scsi/scsi_host.h>
  30#include <scsi/scsi_device.h>
  31
  32#include <asm/macio.h>
  33#include <asm/io.h>
  34#include <asm/dbdma.h>
  35#include <asm/machdep.h>
  36#include <asm/pmac_feature.h>
  37#include <asm/mediabay.h>
  38
  39#ifdef DEBUG_DMA
  40#define dev_dbgdma(dev, format, arg...)		\
  41	dev_printk(KERN_DEBUG , dev , format , ## arg)
  42#else
  43#define dev_dbgdma(dev, format, arg...)		\
  44	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
  45#endif
  46
  47#define DRV_NAME	"pata_macio"
  48#define DRV_VERSION	"0.9"
  49
  50/* Models of macio ATA controller */
  51enum {
  52	controller_ohare,	/* OHare based */
  53	controller_heathrow,	/* Heathrow/Paddington */
  54	controller_kl_ata3,	/* KeyLargo ATA-3 */
  55	controller_kl_ata4,	/* KeyLargo ATA-4 */
  56	controller_un_ata6,	/* UniNorth2 ATA-6 */
  57	controller_k2_ata6,	/* K2 ATA-6 */
  58	controller_sh_ata6,	/* Shasta ATA-6 */
  59};
  60
  61static const char* macio_ata_names[] = {
  62	"OHare ATA",		/* OHare based */
  63	"Heathrow ATA",		/* Heathrow/Paddington */
  64	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
  65	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
  66	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
  67	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
  68	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
  69};
  70
  71/*
  72 * Extra registers, both 32-bit little-endian
  73 */
  74#define IDE_TIMING_CONFIG	0x200
  75#define IDE_INTERRUPT		0x300
  76
  77/* Kauai (U2) ATA has different register setup */
  78#define IDE_KAUAI_PIO_CONFIG	0x200
  79#define IDE_KAUAI_ULTRA_CONFIG	0x210
  80#define IDE_KAUAI_POLL_CONFIG	0x220
  81
  82/*
  83 * Timing configuration register definitions
  84 */
  85
  86/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
  87#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
  88#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
  89#define IDE_SYSCLK_NS		30	/* 33Mhz cell */
  90#define IDE_SYSCLK_66_NS	15	/* 66Mhz cell */
  91
  92/* 133Mhz cell, found in shasta.
  93 * See comments about 100 Mhz Uninorth 2...
  94 * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
  95 * weird and I don't now why .. at this stage
  96 */
  97#define TR_133_PIOREG_PIO_MASK		0xff000fff
  98#define TR_133_PIOREG_MDMA_MASK		0x00fff800
  99#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
 100#define TR_133_UDMAREG_UDMA_EN		0x00000001
 101
 102/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
 103 * (106b/0033) on uninorth or K2 internal PCI bus and it's clock is
 104 * controlled like gem or fw. It appears to be an evolution of keylargo
 105 * ATA4 with a timing register extended to 2x32bits registers (one
 106 * for PIO & MWDMA and one for UDMA, and a similar DBDMA channel.
 107 * It has it's own local feature control register as well.
 108 *
 109 * After scratching my mind over the timing values, at least for PIO
 110 * and MDMA, I think I've figured the format of the timing register,
 111 * though I use pre-calculated tables for UDMA as usual...
 112 */
 113#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
 114#define TR_100_PIO_ADDRSETUP_SHIFT	24
 115#define TR_100_MDMA_MASK		0x00fff000
 116#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
 117#define TR_100_MDMA_RECOVERY_SHIFT	18
 118#define TR_100_MDMA_ACCESS_MASK		0x0003f000
 119#define TR_100_MDMA_ACCESS_SHIFT	12
 120#define TR_100_PIO_MASK			0xff000fff
 121#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
 122#define TR_100_PIO_RECOVERY_SHIFT	6
 123#define TR_100_PIO_ACCESS_MASK		0x0000003f
 124#define TR_100_PIO_ACCESS_SHIFT		0
 125
 126#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
 127#define TR_100_UDMAREG_UDMA_EN		0x00000001
 128
 129
 130/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
 131 * 40 connector cable and to 4 on 80 connector one.
 132 * Clock unit is 15ns (66Mhz)
 133 *
 134 * 3 Values can be programmed:
 135 *  - Write data setup, which appears to match the cycle time. They
 136 *    also call it DIOW setup.
 137 *  - Ready to pause time (from spec)
 138 *  - Address setup. That one is weird. I don't see where exactly
 139 *    it fits in UDMA cycles, I got it's name from an obscure piece
 140 *    of commented out code in Darwin. They leave it to 0, we do as
 141 *    well, despite a comment that would lead to think it has a
 142 *    min value of 45ns.
 143 * Apple also add 60ns to the write data setup (or cycle time ?) on
 144 * reads.
 145 */
 146#define TR_66_UDMA_MASK			0xfff00000
 147#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
 148#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
 149#define TR_66_PIO_ADDRSETUP_SHIFT	29
 150#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
 151#define TR_66_UDMA_RDY2PAUS_SHIFT	25
 152#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
 153#define TR_66_UDMA_WRDATASETUP_SHIFT	21
 154#define TR_66_MDMA_MASK			0x000ffc00
 155#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
 156#define TR_66_MDMA_RECOVERY_SHIFT	15
 157#define TR_66_MDMA_ACCESS_MASK		0x00007c00
 158#define TR_66_MDMA_ACCESS_SHIFT		10
 159#define TR_66_PIO_MASK			0xe00003ff
 160#define TR_66_PIO_RECOVERY_MASK		0x000003e0
 161#define TR_66_PIO_RECOVERY_SHIFT	5
 162#define TR_66_PIO_ACCESS_MASK		0x0000001f
 163#define TR_66_PIO_ACCESS_SHIFT		0
 164
 165/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
 166 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
 167 *
 168 * The access time and recovery time can be programmed. Some older
 169 * Darwin code base limit OHare to 150ns cycle time. I decided to do
 170 * the same here fore safety against broken old hardware ;)
 171 * The HalfTick bit, when set, adds half a clock (15ns) to the access
 172 * time and removes one from recovery. It's not supported on KeyLargo
 173 * implementation afaik. The E bit appears to be set for PIO mode 0 and
 174 * is used to reach long timings used in this mode.
 175 */
 176#define TR_33_MDMA_MASK			0x003ff800
 177#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
 178#define TR_33_MDMA_RECOVERY_SHIFT	16
 179#define TR_33_MDMA_ACCESS_MASK		0x0000f800
 180#define TR_33_MDMA_ACCESS_SHIFT		11
 181#define TR_33_MDMA_HALFTICK		0x00200000
 182#define TR_33_PIO_MASK			0x000007ff
 183#define TR_33_PIO_E			0x00000400
 184#define TR_33_PIO_RECOVERY_MASK		0x000003e0
 185#define TR_33_PIO_RECOVERY_SHIFT	5
 186#define TR_33_PIO_ACCESS_MASK		0x0000001f
 187#define TR_33_PIO_ACCESS_SHIFT		0
 188
 189/*
 190 * Interrupt register definitions. Only present on newer cells
 191 * (Keylargo and later afaik) so we don't use it.
 192 */
 193#define IDE_INTR_DMA			0x80000000
 194#define IDE_INTR_DEVICE			0x40000000
 195
 196/*
 197 * FCR Register on Kauai. Not sure what bit 0x4 is  ...
 198 */
 199#define KAUAI_FCR_UATA_MAGIC		0x00000004
 200#define KAUAI_FCR_UATA_RESET_N		0x00000002
 201#define KAUAI_FCR_UATA_ENABLE		0x00000001
 202
 203
 204/* Allow up to 256 DBDMA commands per xfer */
 205#define MAX_DCMDS		256
 206
 207/* Don't let a DMA segment go all the way to 64K */
 208#define MAX_DBDMA_SEG		0xff00
 209
 210
 211/*
 212 * Wait 1s for disk to answer on IDE bus after a hard reset
 213 * of the device (via GPIO/FCR).
 214 *
 215 * Some devices seem to "pollute" the bus even after dropping
 216 * the BSY bit (typically some combo drives slave on the UDMA
 217 * bus) after a hard reset. Since we hard reset all drives on
 218 * KeyLargo ATA66, we have to keep that delay around. I may end
 219 * up not hard resetting anymore on these and keep the delay only
 220 * for older interfaces instead (we have to reset when coming
 221 * from MacOS...) --BenH.
 222 */
 223#define IDE_WAKEUP_DELAY_MS	1000
 224
 225struct pata_macio_timing;
 226
 227struct pata_macio_priv {
 228	int				kind;
 229	int				aapl_bus_id;
 230	int				mediabay : 1;
 231	struct device_node		*node;
 232	struct macio_dev		*mdev;
 233	struct pci_dev			*pdev;
 234	struct device			*dev;
 235	int				irq;
 236	u32				treg[2][2];
 237	void __iomem			*tfregs;
 238	void __iomem			*kauai_fcr;
 239	struct dbdma_cmd *		dma_table_cpu;
 240	dma_addr_t			dma_table_dma;
 241	struct ata_host			*host;
 242	const struct pata_macio_timing	*timings;
 243};
 244
 245/* Previous variants of this driver used to calculate timings
 246 * for various variants of the chip and use tables for others.
 247 *
 248 * Not only was this confusing, but in addition, it isn't clear
 249 * whether our calculation code was correct. It didn't entirely
 250 * match the darwin code and whatever documentation I could find
 251 * on these cells
 252 *
 253 * I decided to entirely rely on a table instead for this version
 254 * of the driver. Also, because I don't really care about derated
 255 * modes and really old HW other than making it work, I'm not going
 256 * to calculate / snoop timing values for something else than the
 257 * standard modes.
 258 */
 259struct pata_macio_timing {
 260	int	mode;
 261	u32	reg1;	/* Bits to set in first timing reg */
 262	u32	reg2;	/* Bits to set in second timing reg */
 263};
 264
 265static const struct pata_macio_timing pata_macio_ohare_timings[] = {
 266	{ XFER_PIO_0,		0x00000526,	0, },
 267	{ XFER_PIO_1,		0x00000085,	0, },
 268	{ XFER_PIO_2,		0x00000025,	0, },
 269	{ XFER_PIO_3,		0x00000025,	0, },
 270	{ XFER_PIO_4,		0x00000025,	0, },
 271	{ XFER_MW_DMA_0,	0x00074000,	0, },
 272	{ XFER_MW_DMA_1,	0x00221000,	0, },
 273	{ XFER_MW_DMA_2,	0x00211000,	0, },
 274	{ -1, 0, 0 }
 275};
 276
 277static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
 278	{ XFER_PIO_0,		0x00000526,	0, },
 279	{ XFER_PIO_1,		0x00000085,	0, },
 280	{ XFER_PIO_2,		0x00000025,	0, },
 281	{ XFER_PIO_3,		0x00000025,	0, },
 282	{ XFER_PIO_4,		0x00000025,	0, },
 283	{ XFER_MW_DMA_0,	0x00074000,	0, },
 284	{ XFER_MW_DMA_1,	0x00221000,	0, },
 285	{ XFER_MW_DMA_2,	0x00211000,	0, },
 286	{ -1, 0, 0 }
 287};
 288
 289static const struct pata_macio_timing pata_macio_kl33_timings[] = {
 290	{ XFER_PIO_0,		0x00000526,	0, },
 291	{ XFER_PIO_1,		0x00000085,	0, },
 292	{ XFER_PIO_2,		0x00000025,	0, },
 293	{ XFER_PIO_3,		0x00000025,	0, },
 294	{ XFER_PIO_4,		0x00000025,	0, },
 295	{ XFER_MW_DMA_0,	0x00084000,	0, },
 296	{ XFER_MW_DMA_1,	0x00021800,	0, },
 297	{ XFER_MW_DMA_2,	0x00011800,	0, },
 298	{ -1, 0, 0 }
 299};
 300
 301static const struct pata_macio_timing pata_macio_kl66_timings[] = {
 302	{ XFER_PIO_0,		0x0000038c,	0, },
 303	{ XFER_PIO_1,		0x0000020a,	0, },
 304	{ XFER_PIO_2,		0x00000127,	0, },
 305	{ XFER_PIO_3,		0x000000c6,	0, },
 306	{ XFER_PIO_4,		0x00000065,	0, },
 307	{ XFER_MW_DMA_0,	0x00084000,	0, },
 308	{ XFER_MW_DMA_1,	0x00029800,	0, },
 309	{ XFER_MW_DMA_2,	0x00019400,	0, },
 310	{ XFER_UDMA_0,		0x19100000,	0, },
 311	{ XFER_UDMA_1,		0x14d00000,	0, },
 312	{ XFER_UDMA_2,		0x10900000,	0, },
 313	{ XFER_UDMA_3,		0x0c700000,	0, },
 314	{ XFER_UDMA_4,		0x0c500000,	0, },
 315	{ -1, 0, 0 }
 316};
 317
 318static const struct pata_macio_timing pata_macio_kauai_timings[] = {
 319	{ XFER_PIO_0,		0x08000a92,	0, },
 320	{ XFER_PIO_1,		0x0800060f,	0, },
 321	{ XFER_PIO_2,		0x0800038b,	0, },
 322	{ XFER_PIO_3,		0x05000249,	0, },
 323	{ XFER_PIO_4,		0x04000148,	0, },
 324	{ XFER_MW_DMA_0,	0x00618000,	0, },
 325	{ XFER_MW_DMA_1,	0x00209000,	0, },
 326	{ XFER_MW_DMA_2,	0x00148000,	0, },
 327	{ XFER_UDMA_0,		         0,	0x000070c1, },
 328	{ XFER_UDMA_1,		         0,	0x00005d81, },
 329	{ XFER_UDMA_2,		         0,	0x00004a61, },
 330	{ XFER_UDMA_3,		         0,	0x00003a51, },
 331	{ XFER_UDMA_4,		         0,	0x00002a31, },
 332	{ XFER_UDMA_5,		         0,	0x00002921, },
 333	{ -1, 0, 0 }
 334};
 335
 336static const struct pata_macio_timing pata_macio_shasta_timings[] = {
 337	{ XFER_PIO_0,		0x0a000c97,	0, },
 338	{ XFER_PIO_1,		0x07000712,	0, },
 339	{ XFER_PIO_2,		0x040003cd,	0, },
 340	{ XFER_PIO_3,		0x0500028b,	0, },
 341	{ XFER_PIO_4,		0x0400010a,	0, },
 342	{ XFER_MW_DMA_0,	0x00820800,	0, },
 343	{ XFER_MW_DMA_1,	0x0028b000,	0, },
 344	{ XFER_MW_DMA_2,	0x001ca000,	0, },
 345	{ XFER_UDMA_0,		         0,	0x00035901, },
 346	{ XFER_UDMA_1,		         0,	0x000348b1, },
 347	{ XFER_UDMA_2,		         0,	0x00033881, },
 348	{ XFER_UDMA_3,		         0,	0x00033861, },
 349	{ XFER_UDMA_4,		         0,	0x00033841, },
 350	{ XFER_UDMA_5,		         0,	0x00033031, },
 351	{ XFER_UDMA_6,		         0,	0x00033021, },
 352	{ -1, 0, 0 }
 353};
 354
 355static const struct pata_macio_timing *pata_macio_find_timing(
 356					    struct pata_macio_priv *priv,
 357					    int mode)
 358{
 359	int i;
 360
 361	for (i = 0; priv->timings[i].mode > 0; i++) {
 362		if (priv->timings[i].mode == mode)
 363			return &priv->timings[i];
 364	}
 365	return NULL;
 366}
 367
 368
 369static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
 370{
 371	struct pata_macio_priv *priv = ap->private_data;
 372	void __iomem *rbase = ap->ioaddr.cmd_addr;
 373
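	/* The ATA-6 cells (Shasta, UniNorth 2, K2) split the timings across
	 * separate PIO and Ultra config registers; older cells use a single
	 * shared timing register.
	 */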
 374	if (priv->kind == controller_sh_ata6 ||
 375	    priv->kind == controller_un_ata6 ||
 376	    priv->kind == controller_k2_ata6) {
 377		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
 378		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
 379	} else
 380		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
 381}
 382
 383static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
 384{
 385	ata_sff_dev_select(ap, device);
 386
 387	/* Apply timings */
 388	pata_macio_apply_timings(ap, device);
 389}
 390
 391static void pata_macio_set_timings(struct ata_port *ap,
 392				   struct ata_device *adev)
 393{
 394	struct pata_macio_priv *priv = ap->private_data;
 395	const struct pata_macio_timing *t;
 396
 397	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
 398		adev->devno,
 399		adev->pio_mode,
 400		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
 401		adev->dma_mode,
 402		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
 403
 404	/* First clear timings */
 405	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
 406
 407	/* Now get the PIO timings */
 408	t = pata_macio_find_timing(priv, adev->pio_mode);
 409	if (t == NULL) {
 410		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
 411			 adev->pio_mode);
 412		t = pata_macio_find_timing(priv, XFER_PIO_0);
 413	}
 414	BUG_ON(t == NULL);
 415
 416	/* PIO timings only ever use the first treg */
 417	priv->treg[adev->devno][0] |= t->reg1;
 418
 419	/* Now get DMA timings */
 420	t = pata_macio_find_timing(priv, adev->dma_mode);
 421	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
 422		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
 423		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
 424	}
 425	BUG_ON(t == NULL);
 426
 427	/* DMA timings can use both tregs */
 428	priv->treg[adev->devno][0] |= t->reg1;
 429	priv->treg[adev->devno][1] |= t->reg2;
 430
 431	dev_dbg(priv->dev, " -> %08x %08x\n",
 432		priv->treg[adev->devno][0],
 433		priv->treg[adev->devno][1]);
 434
 435	/* Apply to hardware */
 436	pata_macio_apply_timings(ap, adev->devno);
 437}
 438
 439/*
 440 * Blast some well known "safe" values to the timing registers at init or
 441 * wakeup from sleep time, before we do real calculation
 442 */
 443static void pata_macio_default_timings(struct pata_macio_priv *priv)
 444{
 445	unsigned int value, value2 = 0;
 446
 447	switch(priv->kind) {
 448		case controller_sh_ata6:
 449			value = 0x0a820c97;
 450			value2 = 0x00033031;
 451			break;
 452		case controller_un_ata6:
 453		case controller_k2_ata6:
 454			value = 0x08618a92;
 455			value2 = 0x00002921;
 456			break;
 457		case controller_kl_ata4:
 458			value = 0x0008438c;
 459			break;
 460		case controller_kl_ata3:
 461			value = 0x00084526;
 462			break;
 463		case controller_heathrow:
 464		case controller_ohare:
 465		default:
 466			value = 0x00074526;
 467			break;
 468	}
 469	priv->treg[0][0] = priv->treg[1][0] = value;
 470	priv->treg[0][1] = priv->treg[1][1] = value2;
 471}
 472
 473static int pata_macio_cable_detect(struct ata_port *ap)
 474{
 475	struct pata_macio_priv *priv = ap->private_data;
 476
 477	/* Get cable type from device-tree */
 478	if (priv->kind == controller_kl_ata4 ||
 479	    priv->kind == controller_un_ata6 ||
 480	    priv->kind == controller_k2_ata6 ||
 481	    priv->kind == controller_sh_ata6) {
 482		const char* cable = of_get_property(priv->node, "cable-type",
 483						    NULL);
 484		struct device_node *root = of_find_node_by_path("/");
 485		const char *model = of_get_property(root, "model", NULL);
 486
 487		of_node_put(root);
 488
 489		if (cable && !strncmp(cable, "80-", 3)) {
  490			/* Some drives fail to detect an 80c cable in PowerBooks.
  491			 * These machines use a proprietary short IDE cable
  492			 * anyway.
 493			 */
  494			if (model && !strncmp(model, "PowerBook", 9))
 495				return ATA_CBL_PATA40_SHORT;
 496			else
 497				return ATA_CBL_PATA80;
 498		}
 499	}
 500
  501	/* G5s seem to have an incorrect cable type in the device-tree.
  502	 * Let's assume they always have an 80-conductor cable; this seems to
  503	 * always be the case unless the user mucked around.
 504	 */
 505	if (of_device_is_compatible(priv->node, "K2-UATA") ||
 506	    of_device_is_compatible(priv->node, "shasta-ata"))
 507		return ATA_CBL_PATA80;
 508
  509	/* Anything else gets a 40-conductor cable */
 510	return ATA_CBL_PATA40;
 511}
 512
 513static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 514{
 515	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 516	struct ata_port *ap = qc->ap;
 517	struct pata_macio_priv *priv = ap->private_data;
 518	struct scatterlist *sg;
 519	struct dbdma_cmd *table;
 520	unsigned int si, pi;
 521
 522	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
 523		   __func__, qc, qc->flags, write, qc->dev->devno);
 524
 525	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 526		return AC_ERR_OK;
 527
 528	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 529
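	/* Build one or more DBDMA descriptors for each scatterlist element,
	 * splitting so that no single descriptor transfers more than
	 * MAX_DBDMA_SEG bytes.
	 */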
 530	pi = 0;
 531	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 532		u32 addr, sg_len, len;
 533
 534		/* determine if physical DMA addr spans 64K boundary.
 535		 * Note h/w doesn't support 64-bit, so we unconditionally
 536		 * truncate dma_addr_t to u32.
 537		 */
 538		addr = (u32) sg_dma_address(sg);
 539		sg_len = sg_dma_len(sg);
 540
 541		while (sg_len) {
 542			/* table overflow should never happen */
 543			BUG_ON (pi++ >= MAX_DCMDS);
 544
 545			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 546			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
 547			table->req_count = cpu_to_le16(len);
 548			table->phy_addr = cpu_to_le32(addr);
 549			table->cmd_dep = 0;
 550			table->xfer_status = 0;
 551			table->res_count = 0;
 552			addr += len;
 553			sg_len -= len;
 554			++table;
 555		}
 556	}
 557
 558	/* Should never happen according to Tejun */
 559	BUG_ON(!pi);
 560
 561	/* Convert the last command to an input/output */
 562	table--;
 563	table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
 564	table++;
 565
 566	/* Add the stop command to the end of the list */
 567	memset(table, 0, sizeof(struct dbdma_cmd));
 568	table->command = cpu_to_le16(DBDMA_STOP);
 569
 570	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
 571
 572	return AC_ERR_OK;
 573}
 574
 575
 576static void pata_macio_freeze(struct ata_port *ap)
 577{
 578	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 579
 580	if (dma_regs) {
 581		unsigned int timeout = 1000000;
 582
 583		/* Make sure DMA controller is stopped */
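		/* (the top 16 bits of the DBDMA control register select which
		 * of the low 16 bits to update, so writing the flags in the
		 * mask half with zeroes in the value half clears them all)
		 */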
 584		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
 585		while (--timeout && (readl(&dma_regs->status) & RUN))
 586			udelay(1);
 587	}
 588
 589	ata_sff_freeze(ap);
 590}
 591
 592
 593static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
 594{
 595	struct ata_port *ap = qc->ap;
 596	struct pata_macio_priv *priv = ap->private_data;
 597	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 598	int dev = qc->dev->devno;
 599
 600	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 601
  602	/* Make sure DMA command list updates are visible */
 603	writel(priv->dma_table_dma, &dma_regs->cmdptr);
 604
 605	/* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
 606	 * UDMA reads
 607	 */
 608	if (priv->kind == controller_kl_ata4 &&
 609	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
 610		void __iomem *rbase = ap->ioaddr.cmd_addr;
 611		u32 reg = priv->treg[dev][0];
 612
 613		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 614			reg += 0x00800000;
 615		writel(reg, rbase + IDE_TIMING_CONFIG);
 616	}
 617
 618	/* issue r/w command */
 619	ap->ops->sff_exec_command(ap, &qc->tf);
 620}
 621
 622static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
 623{
 624	struct ata_port *ap = qc->ap;
 625	struct pata_macio_priv *priv = ap->private_data;
 626	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 627
 628	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 629
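	/* Set the RUN bit: select it in the top half of the control register
	 * and assert it in the bottom half to start the channel.
	 */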
 630	writel((RUN << 16) | RUN, &dma_regs->control);
 631	/* Make sure it gets to the controller right now */
 632	(void)readl(&dma_regs->control);
 633}
 634
 635static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
 636{
 637	struct ata_port *ap = qc->ap;
 638	struct pata_macio_priv *priv = ap->private_data;
 639	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 640	unsigned int timeout = 1000000;
 641
 642	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
 643
  644	/* Stop the DMA engine and wait for it to fully halt */
 645	writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
 646	while (--timeout && (readl(&dma_regs->status) & RUN))
 647		udelay(1);
 648}
 649
 650static u8 pata_macio_bmdma_status(struct ata_port *ap)
 651{
 652	struct pata_macio_priv *priv = ap->private_data;
 653	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
 654	u32 dstat, rstat = ATA_DMA_INTR;
 655	unsigned long timeout = 0;
 656
 657	dstat = readl(&dma_regs->status);
 658
 659	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
 660
 661	/* We have two things to deal with here:
 662	 *
 663	 * - The dbdma won't stop if the command was started
 664	 * but completed with an error without transferring all
  665	 * data. This happens when bad blocks are encountered during
 666	 * a multi-block transfer.
 667	 *
 668	 * - The dbdma fifo hasn't yet finished flushing to
  669	 * system memory when the disk interrupt occurs.
  670	 */
 671
 672	/* First check for errors */
 673	if ((dstat & (RUN|DEAD)) != RUN)
 674		rstat |= ATA_DMA_ERR;
 675
 676	/* If ACTIVE is cleared, the STOP command has been hit and
 677	 * the transfer is complete. If not, we have to flush the
 678	 * channel.
 679	 */
 680	if ((dstat & ACTIVE) == 0)
 681		return rstat;
 682
 683	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
 684
 685	/* If dbdma didn't execute the STOP command yet, the
 686	 * active bit is still set. We consider that we aren't
 687	 * sharing interrupts (which is hopefully the case with
 688	 * those controllers) and so we just try to flush the
 689	 * channel for pending data in the fifo
 690	 */
 691	udelay(1);
 692	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
 693	for (;;) {
 694		udelay(1);
 695		dstat = readl(&dma_regs->status);
 696		if ((dstat & FLUSH) == 0)
 697			break;
 698		if (++timeout > 1000) {
 699			dev_warn(priv->dev, "timeout flushing DMA\n");
 700			rstat |= ATA_DMA_ERR;
 701			break;
 702		}
 703	}
 704	return rstat;
 705}
 706
 707/* port_start is when we allocate the DMA command list */
 708static int pata_macio_port_start(struct ata_port *ap)
 709{
 710	struct pata_macio_priv *priv = ap->private_data;
 711
 712	if (ap->ioaddr.bmdma_addr == NULL)
 713		return 0;
 714
 715	/* Allocate space for the DBDMA commands.
 716	 *
 717	 * The +2 is +1 for the stop command and +1 to allow for
 718	 * aligning the start address to a multiple of 16 bytes.
 719	 */
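	/* dmam_alloc_coherent() is device-managed, so the table is released
	 * automatically on detach and no explicit free is needed here.
	 */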
 720	priv->dma_table_cpu =
 721		dmam_alloc_coherent(priv->dev,
 722				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
 723				    &priv->dma_table_dma, GFP_KERNEL);
 724	if (priv->dma_table_cpu == NULL) {
 725		dev_err(priv->dev, "Unable to allocate DMA command list\n");
 726		ap->ioaddr.bmdma_addr = NULL;
 727		ap->mwdma_mask = 0;
 728		ap->udma_mask = 0;
 729	}
 730	return 0;
 731}
 732
 733static void pata_macio_irq_clear(struct ata_port *ap)
 734{
 735	struct pata_macio_priv *priv = ap->private_data;
 736
 737	/* Nothing to do here */
 738
 739	dev_dbgdma(priv->dev, "%s\n", __func__);
 740}
 741
 742static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
 743{
  744	dev_dbg(priv->dev, "Enabling & resetting...\n");
 745
 746	if (priv->mediabay)
 747		return;
 748
 749	if (priv->kind == controller_ohare && !resume) {
  750		/* The code below is having trouble on some ohare machines
  751		 * (timing related?). Until I can get my hands on one of these
  752		 * units, I keep the old way.
 753		 */
 754		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
 755	} else {
 756		int rc;
 757
 758 		/* Reset and enable controller */
 759		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 760					 priv->node, priv->aapl_bus_id, 1);
 761		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
 762				    priv->node, priv->aapl_bus_id, 1);
 763		msleep(10);
 764		/* Only bother waiting if there's a reset control */
 765		if (rc == 0) {
 766			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
 767					    priv->node, priv->aapl_bus_id, 0);
 768			msleep(IDE_WAKEUP_DELAY_MS);
 769		}
 770	}
 771
 772	/* If resuming a PCI device, restore the config space here */
 773	if (priv->pdev && resume) {
 774		int rc;
 775
 776		pci_restore_state(priv->pdev);
 777		rc = pcim_enable_device(priv->pdev);
 778		if (rc)
 779			dev_err(&priv->pdev->dev,
 780				"Failed to enable device after resume (%d)\n",
 781				rc);
 782		else
 783			pci_set_master(priv->pdev);
 784	}
 785
  786	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
  787	 * really seem necessary and skipping it speeds up the boot process.
 788	 */
 789	if (priv->kauai_fcr)
 790		writel(KAUAI_FCR_UATA_MAGIC |
 791		       KAUAI_FCR_UATA_RESET_N |
 792		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
 793}
 794
  795/* Hook the standard slave config to fix up some HW-related alignment
  796 * restrictions
 797 */
 798static int pata_macio_slave_config(struct scsi_device *sdev)
 799{
 800	struct ata_port *ap = ata_shost_to_port(sdev->host);
 801	struct pata_macio_priv *priv = ap->private_data;
 802	struct ata_device *dev;
 803	u16 cmd;
 804	int rc;
 805
 806	/* First call original */
 807	rc = ata_scsi_slave_config(sdev);
 808	if (rc)
 809		return rc;
 810
 811	/* This is lifted from sata_nv */
 812	dev = &ap->link.device[sdev->id];
 813
 814	/* OHare has issues with non cache aligned DMA on some chipsets */
 815	if (priv->kind == controller_ohare) {
 816		blk_queue_update_dma_alignment(sdev->request_queue, 31);
 817		blk_queue_update_dma_pad(sdev->request_queue, 31);
 818
 819		/* Tell the world about it */
 820		ata_dev_info(dev, "OHare alignment limits applied\n");
 821		return 0;
 822	}
 823
 824	/* We only have issues with ATAPI */
 825	if (dev->class != ATA_DEV_ATAPI)
 826		return 0;
 827
 828	/* Shasta and K2 seem to have "issues" with reads ... */
 829	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
  830		/* All right, these are bad; apply restrictions */
 831		blk_queue_update_dma_alignment(sdev->request_queue, 15);
 832		blk_queue_update_dma_pad(sdev->request_queue, 15);
 833
  834		/* We enable MWI and hack the cache line size directly here; this
  835		 * is specific to this chipset and these are not normal values. We
  836		 * happen to somewhat know what we are doing here (which is basically
  837		 * to do the same thing Apple does and pray they did not get it wrong :-)
 838		 */
 839		BUG_ON(!priv->pdev);
 840		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
 841		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
 842		pci_write_config_word(priv->pdev, PCI_COMMAND,
 843				      cmd | PCI_COMMAND_INVALIDATE);
 844
 845		/* Tell the world about it */
 846		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
 847	}
 848
 849	return 0;
 850}
 851
 852#ifdef CONFIG_PM_SLEEP
 853static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
  854{
  855	/* First, core libata suspend to do most of the work */
  856	ata_host_suspend(priv->host, mesg);
  857
 858	/* Restore to default timings */
 859	pata_macio_default_timings(priv);
 860
  861	/* Mask the interrupt. Not strictly necessary, but the old driver
  862	 * did it and I'd rather not change that here */
 863	disable_irq(priv->irq);
 864
 865	/* The media bay will handle itself just fine */
 866	if (priv->mediabay)
 867		return 0;
 868
 869	/* Kauai has bus control FCRs directly here */
 870	if (priv->kauai_fcr) {
 871		u32 fcr = readl(priv->kauai_fcr);
 872		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
 873		writel(fcr, priv->kauai_fcr);
 874	}
 875
 876	/* For PCI, save state and disable DMA. No need to call
  877	 * pci_set_power_state(); the HW doesn't do D states that
  878	 * way, and the platform code will take care of suspending the
  879	 * ASIC properly.
 880	 */
 881	if (priv->pdev) {
 882		pci_save_state(priv->pdev);
 883		pci_disable_device(priv->pdev);
 884	}
 885
 886	/* Disable the bus on older machines and the cell on kauai */
 887	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
 888			    priv->aapl_bus_id, 0);
 889
 890	return 0;
 891}
 892
 893static int pata_macio_do_resume(struct pata_macio_priv *priv)
 894{
 895	/* Reset and re-enable the HW */
 896	pata_macio_reset_hw(priv, 1);
 897
 898	/* Sanitize drive timings */
 899	pata_macio_apply_timings(priv->host->ports[0], 0);
 900
 901	/* We want our IRQ back ! */
 902	enable_irq(priv->irq);
 903
 904	/* Let the libata core take it from there */
 905	ata_host_resume(priv->host);
 906
 907	return 0;
 908}
 909#endif /* CONFIG_PM_SLEEP */
 910
 911static struct scsi_host_template pata_macio_sht = {
 912	__ATA_BASE_SHT(DRV_NAME),
 913	.sg_tablesize		= MAX_DCMDS,
  914	/* We may not need one that strict */
 915	.dma_boundary		= ATA_DMA_BOUNDARY,
 916	/* Not sure what the real max is but we know it's less than 64K, let's
 917	 * use 64K minus 256
 918	 */
 919	.max_segment_size	= MAX_DBDMA_SEG,
 920	.slave_configure	= pata_macio_slave_config,
 921	.sdev_groups		= ata_common_sdev_groups,
 922	.can_queue		= ATA_DEF_QUEUE,
 923	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 924};
 925
 926static struct ata_port_operations pata_macio_ops = {
 927	.inherits		= &ata_bmdma_port_ops,
 928
 929	.freeze			= pata_macio_freeze,
 930	.set_piomode		= pata_macio_set_timings,
 931	.set_dmamode		= pata_macio_set_timings,
 932	.cable_detect		= pata_macio_cable_detect,
 933	.sff_dev_select		= pata_macio_dev_select,
 934	.qc_prep		= pata_macio_qc_prep,
 935	.bmdma_setup		= pata_macio_bmdma_setup,
 936	.bmdma_start		= pata_macio_bmdma_start,
 937	.bmdma_stop		= pata_macio_bmdma_stop,
 938	.bmdma_status		= pata_macio_bmdma_status,
 939	.port_start		= pata_macio_port_start,
 940	.sff_irq_clear		= pata_macio_irq_clear,
 941};
 942
 943static void pata_macio_invariants(struct pata_macio_priv *priv)
 944{
 945	const int *bidp;
 946
 947	/* Identify the type of controller */
 948	if (of_device_is_compatible(priv->node, "shasta-ata")) {
 949		priv->kind = controller_sh_ata6;
 950	        priv->timings = pata_macio_shasta_timings;
 951	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
 952		priv->kind = controller_un_ata6;
 953	        priv->timings = pata_macio_kauai_timings;
 954	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
 955		priv->kind = controller_k2_ata6;
 956	        priv->timings = pata_macio_kauai_timings;
 957	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
 958		if (of_node_name_eq(priv->node, "ata-4")) {
 959			priv->kind = controller_kl_ata4;
 960			priv->timings = pata_macio_kl66_timings;
 961		} else {
 962			priv->kind = controller_kl_ata3;
 963			priv->timings = pata_macio_kl33_timings;
 964		}
 965	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
 966		priv->kind = controller_heathrow;
 967		priv->timings = pata_macio_heathrow_timings;
 968	} else {
 969		priv->kind = controller_ohare;
 970		priv->timings = pata_macio_ohare_timings;
 971	}
 972
 973	/* XXX FIXME --- setup priv->mediabay here */
 974
 975	/* Get Apple bus ID (for clock and ASIC control) */
 976	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
 977	priv->aapl_bus_id =  bidp ? *bidp : 0;
 978
 979	/* Fixup missing Apple bus ID in case of media-bay */
 980	if (priv->mediabay && !bidp)
 981		priv->aapl_bus_id = 1;
 982}
 983
 984static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
 985				 void __iomem * base, void __iomem * dma)
 986{
 987	/* cmd_addr is the base of regs for that port */
 988	ioaddr->cmd_addr	= base;
 989
 990	/* taskfile registers */
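	/* (the macio cells space the taskfile registers 16 bytes apart,
	 * hence the << 4 on the standard register numbers)
	 */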
 991	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
 992	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
 993	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
 994	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
 995	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
 996	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
 997	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
 998	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
 999	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
1000	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
1001	ioaddr->altstatus_addr	= base + 0x160;
1002	ioaddr->ctl_addr	= base + 0x160;
1003	ioaddr->bmdma_addr	= dma;
1004}
1005
1006static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1007					 struct ata_port_info *pinfo)
1008{
1009	int i = 0;
1010
1011	pinfo->pio_mask		= 0;
1012	pinfo->mwdma_mask	= 0;
1013	pinfo->udma_mask	= 0;
1014
1015	while (priv->timings[i].mode > 0) {
1016		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
1017		switch(priv->timings[i].mode & 0xf0) {
1018		case 0x00: /* PIO */
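			/* XFER_PIO_0 is 0x08, so shift down so that bit 0 of
			 * pio_mask corresponds to PIO mode 0
			 */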
1019			pinfo->pio_mask |= (mask >> 8);
1020			break;
1021		case 0x20: /* MWDMA */
1022			pinfo->mwdma_mask |= mask;
1023			break;
1024		case 0x40: /* UDMA */
1025			pinfo->udma_mask |= mask;
1026			break;
1027		}
1028		i++;
1029	}
1030	dev_dbg(priv->dev, "Supported masks: PIO=%x, MWDMA=%x, UDMA=%x\n",
1031		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1032}
1033
1034static int pata_macio_common_init(struct pata_macio_priv *priv,
1035				  resource_size_t tfregs,
1036				  resource_size_t dmaregs,
1037				  resource_size_t fcregs,
1038				  unsigned long irq)
1039{
1040	struct ata_port_info		pinfo;
1041	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
1042	void __iomem			*dma_regs = NULL;
1043
1044	/* Fill up privates with various invariants collected from the
1045	 * device-tree
1046	 */
1047	pata_macio_invariants(priv);
1048
1049	/* Make sure we have sane initial timings in the cache */
1050	pata_macio_default_timings(priv);
1051
1052	/* Allocate libata host for 1 port */
1053	memset(&pinfo, 0, sizeof(struct ata_port_info));
1054	pmac_macio_calc_timing_masks(priv, &pinfo);
1055	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
1056	pinfo.port_ops		= &pata_macio_ops;
1057	pinfo.private_data	= priv;
1058
1059	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1060	if (priv->host == NULL) {
1061		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1062		return -ENOMEM;
1063	}
1064
1065	/* Setup the private data in host too */
1066	priv->host->private_data = priv;
1067
1068	/* Map base registers */
1069	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1070	if (priv->tfregs == NULL) {
1071		dev_err(priv->dev, "Failed to map ATA ports\n");
1072		return -ENOMEM;
1073	}
1074	priv->host->iomap = &priv->tfregs;
1075
1076	/* Map DMA regs */
1077	if (dmaregs != 0) {
1078		dma_regs = devm_ioremap(priv->dev, dmaregs,
1079					sizeof(struct dbdma_regs));
1080		if (dma_regs == NULL)
1081			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1082	}
1083
1084	/* If chip has local feature control, map those regs too */
1085	if (fcregs != 0) {
1086		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1087		if (priv->kauai_fcr == NULL) {
1088			dev_err(priv->dev, "Failed to map ATA FCR register\n");
1089			return -ENOMEM;
1090		}
1091	}
1092
1093	/* Setup port data structure */
1094	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1095			     priv->tfregs, dma_regs);
1096	priv->host->ports[0]->private_data = priv;
1097
1098	/* hard-reset the controller */
1099	pata_macio_reset_hw(priv, 0);
1100	pata_macio_apply_timings(priv->host->ports[0], 0);
1101
1102	/* Enable bus master if necessary */
1103	if (priv->pdev && dma_regs)
1104		pci_set_master(priv->pdev);
1105
1106	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1107		 macio_ata_names[priv->kind], priv->aapl_bus_id);
1108
1109	/* Start it up */
1110	priv->irq = irq;
1111	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1112				 &pata_macio_sht);
1113}
1114
1115static int pata_macio_attach(struct macio_dev *mdev,
1116			     const struct of_device_id *match)
1117{
1118	struct pata_macio_priv	*priv;
1119	resource_size_t		tfregs, dmaregs = 0;
1120	unsigned long		irq;
1121	int			rc;
1122
1123	/* Check for broken device-trees */
1124	if (macio_resource_count(mdev) == 0) {
1125		dev_err(&mdev->ofdev.dev,
1126			"No addresses for controller\n");
1127		return -ENXIO;
1128	}
1129
1130	/* Enable managed resources */
1131	macio_enable_devres(mdev);
1132
1133	/* Allocate and init private data structure */
1134	priv = devm_kzalloc(&mdev->ofdev.dev,
1135			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1136	if (!priv)
1137		return -ENOMEM;
1138
1139	priv->node = of_node_get(mdev->ofdev.dev.of_node);
1140	priv->mdev = mdev;
1141	priv->dev = &mdev->ofdev.dev;
1142
1143	/* Request memory resource for taskfile registers */
1144	if (macio_request_resource(mdev, 0, "pata-macio")) {
1145		dev_err(&mdev->ofdev.dev,
1146			"Cannot obtain taskfile resource\n");
1147		return -EBUSY;
1148	}
1149	tfregs = macio_resource_start(mdev, 0);
1150
1151	/* Request resources for DMA registers if any */
1152	if (macio_resource_count(mdev) >= 2) {
1153		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1154			dev_err(&mdev->ofdev.dev,
1155				"Cannot obtain DMA resource\n");
1156		else
1157			dmaregs = macio_resource_start(mdev, 1);
1158	}
1159
1160	/*
1161	 * Fixup missing IRQ for some old implementations with broken
1162	 * device-trees.
1163	 *
1164	 * This is a bit bogus, it should be fixed in the device-tree itself,
1165	 * via the existing macio fixups, based on the type of interrupt
1166	 * controller in the machine. However, I have no test HW for this case,
1167	 * and this trick works well enough on those old machines...
1168	 */
1169	if (macio_irq_count(mdev) == 0) {
1170		dev_warn(&mdev->ofdev.dev,
1171			 "No interrupts for controller, using 13\n");
1172		irq = irq_create_mapping(NULL, 13);
1173	} else
1174		irq = macio_irq(mdev, 0);
1175
 1176	/* Prevent media bay callbacks until fully registered */
1177	lock_media_bay(priv->mdev->media_bay);
1178
1179	/* Get register addresses and call common initialization */
1180	rc = pata_macio_common_init(priv,
1181				    tfregs,		/* Taskfile regs */
1182				    dmaregs,		/* DBDMA regs */
1183				    0,			/* Feature control */
1184				    irq);
1185	unlock_media_bay(priv->mdev->media_bay);
1186
1187	return rc;
1188}
1189
1190static int pata_macio_detach(struct macio_dev *mdev)
1191{
1192	struct ata_host *host = macio_get_drvdata(mdev);
1193	struct pata_macio_priv *priv = host->private_data;
1194
1195	lock_media_bay(priv->mdev->media_bay);
1196
1197	/* Make sure the mediabay callback doesn't try to access
1198	 * dead stuff
1199	 */
1200	priv->host->private_data = NULL;
1201
1202	ata_host_detach(host);
1203
1204	unlock_media_bay(priv->mdev->media_bay);
1205
1206	return 0;
1207}
1208
1209#ifdef CONFIG_PM_SLEEP
1210static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1211{
1212	struct ata_host *host = macio_get_drvdata(mdev);
1213
1214	return pata_macio_do_suspend(host->private_data, mesg);
1215}
1216
1217static int pata_macio_resume(struct macio_dev *mdev)
1218{
1219	struct ata_host *host = macio_get_drvdata(mdev);
1220
1221	return pata_macio_do_resume(host->private_data);
1222}
1223#endif /* CONFIG_PM_SLEEP */
1224
1225#ifdef CONFIG_PMAC_MEDIABAY
1226static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1227{
1228	struct ata_host *host = macio_get_drvdata(mdev);
1229	struct ata_port *ap;
1230	struct ata_eh_info *ehi;
1231	struct ata_device *dev;
1232	unsigned long flags;
1233
1234	if (!host || !host->private_data)
1235		return;
1236	ap = host->ports[0];
1237	spin_lock_irqsave(ap->lock, flags);
1238	ehi = &ap->link.eh_info;
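	/* MB_CD means an ATA device (typically a CD drive) is present in the
	 * bay; any other state is treated as a removal here.
	 */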
1239	if (mb_state == MB_CD) {
1240		ata_ehi_push_desc(ehi, "mediabay plug");
1241		ata_ehi_hotplugged(ehi);
1242		ata_port_freeze(ap);
1243	} else {
1244		ata_ehi_push_desc(ehi, "mediabay unplug");
1245		ata_for_each_dev(dev, &ap->link, ALL)
1246			dev->flags |= ATA_DFLAG_DETACH;
1247		ata_port_abort(ap);
1248	}
1249	spin_unlock_irqrestore(ap->lock, flags);
1250
1251}
1252#endif /* CONFIG_PMAC_MEDIABAY */
1253
1254
1255static int pata_macio_pci_attach(struct pci_dev *pdev,
1256				 const struct pci_device_id *id)
1257{
1258	struct pata_macio_priv	*priv;
1259	struct device_node	*np;
1260	resource_size_t		rbase;
1261
1262	/* We cannot use a MacIO controller without its OF device node */
1263	np = pci_device_to_OF_node(pdev);
1264	if (np == NULL) {
1265		dev_err(&pdev->dev,
1266			"Cannot find OF device node for controller\n");
1267		return -ENODEV;
1268	}
1269
1270	/* Check that it can be enabled */
1271	if (pcim_enable_device(pdev)) {
1272		dev_err(&pdev->dev,
1273			"Cannot enable controller PCI device\n");
1274		return -ENXIO;
1275	}
1276
1277	/* Allocate and init private data structure */
1278	priv = devm_kzalloc(&pdev->dev,
1279			    sizeof(struct pata_macio_priv), GFP_KERNEL);
1280	if (!priv)
1281		return -ENOMEM;
1282
1283	priv->node = of_node_get(np);
1284	priv->pdev = pdev;
1285	priv->dev = &pdev->dev;
1286
1287	/* Get MMIO regions */
1288	if (pci_request_regions(pdev, "pata-macio")) {
1289		dev_err(&pdev->dev,
1290			"Cannot obtain PCI resources\n");
1291		return -EBUSY;
1292	}
1293
1294	/* Get register addresses and call common initialization */
1295	rbase = pci_resource_start(pdev, 0);
1296	if (pata_macio_common_init(priv,
1297				   rbase + 0x2000,	/* Taskfile regs */
1298				   rbase + 0x1000,	/* DBDMA regs */
1299				   rbase,		/* Feature control */
1300				   pdev->irq))
1301		return -ENXIO;
1302
1303	return 0;
1304}
1305
1306static void pata_macio_pci_detach(struct pci_dev *pdev)
1307{
1308	struct ata_host *host = pci_get_drvdata(pdev);
1309
1310	ata_host_detach(host);
1311}
1312
1313#ifdef CONFIG_PM_SLEEP
1314static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1315{
1316	struct ata_host *host = pci_get_drvdata(pdev);
1317
1318	return pata_macio_do_suspend(host->private_data, mesg);
1319}
1320
1321static int pata_macio_pci_resume(struct pci_dev *pdev)
1322{
1323	struct ata_host *host = pci_get_drvdata(pdev);
1324
1325	return pata_macio_do_resume(host->private_data);
1326}
1327#endif /* CONFIG_PM_SLEEP */
1328
1329static const struct of_device_id pata_macio_match[] =
1330{
1331	{ .name = "IDE", },
1332	{ .name = "ATA", },
1333	{ .type = "ide", },
1334	{ .type = "ata", },
 1335	{ /* sentinel */ }
 1336};
1337MODULE_DEVICE_TABLE(of, pata_macio_match);
1338
1339static struct macio_driver pata_macio_driver =
1340{
1341	.driver = {
1342		.name 		= "pata-macio",
1343		.owner		= THIS_MODULE,
1344		.of_match_table	= pata_macio_match,
1345	},
1346	.probe		= pata_macio_attach,
1347	.remove		= pata_macio_detach,
1348#ifdef CONFIG_PM_SLEEP
1349	.suspend	= pata_macio_suspend,
1350	.resume		= pata_macio_resume,
1351#endif
1352#ifdef CONFIG_PMAC_MEDIABAY
1353	.mediabay_event	= pata_macio_mb_event,
1354#endif
1355};
1356
1357static const struct pci_device_id pata_macio_pci_match[] = {
1358	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
1359	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
1360	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
1361	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
1362	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
1363	{},
1364};
1365
1366static struct pci_driver pata_macio_pci_driver = {
1367	.name		= "pata-pci-macio",
1368	.id_table	= pata_macio_pci_match,
1369	.probe		= pata_macio_pci_attach,
1370	.remove		= pata_macio_pci_detach,
1371#ifdef CONFIG_PM_SLEEP
1372	.suspend	= pata_macio_pci_suspend,
1373	.resume		= pata_macio_pci_resume,
1374#endif
1375	.driver = {
1376		.owner		= THIS_MODULE,
1377	},
1378};
1379MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1380
1381
1382static int __init pata_macio_init(void)
1383{
1384	int rc;
1385
1386	if (!machine_is(powermac))
1387		return -ENODEV;
1388
1389	rc = pci_register_driver(&pata_macio_pci_driver);
1390	if (rc)
1391		return rc;
1392	rc = macio_register_driver(&pata_macio_driver);
1393	if (rc) {
1394		pci_unregister_driver(&pata_macio_pci_driver);
1395		return rc;
1396	}
1397	return 0;
1398}
1399
1400static void __exit pata_macio_exit(void)
1401{
1402	macio_unregister_driver(&pata_macio_driver);
1403	pci_unregister_driver(&pata_macio_pci_driver);
1404}
1405
1406module_init(pata_macio_init);
1407module_exit(pata_macio_exit);
1408
1409MODULE_AUTHOR("Benjamin Herrenschmidt");
1410MODULE_DESCRIPTION("Apple MacIO PATA driver");
1411MODULE_LICENSE("GPL");
1412MODULE_VERSION(DRV_VERSION);