   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * sata_mv.c - Marvell SATA support
   4 *
   5 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
   6 * Copyright 2005: EMC Corporation, all rights reserved.
   7 * Copyright 2005 Red Hat, Inc.  All rights reserved.
   8 *
   9 * Originally written by Brett Russ.
  10 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
  11 *
  12 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  13 */
  14
  15/*
  16 * sata_mv TODO list:
  17 *
  18 * --> Develop a low-power-consumption strategy, and implement it.
  19 *
  20 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
  21 *
  22 * --> [Experiment, Marvell value added] Is it possible to use target
  23 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
  24 *       creating LibATA target mode support would be very interesting.
  25 *
  26 *       Target mode, for those without docs, is the ability to directly
  27 *       connect two SATA ports.
  28 */
  29
  30/*
  31 * 80x1-B2 errata PCI#11:
  32 *
  33 * Users of the 6041/6081 Rev.B2 chips (current is C0)
  34 * should be careful to insert those cards only onto PCI-X bus #0,
  35 * and only in device slots 0..7, not higher.  The chips may not
  36 * work correctly otherwise  (note: this is a pretty rare condition).
  37 */
  38
  39#include <linux/kernel.h>
  40#include <linux/module.h>
  41#include <linux/pci.h>
  42#include <linux/init.h>
  43#include <linux/blkdev.h>
  44#include <linux/delay.h>
  45#include <linux/interrupt.h>
  46#include <linux/dmapool.h>
  47#include <linux/dma-mapping.h>
  48#include <linux/device.h>
  49#include <linux/clk.h>
  50#include <linux/phy/phy.h>
  51#include <linux/platform_device.h>
  52#include <linux/ata_platform.h>
  53#include <linux/mbus.h>
  54#include <linux/bitops.h>
  55#include <linux/gfp.h>
  56#include <linux/of.h>
  57#include <linux/of_irq.h>
  58#include <scsi/scsi_host.h>
  59#include <scsi/scsi_cmnd.h>
  60#include <scsi/scsi_device.h>
  61#include <linux/libata.h>
  62
  63#define DRV_NAME	"sata_mv"
  64#define DRV_VERSION	"1.28"
  65
  66/*
  67 * module options
  68 */
  69
  70#ifdef CONFIG_PCI
  71static int msi;
  72module_param(msi, int, S_IRUGO);
  73MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
  74#endif
  75
  76static int irq_coalescing_io_count;
  77module_param(irq_coalescing_io_count, int, S_IRUGO);
  78MODULE_PARM_DESC(irq_coalescing_io_count,
  79		 "IRQ coalescing I/O count threshold (0..255)");
  80
  81static int irq_coalescing_usecs;
  82module_param(irq_coalescing_usecs, int, S_IRUGO);
  83MODULE_PARM_DESC(irq_coalescing_usecs,
  84		 "IRQ coalescing time threshold in usecs");
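/*
 * Editorial example (not part of the original source): because these
 * parameters are declared with S_IRUGO, they can only be set at module
 * load time, and are then visible read-only via sysfs, e.g.:
 *
 *	modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *	cat /sys/module/sata_mv/parameters/irq_coalescing_usecs
 */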
  85
  86enum {
  87	/* BAR's are enumerated in terms of pci_resource_start() terms */
  88	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
  89	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
  90	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
  91
  92	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
  93	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
  94
  95	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
  96	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
  97	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
  98	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */
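	/*
	 * Worked example (editorial): with COAL_CLOCKS_PER_USEC = 150, the
	 * 24-bit MAX_COAL_TIME_THRESHOLD of 16777215 clocks corresponds to
	 * a maximum usable time threshold of 16777215 / 150 ~= 111848 usecs,
	 * i.e. roughly 112 msecs.
	 */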
  99
 100	MV_PCI_REG_BASE		= 0,
 101
 102	/*
 103	 * Per-chip ("all ports") interrupt coalescing feature.
 104	 * This is only for GEN_II / GEN_IIE hardware.
 105	 *
 106	 * Coalescing defers the interrupt until either the IO_THRESHOLD
 107	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 108	 */
 109	COAL_REG_BASE		= 0x18000,
 110	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
 111	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */
 112
 113	IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
 114	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
 115
 116	/*
 117	 * Registers for the (unused here) transaction coalescing feature:
 118	 */
 119	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
 120	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),
 121
 122	SATAHC0_REG_BASE	= 0x20000,
 123	FLASH_CTL		= 0x1046c,
 124	GPIO_PORT_CTL		= 0x104f0,
 125	RESET_CFG		= 0x180d8,
 126
 127	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
 128	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
 129	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
 130	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
 131
 132	MV_MAX_Q_DEPTH		= 32,
 133	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
 134
 135	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
 136	 * CRPB needs alignment on a 256B boundary. Size == 256B
 137	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
 138	 */
 139	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
 140	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
 141	MV_MAX_SG_CT		= 256,
 142	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
 143
 144	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
 145	MV_PORT_HC_SHIFT	= 2,
 146	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
 147	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
 148	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
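	/* Worked example (editorial): port 6 --> hc = 6 >> 2 = 1, hardport = 6 & 3 = 2 */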
 149
 150	/* Host Flags */
 151	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 152
 153	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
 154
 155	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
 156
 157	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
 158				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
 159
 160	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,
 161
 162	CRQB_FLAG_READ		= (1 << 0),
 163	CRQB_TAG_SHIFT		= 1,
 164	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
 165	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
 166	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
 167	CRQB_CMD_ADDR_SHIFT	= 8,
 168	CRQB_CMD_CS		= (0x2 << 11),
 169	CRQB_CMD_LAST		= (1 << 15),
 170
 171	CRPB_FLAG_STATUS_SHIFT	= 8,
 172	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
 173	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
 174
 175	EPRD_FLAG_END_OF_TBL	= (1 << 31),
 176
 177	/* PCI interface registers */
 178
 179	MV_PCI_COMMAND		= 0xc00,
 180	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
 181	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
 182
 183	PCI_MAIN_CMD_STS	= 0xd30,
 184	STOP_PCI_MASTER		= (1 << 2),
 185	PCI_MASTER_EMPTY	= (1 << 3),
 186	GLOB_SFT_RST		= (1 << 4),
 187
 188	MV_PCI_MODE		= 0xd00,
 189	MV_PCI_MODE_MASK	= 0x30,
 190
 191	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
 192	MV_PCI_DISC_TIMER	= 0xd04,
 193	MV_PCI_MSI_TRIGGER	= 0xc38,
 194	MV_PCI_SERR_MASK	= 0xc28,
 195	MV_PCI_XBAR_TMOUT	= 0x1d04,
 196	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
 197	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
 198	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
 199	MV_PCI_ERR_COMMAND	= 0x1d50,
 200
 201	PCI_IRQ_CAUSE		= 0x1d58,
 202	PCI_IRQ_MASK		= 0x1d5c,
 203	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
 204
 205	PCIE_IRQ_CAUSE		= 0x1900,
 206	PCIE_IRQ_MASK		= 0x1910,
 207	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
 208
 209	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
 210	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
 211	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
 212	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
 213	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
 214	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
 215	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
 216	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
 217	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
 218	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
 219	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
 220	PCI_ERR			= (1 << 18),
 221	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
 222	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
 223	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
 224	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
 225	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
 226	GPIO_INT		= (1 << 22),
 227	SELF_INT		= (1 << 23),
 228	TWSI_INT		= (1 << 24),
 229	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
 230	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
 231	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
 232
 233	/* SATAHC registers */
 234	HC_CFG			= 0x00,
 235
 236	HC_IRQ_CAUSE		= 0x14,
 237	DMA_IRQ			= (1 << 0),	/* shift by port # */
 238	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
 239	DEV_IRQ			= (1 << 8),	/* shift by port # */
 240
 241	/*
 242	 * Per-HC (Host-Controller) interrupt coalescing feature.
 243	 * This is present on all chip generations.
 244	 *
 245	 * Coalescing defers the interrupt until either the IO_THRESHOLD
 246	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 247	 */
 248	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
 249	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,
 250
 251	SOC_LED_CTRL		= 0x2c,
 252	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
 253	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
 254						/*  with dev activity LED */
 255
 256	/* Shadow block registers */
 257	SHD_BLK			= 0x100,
 258	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */
 259
 260	/* SATA registers */
 261	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
 262	SATA_ACTIVE		= 0x350,
 263	FIS_IRQ_CAUSE		= 0x364,
 264	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */
 265
 266	LTMODE			= 0x30c,	/* requires read-after-write */
 267	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
 268
 269	PHY_MODE2		= 0x330,
 270	PHY_MODE3		= 0x310,
 271
 272	PHY_MODE4		= 0x314,	/* requires read-after-write */
 273	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
 274	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
 275	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
 276	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
 277
 278	SATA_IFCTL		= 0x344,
 279	SATA_TESTCTL		= 0x348,
 280	SATA_IFSTAT		= 0x34c,
 281	VENDOR_UNIQUE_FIS	= 0x35c,
 282
 283	FISCFG			= 0x360,
 284	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
 285	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
 286
 287	PHY_MODE9_GEN2		= 0x398,
 288	PHY_MODE9_GEN1		= 0x39c,
 289	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */
 290
 291	MV5_PHY_MODE		= 0x74,
 292	MV5_LTMODE		= 0x30,
 293	MV5_PHY_CTL		= 0x0C,
 294	SATA_IFCFG		= 0x050,
 295	LP_PHY_CTL		= 0x058,
 296	LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
 297	LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
 298	LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
 299	LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
 300	LP_PHY_CTL_GEN_RX_3G    = (1 << 9),
 301
 302	MV_M2_PREAMP_MASK	= 0x7e0,
 303
 304	/* Port registers */
 305	EDMA_CFG		= 0,
 306	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
 307	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
 308	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
 309	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
 310	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
 311	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
 312	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
 313
 314	EDMA_ERR_IRQ_CAUSE	= 0x8,
 315	EDMA_ERR_IRQ_MASK	= 0xc,
 316	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
 317	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
 318	EDMA_ERR_DEV		= (1 << 2),	/* device error */
 319	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
 320	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
 321	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
 322	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
 323	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
 324	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
 325	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
 326	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
 327	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
 328	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
 329	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
 330
 331	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
 332	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
 333	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
 334	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
 335	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
 336
 337	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
 338
 339	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
 340	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
 341	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
 342	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
 343	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
 344	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
 345
 346	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
 347
 348	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
 349	EDMA_ERR_OVERRUN_5	= (1 << 5),
 350	EDMA_ERR_UNDERRUN_5	= (1 << 6),
 351
 352	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
 353				  EDMA_ERR_LNK_CTRL_RX_1 |
 354				  EDMA_ERR_LNK_CTRL_RX_3 |
 355				  EDMA_ERR_LNK_CTRL_TX,
 356
 357	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
 358				  EDMA_ERR_PRD_PAR |
 359				  EDMA_ERR_DEV_DCON |
 360				  EDMA_ERR_DEV_CON |
 361				  EDMA_ERR_SERR |
 362				  EDMA_ERR_SELF_DIS |
 363				  EDMA_ERR_CRQB_PAR |
 364				  EDMA_ERR_CRPB_PAR |
 365				  EDMA_ERR_INTRL_PAR |
 366				  EDMA_ERR_IORDY |
 367				  EDMA_ERR_LNK_CTRL_RX_2 |
 368				  EDMA_ERR_LNK_DATA_RX |
 369				  EDMA_ERR_LNK_DATA_TX |
 370				  EDMA_ERR_TRANS_PROTO,
 371
 372	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
 373				  EDMA_ERR_PRD_PAR |
 374				  EDMA_ERR_DEV_DCON |
 375				  EDMA_ERR_DEV_CON |
 376				  EDMA_ERR_OVERRUN_5 |
 377				  EDMA_ERR_UNDERRUN_5 |
 378				  EDMA_ERR_SELF_DIS_5 |
 379				  EDMA_ERR_CRQB_PAR |
 380				  EDMA_ERR_CRPB_PAR |
 381				  EDMA_ERR_INTRL_PAR |
 382				  EDMA_ERR_IORDY,
 383
 384	EDMA_REQ_Q_BASE_HI	= 0x10,
 385	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */
 386
 387	EDMA_REQ_Q_OUT_PTR	= 0x18,
 388	EDMA_REQ_Q_PTR_SHIFT	= 5,
 389
 390	EDMA_RSP_Q_BASE_HI	= 0x1c,
 391	EDMA_RSP_Q_IN_PTR	= 0x20,
 392	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
 393	EDMA_RSP_Q_PTR_SHIFT	= 3,
 394
 395	EDMA_CMD		= 0x28,		/* EDMA command register */
 396	EDMA_EN			= (1 << 0),	/* enable EDMA */
 397	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
 398	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */
 399
 400	EDMA_STATUS		= 0x30,		/* EDMA engine status */
 401	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
 402	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */
 403
 404	EDMA_IORDY_TMOUT	= 0x34,
 405	EDMA_ARB_CFG		= 0x38,
 406
 407	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
 408	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */
 409
 410	BMDMA_CMD		= 0x224,	/* bmdma command register */
 411	BMDMA_STATUS		= 0x228,	/* bmdma status register */
 412	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
 413	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */
 414
 415	/* Host private flags (hp_flags) */
 416	MV_HP_FLAG_MSI		= (1 << 0),
 417	MV_HP_ERRATA_50XXB0	= (1 << 1),
 418	MV_HP_ERRATA_50XXB2	= (1 << 2),
 419	MV_HP_ERRATA_60X1B2	= (1 << 3),
 420	MV_HP_ERRATA_60X1C0	= (1 << 4),
 421	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
 422	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
 423	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
 424	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
 425	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
 426	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
 427	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
 428	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL ? */
 429
 430	/* Port private flags (pp_flags) */
 431	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
 432	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
 433	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
 434	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
 435	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
 436};
 437
 438#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
 439#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 440#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 441#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
 442#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
 443
 444#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
 445#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
 446
 447enum {
 448	/* DMA boundary 0xffff is required by the s/g splitting
 449	 * we need on /length/ in mv_fill_sg().
 450	 */
 451	MV_DMA_BOUNDARY		= 0xffffU,
 452
 453	/* mask of register bits containing lower 32 bits
 454	 * of EDMA request queue DMA address
 455	 */
 456	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
 457
 458	/* ditto, for response queue */
 459	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
 460};
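/*
 * Editorial note: these masks follow from the alignment rules stated
 * earlier: the CRQB queue (32 entries * 32B = 1KB) must be 1KB-aligned,
 * so bits 9:0 of its base address are always zero (mask 0xfffffc00);
 * the CRPB queue (32 entries * 8B = 256B) must be 256B-aligned, hence
 * mask 0xffffff00.  The WARN_ON()s in mv_set_edma_ptrs() check this.
 */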
 461
 462enum chip_type {
 463	chip_504x,
 464	chip_508x,
 465	chip_5080,
 466	chip_604x,
 467	chip_608x,
 468	chip_6042,
 469	chip_7042,
 470	chip_soc,
 471};
 472
 473/* Command ReQuest Block: 32B */
 474struct mv_crqb {
 475	__le32			sg_addr;
 476	__le32			sg_addr_hi;
 477	__le16			ctrl_flags;
 478	__le16			ata_cmd[11];
 479};
 480
 481struct mv_crqb_iie {
 482	__le32			addr;
 483	__le32			addr_hi;
 484	__le32			flags;
 485	__le32			len;
 486	__le32			ata_cmd[4];
 487};
 488
 489/* Command ResPonse Block: 8B */
 490struct mv_crpb {
 491	__le16			id;
 492	__le16			flags;
 493	__le32			tmstmp;
 494};
 495
 496/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
 497struct mv_sg {
 498	__le32			addr;
 499	__le32			flags_size;
 500	__le32			addr_hi;
 501	__le32			reserved;
 502};
 503
 504/*
 505 * We keep a local cache of a few frequently accessed port
 506 * registers here, to avoid having to read them (very slow)
 507 * when switching between EDMA and non-EDMA modes.
 508 */
 509struct mv_cached_regs {
 510	u32			fiscfg;
 511	u32			ltmode;
 512	u32			haltcond;
 513	u32			unknown_rsvd;
 514};
 515
 516struct mv_port_priv {
 517	struct mv_crqb		*crqb;
 518	dma_addr_t		crqb_dma;
 519	struct mv_crpb		*crpb;
 520	dma_addr_t		crpb_dma;
 521	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
 522	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
 523
 524	unsigned int		req_idx;
 525	unsigned int		resp_idx;
 526
 527	u32			pp_flags;
 528	struct mv_cached_regs	cached;
 529	unsigned int		delayed_eh_pmp_map;
 530};
 531
 532struct mv_port_signal {
 533	u32			amps;
 534	u32			pre;
 535};
 536
 537struct mv_host_priv {
 538	u32			hp_flags;
 539	unsigned int 		board_idx;
 540	u32			main_irq_mask;
 541	struct mv_port_signal	signal[8];
 542	const struct mv_hw_ops	*ops;
 543	int			n_ports;
 544	void __iomem		*base;
 545	void __iomem		*main_irq_cause_addr;
 546	void __iomem		*main_irq_mask_addr;
 547	u32			irq_cause_offset;
 548	u32			irq_mask_offset;
 549	u32			unmask_all_irqs;
 550
 551	/*
 552	 * Needed on some devices that require their clocks to be enabled.
 553	 * These are optional: if the platform device does not have any
 554	 * clocks, they won't be used.  Also, if the underlying hardware
 555	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
 556	 * all the clock operations become no-ops (see clk.h).
 557	 */
 558	struct clk		*clk;
 559	struct clk              **port_clks;
 560	/*
 561	 * Some devices have a SATA PHY which can be enabled/disabled
 562	 * in order to save power. These are optional: if the platform
 563 * device does not have any PHYs, they won't be used.
 564	 */
 565	struct phy		**port_phys;
 566	/*
 567	 * These consistent DMA memory pools give us guaranteed
 568	 * alignment for hardware-accessed data structures,
 569	 * and less memory waste in accomplishing the alignment.
 570	 */
 571	struct dma_pool		*crqb_pool;
 572	struct dma_pool		*crpb_pool;
 573	struct dma_pool		*sg_tbl_pool;
 574};
 575
 576struct mv_hw_ops {
 577	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
 578			   unsigned int port);
 579	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
 580	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
 581			   void __iomem *mmio);
 582	int (*reset_hc)(struct ata_host *host, void __iomem *mmio,
 583			unsigned int n_hc);
 584	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
 585	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 586};
 587
 588static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 589static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 590static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
 591static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
 592static int mv_port_start(struct ata_port *ap);
 593static void mv_port_stop(struct ata_port *ap);
 594static int mv_qc_defer(struct ata_queued_cmd *qc);
 595static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
 596static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
 597static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 598static int mv_hardreset(struct ata_link *link, unsigned int *class,
 599			unsigned long deadline);
 600static void mv_eh_freeze(struct ata_port *ap);
 601static void mv_eh_thaw(struct ata_port *ap);
 602static void mv6_dev_config(struct ata_device *dev);
 603
 604static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 605			   unsigned int port);
 606static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 607static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
 608			   void __iomem *mmio);
 609static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
 610			unsigned int n_hc);
 611static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 612static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
 613
 614static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 615			   unsigned int port);
 616static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
 617static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
 618			   void __iomem *mmio);
 619static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
 620			unsigned int n_hc);
 621static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 622static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
 623				      void __iomem *mmio);
 624static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
 625				      void __iomem *mmio);
 626static int mv_soc_reset_hc(struct ata_host *host,
 627				  void __iomem *mmio, unsigned int n_hc);
 628static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
 629				      void __iomem *mmio);
 630static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
 631static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
 632				  void __iomem *mmio, unsigned int port);
 633static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
 634static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 635			     unsigned int port_no);
 636static int mv_stop_edma(struct ata_port *ap);
 637static int mv_stop_edma_engine(void __iomem *port_mmio);
 638static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
 639
 640static void mv_pmp_select(struct ata_port *ap, int pmp);
 641static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
 642				unsigned long deadline);
 643static int  mv_softreset(struct ata_link *link, unsigned int *class,
 644				unsigned long deadline);
 645static void mv_pmp_error_handler(struct ata_port *ap);
 646static void mv_process_crpb_entries(struct ata_port *ap,
 647					struct mv_port_priv *pp);
 648
 649static void mv_sff_irq_clear(struct ata_port *ap);
 650static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
 651static void mv_bmdma_setup(struct ata_queued_cmd *qc);
 652static void mv_bmdma_start(struct ata_queued_cmd *qc);
 653static void mv_bmdma_stop(struct ata_queued_cmd *qc);
 654static u8   mv_bmdma_status(struct ata_port *ap);
 655static u8 mv_sff_check_status(struct ata_port *ap);
 656
 657/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 658 * because we have to allow room for worst case splitting of
 659 * PRDs for 64K boundaries in mv_fill_sg().
 660 */
 661#ifdef CONFIG_PCI
 662static const struct scsi_host_template mv5_sht = {
 663	ATA_BASE_SHT(DRV_NAME),
 664	.sg_tablesize		= MV_MAX_SG_CT / 2,
 665	.dma_boundary		= MV_DMA_BOUNDARY,
 666};
 667#endif
 668static const struct scsi_host_template mv6_sht = {
 669	__ATA_BASE_SHT(DRV_NAME),
 670	.can_queue		= MV_MAX_Q_DEPTH - 1,
 671	.sg_tablesize		= MV_MAX_SG_CT / 2,
 672	.dma_boundary		= MV_DMA_BOUNDARY,
 673	.sdev_groups		= ata_ncq_sdev_groups,
 674	.change_queue_depth	= ata_scsi_change_queue_depth,
 675	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 676	.slave_configure	= ata_scsi_slave_config,
 677};
 678
 679static struct ata_port_operations mv5_ops = {
 680	.inherits		= &ata_sff_port_ops,
 681
 682	.lost_interrupt		= ATA_OP_NULL,
 683
 684	.qc_defer		= mv_qc_defer,
 685	.qc_prep		= mv_qc_prep,
 686	.qc_issue		= mv_qc_issue,
 687
 688	.freeze			= mv_eh_freeze,
 689	.thaw			= mv_eh_thaw,
 690	.hardreset		= mv_hardreset,
 691
 692	.scr_read		= mv5_scr_read,
 693	.scr_write		= mv5_scr_write,
 694
 695	.port_start		= mv_port_start,
 696	.port_stop		= mv_port_stop,
 697};
 698
 699static struct ata_port_operations mv6_ops = {
 700	.inherits		= &ata_bmdma_port_ops,
 701
 702	.lost_interrupt		= ATA_OP_NULL,
 703
 704	.qc_defer		= mv_qc_defer,
 705	.qc_prep		= mv_qc_prep,
 706	.qc_issue		= mv_qc_issue,
 707
 708	.dev_config             = mv6_dev_config,
 709
 710	.freeze			= mv_eh_freeze,
 711	.thaw			= mv_eh_thaw,
 712	.hardreset		= mv_hardreset,
 713	.softreset		= mv_softreset,
 714	.pmp_hardreset		= mv_pmp_hardreset,
 715	.pmp_softreset		= mv_softreset,
 716	.error_handler		= mv_pmp_error_handler,
 717
 718	.scr_read		= mv_scr_read,
 719	.scr_write		= mv_scr_write,
 720
 721	.sff_check_status	= mv_sff_check_status,
 722	.sff_irq_clear		= mv_sff_irq_clear,
 723	.check_atapi_dma	= mv_check_atapi_dma,
 724	.bmdma_setup		= mv_bmdma_setup,
 725	.bmdma_start		= mv_bmdma_start,
 726	.bmdma_stop		= mv_bmdma_stop,
 727	.bmdma_status		= mv_bmdma_status,
 728
 729	.port_start		= mv_port_start,
 730	.port_stop		= mv_port_stop,
 731};
 732
 733static struct ata_port_operations mv_iie_ops = {
 734	.inherits		= &mv6_ops,
 735	.dev_config		= ATA_OP_NULL,
 736	.qc_prep		= mv_qc_prep_iie,
 737};
 738
 739static const struct ata_port_info mv_port_info[] = {
 740	{  /* chip_504x */
 741		.flags		= MV_GEN_I_FLAGS,
 742		.pio_mask	= ATA_PIO4,
 743		.udma_mask	= ATA_UDMA6,
 744		.port_ops	= &mv5_ops,
 745	},
 746	{  /* chip_508x */
 747		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 748		.pio_mask	= ATA_PIO4,
 749		.udma_mask	= ATA_UDMA6,
 750		.port_ops	= &mv5_ops,
 751	},
 752	{  /* chip_5080 */
 753		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
 754		.pio_mask	= ATA_PIO4,
 755		.udma_mask	= ATA_UDMA6,
 756		.port_ops	= &mv5_ops,
 757	},
 758	{  /* chip_604x */
 759		.flags		= MV_GEN_II_FLAGS,
 760		.pio_mask	= ATA_PIO4,
 761		.udma_mask	= ATA_UDMA6,
 762		.port_ops	= &mv6_ops,
 763	},
 764	{  /* chip_608x */
 765		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
 766		.pio_mask	= ATA_PIO4,
 767		.udma_mask	= ATA_UDMA6,
 768		.port_ops	= &mv6_ops,
 769	},
 770	{  /* chip_6042 */
 771		.flags		= MV_GEN_IIE_FLAGS,
 772		.pio_mask	= ATA_PIO4,
 773		.udma_mask	= ATA_UDMA6,
 774		.port_ops	= &mv_iie_ops,
 775	},
 776	{  /* chip_7042 */
 777		.flags		= MV_GEN_IIE_FLAGS,
 778		.pio_mask	= ATA_PIO4,
 779		.udma_mask	= ATA_UDMA6,
 780		.port_ops	= &mv_iie_ops,
 781	},
 782	{  /* chip_soc */
 783		.flags		= MV_GEN_IIE_FLAGS,
 784		.pio_mask	= ATA_PIO4,
 785		.udma_mask	= ATA_UDMA6,
 786		.port_ops	= &mv_iie_ops,
 787	},
 788};
 789
 790static const struct pci_device_id mv_pci_tbl[] = {
 791	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
 792	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
 793	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
 794	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
 795	/* RocketRAID 1720/174x have different identifiers */
 796	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
 797	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
 798	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
 799
 800	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
 801	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
 802	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
 803	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
 804	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
 805
 806	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
 807
 808	/* Adaptec 1430SA */
 809	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
 810
 811	/* Marvell 7042 support */
 812	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
 813
 814	/* Highpoint RocketRAID PCIe series */
 815	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
 816	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
 817
 818	{ }			/* terminate list */
 819};
 820
 821static const struct mv_hw_ops mv5xxx_ops = {
 822	.phy_errata		= mv5_phy_errata,
 823	.enable_leds		= mv5_enable_leds,
 824	.read_preamp		= mv5_read_preamp,
 825	.reset_hc		= mv5_reset_hc,
 826	.reset_flash		= mv5_reset_flash,
 827	.reset_bus		= mv5_reset_bus,
 828};
 829
 830static const struct mv_hw_ops mv6xxx_ops = {
 831	.phy_errata		= mv6_phy_errata,
 832	.enable_leds		= mv6_enable_leds,
 833	.read_preamp		= mv6_read_preamp,
 834	.reset_hc		= mv6_reset_hc,
 835	.reset_flash		= mv6_reset_flash,
 836	.reset_bus		= mv_reset_pci_bus,
 837};
 838
 839static const struct mv_hw_ops mv_soc_ops = {
 840	.phy_errata		= mv6_phy_errata,
 841	.enable_leds		= mv_soc_enable_leds,
 842	.read_preamp		= mv_soc_read_preamp,
 843	.reset_hc		= mv_soc_reset_hc,
 844	.reset_flash		= mv_soc_reset_flash,
 845	.reset_bus		= mv_soc_reset_bus,
 846};
 847
 848static const struct mv_hw_ops mv_soc_65n_ops = {
 849	.phy_errata		= mv_soc_65n_phy_errata,
 850	.enable_leds		= mv_soc_enable_leds,
 851	.reset_hc		= mv_soc_reset_hc,
 852	.reset_flash		= mv_soc_reset_flash,
 853	.reset_bus		= mv_soc_reset_bus,
 854};
 855
 856/*
 857 * Functions
 858 */
 859
 860static inline void writelfl(unsigned long data, void __iomem *addr)
 861{
 862	writel(data, addr);
 863	(void) readl(addr);	/* flush to avoid PCI posted write */
 864}
 865
 866static inline unsigned int mv_hc_from_port(unsigned int port)
 867{
 868	return port >> MV_PORT_HC_SHIFT;
 869}
 870
 871static inline unsigned int mv_hardport_from_port(unsigned int port)
 872{
 873	return port & MV_PORT_MASK;
 874}
 875
 876/*
 877 * Consolidate some rather tricky bit shift calculations.
 878 * This is hot-path stuff, so not a function.
 879 * Simple code, with two return values, so macro rather than inline.
 880 *
 881 * port is the sole input, in range 0..7.
 882 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 883 * hardport is the other output, in range 0..3.
 884 *
 885 * Note that port and hardport may be the same variable in some cases.
 886 */
 887#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
 888{								\
 889	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
 890	hardport = mv_hardport_from_port(port);			\
 891	shift   += hardport * 2;				\
 892}
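/*
 * Worked example (editorial): for port 6, mv_hc_from_port(6) = 1 and
 * mv_hardport_from_port(6) = 2, so shift = 1 * HC_SHIFT + 2 * 2 = 13.
 * DONE_IRQ << 13 is then bit 14, which is indeed one of the bits in
 * DONE_IRQ_4_7 (0xaa << 9 = bits 10, 12, 14, 16 for ports 4..7).
 */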
 893
 894static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 895{
 896	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
 897}
 898
 899static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
 900						 unsigned int port)
 901{
 902	return mv_hc_base(base, mv_hc_from_port(port));
 903}
 904
 905static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 906{
 907	return  mv_hc_base_from_port(base, port) +
 908		MV_SATAHC_ARBTR_REG_SZ +
 909		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 910}
 911
 912static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
 913{
 914	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
 915	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
 916
 917	return hc_mmio + ofs;
 918}
 919
 920static inline void __iomem *mv_host_base(struct ata_host *host)
 921{
 922	struct mv_host_priv *hpriv = host->private_data;
 923	return hpriv->base;
 924}
 925
 926static inline void __iomem *mv_ap_base(struct ata_port *ap)
 927{
 928	return mv_port_base(mv_host_base(ap->host), ap->port_no);
 929}
 930
 931static inline int mv_get_hc_count(unsigned long port_flags)
 932{
 933	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 934}
 935
 936/**
 937 *      mv_save_cached_regs - (re-)initialize cached port registers
 938 *      @ap: the port whose registers we are caching
 939 *
 940 *	Initialize the local cache of port registers,
 941 *	so that reading them over and over again can
 942 *	be avoided on the hotter paths of this driver.
 943 *	This saves a few microseconds each time we switch
 944 *	to/from EDMA mode to perform (eg.) a drive cache flush.
 945 */
 946static void mv_save_cached_regs(struct ata_port *ap)
 947{
 948	void __iomem *port_mmio = mv_ap_base(ap);
 949	struct mv_port_priv *pp = ap->private_data;
 950
 951	pp->cached.fiscfg = readl(port_mmio + FISCFG);
 952	pp->cached.ltmode = readl(port_mmio + LTMODE);
 953	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
 954	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
 955}
 956
 957/**
 958 *      mv_write_cached_reg - write to a cached port register
 959 *      @addr: hardware address of the register
 960 *      @old: pointer to cached value of the register
 961 *      @new: new value for the register
 962 *
 963 *	Write a new value to a cached register,
 964 *	but only if the value is different from before.
 965 */
 966static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
 967{
 968	if (new != *old) {
 969		unsigned long laddr;
 970		*old = new;
 971		/*
 972		 * Workaround for 88SX60x1-B2 FEr SATA#13:
 973		 * Read-after-write is needed to prevent generating 64-bit
 974		 * write cycles on the PCI bus for SATA interface registers
 975		 * at offsets ending in 0x4 or 0xc.
 976		 *
 977		 * Looks like a lot of fuss, but it avoids an unnecessary
 978		 * +1 usec read-after-write delay for unaffected registers.
 979		 */
 980		laddr = (unsigned long)addr & 0xffff;
 981		if (laddr >= 0x300 && laddr <= 0x33c) {
 982			laddr &= 0x000f;
 983			if (laddr == 0x4 || laddr == 0xc) {
 984				writelfl(new, addr); /* read after write */
 985				return;
 986			}
 987		}
 988		writel(new, addr); /* unaffected by the errata */
 989	}
 990}
 991
 992static void mv_set_edma_ptrs(void __iomem *port_mmio,
 993			     struct mv_host_priv *hpriv,
 994			     struct mv_port_priv *pp)
 995{
 996	u32 index;
 997
 998	/*
 999	 * initialize request queue
1000	 */
1001	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
1002	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1003
1004	WARN_ON(pp->crqb_dma & 0x3ff);
1005	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
1006	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
1007		 port_mmio + EDMA_REQ_Q_IN_PTR);
1008	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
1009
1010	/*
1011	 * initialize response queue
1012	 */
1013	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
1014	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1015
1016	WARN_ON(pp->crpb_dma & 0xff);
1017	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1018	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1019	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1020		 port_mmio + EDMA_RSP_Q_OUT_PTR);
1021}
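/*
 * Editorial note: the "(dma >> 16) >> 16" above is the usual kernel
 * idiom for taking the high 32 bits of a dma_addr_t; a plain ">> 32"
 * would be undefined behaviour (and a compiler warning) on
 * configurations where dma_addr_t is only 32 bits wide.
 */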
1022
1023static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1024{
1025	/*
1026	 * When writing to the main_irq_mask in hardware,
1027	 * we must ensure exclusivity between the interrupt coalescing bits
1028	 * and the corresponding individual port DONE_IRQ bits.
1029	 *
1030	 * Note that this register is really an "IRQ enable" register,
1031	 * not an "IRQ mask" register as Marvell's naming might suggest.
1032	 */
1033	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1034		mask &= ~DONE_IRQ_0_3;
1035	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1036		mask &= ~DONE_IRQ_4_7;
1037	writelfl(mask, hpriv->main_irq_mask_addr);
1038}
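/*
 * Editorial example: enabling ALL_PORTS_COAL_DONE (bit 21) therefore
 * forces the per-port DONE_IRQ bits for ports 0..7 (0xaa and 0xaa << 9)
 * off, so completions are then signalled only through the coalescing
 * interrupt rather than through eight individual "done" interrupts.
 */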
1039
1040static void mv_set_main_irq_mask(struct ata_host *host,
1041				 u32 disable_bits, u32 enable_bits)
1042{
1043	struct mv_host_priv *hpriv = host->private_data;
1044	u32 old_mask, new_mask;
1045
1046	old_mask = hpriv->main_irq_mask;
1047	new_mask = (old_mask & ~disable_bits) | enable_bits;
1048	if (new_mask != old_mask) {
1049		hpriv->main_irq_mask = new_mask;
1050		mv_write_main_irq_mask(new_mask, hpriv);
1051	}
1052}
1053
1054static void mv_enable_port_irqs(struct ata_port *ap,
1055				     unsigned int port_bits)
1056{
1057	unsigned int shift, hardport, port = ap->port_no;
1058	u32 disable_bits, enable_bits;
1059
1060	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1061
1062	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1063	enable_bits  = port_bits << shift;
1064	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1065}
1066
1067static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1068					  void __iomem *port_mmio,
1069					  unsigned int port_irqs)
1070{
1071	struct mv_host_priv *hpriv = ap->host->private_data;
1072	int hardport = mv_hardport_from_port(ap->port_no);
1073	void __iomem *hc_mmio = mv_hc_base_from_port(
1074				mv_host_base(ap->host), ap->port_no);
1075	u32 hc_irq_cause;
1076
1077	/* clear EDMA event indicators, if any */
1078	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1079
1080	/* clear pending irq events */
1081	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1082	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1083
1084	/* clear FIS IRQ Cause */
1085	if (IS_GEN_IIE(hpriv))
1086		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1087
1088	mv_enable_port_irqs(ap, port_irqs);
1089}
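/*
 * Editorial note: HC_IRQ_CAUSE is evidently a write-zero-to-clear
 * register: writing ~((DEV_IRQ | DMA_IRQ) << hardport) clears just
 * this port's pending bits while leaving every other bit untouched.
 * mv_set_irq_coalescing() below uses the same ~bit idiom on the
 * coalescing cause registers.
 */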
1090
1091static void mv_set_irq_coalescing(struct ata_host *host,
1092				  unsigned int count, unsigned int usecs)
1093{
1094	struct mv_host_priv *hpriv = host->private_data;
1095	void __iomem *mmio = hpriv->base, *hc_mmio;
1096	u32 coal_enable = 0;
1097	unsigned long flags;
1098	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1099	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1100							ALL_PORTS_COAL_DONE;
1101
1102	/* Disable IRQ coalescing if either threshold is zero */
1103	if (!usecs || !count) {
1104		clks = count = 0;
1105	} else {
1106		/* Respect maximum limits of the hardware */
1107		clks = usecs * COAL_CLOCKS_PER_USEC;
1108		if (clks > MAX_COAL_TIME_THRESHOLD)
1109			clks = MAX_COAL_TIME_THRESHOLD;
1110		if (count > MAX_COAL_IO_COUNT)
1111			count = MAX_COAL_IO_COUNT;
1112	}
1113
1114	spin_lock_irqsave(&host->lock, flags);
1115	mv_set_main_irq_mask(host, coal_disable, 0);
1116
1117	if (is_dual_hc && !IS_GEN_I(hpriv)) {
1118		/*
1119		 * GEN_II/GEN_IIE with dual host controllers:
1120		 * one set of global thresholds for the entire chip.
1121		 */
1122		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
1123		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1124		/* clear leftover coal IRQ bit */
1125		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1126		if (count)
1127			coal_enable = ALL_PORTS_COAL_DONE;
1128		clks = count = 0; /* force clearing of regular regs below */
1129	}
1130
1131	/*
1132	 * All chips: independent thresholds for each HC on the chip.
1133	 */
1134	hc_mmio = mv_hc_base_from_port(mmio, 0);
1135	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1136	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1137	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1138	if (count)
1139		coal_enable |= PORTS_0_3_COAL_DONE;
1140	if (is_dual_hc) {
1141		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1142		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1143		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1144		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1145		if (count)
1146			coal_enable |= PORTS_4_7_COAL_DONE;
1147	}
1148
1149	mv_set_main_irq_mask(host, 0, coal_enable);
1150	spin_unlock_irqrestore(&host->lock, flags);
1151}
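/*
 * Illustrative usage (editorial sketch): the irq_coalescing_io_count
 * and irq_coalescing_usecs module parameters defined near the top of
 * the file are presumably fed straight into this routine during host
 * initialization, e.g.:
 *
 *	mv_set_irq_coalescing(host, irq_coalescing_io_count,
 *			      irq_coalescing_usecs);
 */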
1152
1153/*
1154 *      mv_start_edma - Enable eDMA engine
1155 *      @ap: port; @port_mmio: its io base; @pp: port private data
1156 *      @protocol: taskfile protocol of the command being issued
1157 *
1158 *      Enable eDMA if not already enabled, stopping it first if the
1159 *      current NCQ setting does not match the new command's needs.
1160 *
1161 *      LOCKING: Inherited from caller.
1162 */
1163static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1164			 struct mv_port_priv *pp, u8 protocol)
1165{
1166	int want_ncq = (protocol == ATA_PROT_NCQ);
1167
1168	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1169		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1170		if (want_ncq != using_ncq)
1171			mv_stop_edma(ap);
1172	}
1173	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1174		struct mv_host_priv *hpriv = ap->host->private_data;
1175
1176		mv_edma_cfg(ap, want_ncq, 1);
1177
1178		mv_set_edma_ptrs(port_mmio, hpriv, pp);
1179		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1180
1181		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1182		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1183	}
1184}
1185
1186static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1187{
1188	void __iomem *port_mmio = mv_ap_base(ap);
1189	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1190	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1191	int i;
1192
1193	/*
1194	 * Wait for the EDMA engine to finish transactions in progress.
1195	 * No idea what a good "timeout" value might be, but measurements
1196	 * indicate that it often requires hundreds of microseconds
1197	 * with two drives in-use.  So we use the 15msec value above
1198	 * as a rough guess at what even more drives might require.
1199	 */
1200	for (i = 0; i < timeout; ++i) {
1201		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1202		if ((edma_stat & empty_idle) == empty_idle)
1203			break;
1204		udelay(per_loop);
1205	}
1206	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
1207}
1208
1209/**
1210 *      mv_stop_edma_engine - Disable eDMA engine
1211 *      @port_mmio: io base address
1212 *
1213 *      LOCKING:
1214 *      Inherited from caller.
1215 */
1216static int mv_stop_edma_engine(void __iomem *port_mmio)
1217{
1218	int i;
1219
1220	/* Disable eDMA.  The disable bit auto clears. */
1221	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1222
1223	/* Wait for the chip to confirm eDMA is off. */
1224	for (i = 10000; i > 0; i--) {
1225		u32 reg = readl(port_mmio + EDMA_CMD);
1226		if (!(reg & EDMA_EN))
1227			return 0;
1228		udelay(10);
1229	}
1230	return -EIO;
1231}
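/*
 * Editorial note: the polling loop above allows up to 10000 iterations
 * of 10 usecs each, i.e. ~100 msecs, for EDMA_EN to clear before the
 * routine gives up and returns -EIO.
 */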
1232
1233static int mv_stop_edma(struct ata_port *ap)
1234{
1235	void __iomem *port_mmio = mv_ap_base(ap);
1236	struct mv_port_priv *pp = ap->private_data;
1237	int err = 0;
1238
1239	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1240		return 0;
1241	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1242	mv_wait_for_edma_empty_idle(ap);
1243	if (mv_stop_edma_engine(port_mmio)) {
1244		ata_port_err(ap, "Unable to stop eDMA\n");
1245		err = -EIO;
1246	}
1247	mv_edma_cfg(ap, 0, 0);
1248	return err;
1249}
1250
1251static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
1252{
1253	int b, w, o;
1254	unsigned char linebuf[38];
1255
1256	for (b = 0; b < bytes; ) {
1257		void __iomem *line = start + b;	/* b moves past this line below */
1258		for (w = 0, o = 0; b < bytes && w < 4; w++) {
1259			o += scnprintf(linebuf + o, sizeof(linebuf) - o,
1260				       "%08x ", readl(start + b));
1261			b += sizeof(u32);
1262		}
1263		dev_dbg(dev, "%s: %p: %s\n", __func__, line, linebuf);
1264	}
1265}
1266
1267static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1268{
1269	int b, w, o;
1270	u32 dw = 0;
1271	unsigned char linebuf[38];
1272
1273	for (b = 0; b < bytes; ) {
1274		int line_ofs = b;	/* b moves past this line below */
1275		for (w = 0, o = 0; b < bytes && w < 4; w++) {
1276			(void) pci_read_config_dword(pdev, b, &dw);
1277			o += scnprintf(linebuf + o, sizeof(linebuf) - o,
1278				      "%08x ", dw);
1279			b += sizeof(u32);
1280		}
1281		dev_dbg(&pdev->dev, "%s: %02x: %s\n", __func__, line_ofs, linebuf);
1282	}
1283}
1284
1285static void mv_dump_all_regs(void __iomem *mmio_base,
1286			     struct pci_dev *pdev)
1287{
1288	void __iomem *hc_base;
1289	void __iomem *port_base;
1290	int start_port, num_ports, p, start_hc, num_hcs, hc;
1291
1292	start_hc = start_port = 0;
1293	num_ports = 8;		/* should be benign for 4 port devs */
1294	num_hcs = 2;
1295	dev_dbg(&pdev->dev,
1296		"%s: All registers for port(s) %u-%u:\n", __func__,
1297		start_port, num_ports > 1 ? num_ports - 1 : start_port);
1298
1299	dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__);
1300	mv_dump_pci_cfg(pdev, 0x68);
1301
1302	dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__);
1303	mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c);
1304	mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34);
1305	mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4);
1306	mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c);
1307	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1308		hc_base = mv_hc_base(mmio_base, hc);
1309		dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
1310		mv_dump_mem(&pdev->dev, hc_base, 0x1c);
1311	}
1312	for (p = start_port; p < start_port + num_ports; p++) {
1313		port_base = mv_port_base(mmio_base, p);
1314		dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
1315		mv_dump_mem(&pdev->dev, port_base, 0x54);
1316		dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
1317		mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
1318	}
1319}
1320
1321static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1322{
1323	unsigned int ofs;
1324
1325	switch (sc_reg_in) {
1326	case SCR_STATUS:
1327	case SCR_CONTROL:
1328	case SCR_ERROR:
1329		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1330		break;
1331	case SCR_ACTIVE:
1332		ofs = SATA_ACTIVE;   /* active is not with the others */
1333		break;
1334	default:
1335		ofs = 0xffffffffU;
1336		break;
1337	}
1338	return ofs;
1339}
1340
1341static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1342{
1343	unsigned int ofs = mv_scr_offset(sc_reg_in);
1344
1345	if (ofs != 0xffffffffU) {
1346		*val = readl(mv_ap_base(link->ap) + ofs);
1347		return 0;
1348	} else
1349		return -EINVAL;
1350}
1351
1352static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1353{
1354	unsigned int ofs = mv_scr_offset(sc_reg_in);
1355
1356	if (ofs != 0xffffffffU) {
1357		void __iomem *addr = mv_ap_base(link->ap) + ofs;
1358		struct mv_host_priv *hpriv = link->ap->host->private_data;
1359		if (sc_reg_in == SCR_CONTROL) {
1360			/*
1361			 * Workaround for 88SX60x1 FEr SATA#26:
1362			 *
1363			 * COMRESETs have to take care not to accidentally
1364			 * put the drive to sleep when writing SCR_CONTROL.
1365			 * Setting bits 12..15 prevents this problem.
1366			 *
1367			 * So if we see an outbound COMRESET, set those bits.
1368			 * Ditto for the followup write that clears the reset.
1369			 *
1370			 * The proprietary driver does this for
1371			 * all chip versions, and so do we.
1372			 */
1373			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1374				val |= 0xf000;
1375
1376			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
1377				void __iomem *lp_phy_addr =
1378					mv_ap_base(link->ap) + LP_PHY_CTL;
1379				/*
1380				 * Set PHY speed according to SControl speed.
1381				 */
1382				u32 lp_phy_val =
1383					LP_PHY_CTL_PIN_PU_PLL |
1384					LP_PHY_CTL_PIN_PU_RX  |
1385					LP_PHY_CTL_PIN_PU_TX;
1386
1387				if ((val & 0xf0) != 0x10)
1388					lp_phy_val |=
1389						LP_PHY_CTL_GEN_TX_3G |
1390						LP_PHY_CTL_GEN_RX_3G;
1391
1392				writelfl(lp_phy_val, lp_phy_addr);
1393			}
1394		}
1395		writelfl(val, addr);
1396		return 0;
1397	} else
1398		return -EINVAL;
1399}
1400
1401static void mv6_dev_config(struct ata_device *adev)
1402{
1403	/*
1404	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1405	 *
1406	 * Gen-II does not support NCQ over a port multiplier
1407	 *  (no FIS-based switching).
1408	 */
1409	if (adev->flags & ATA_DFLAG_NCQ) {
1410		if (sata_pmp_attached(adev->link->ap)) {
1411			adev->flags &= ~ATA_DFLAG_NCQ;
1412			ata_dev_info(adev,
1413				"NCQ disabled for command-based switching\n");
1414		}
1415	}
1416}
1417
1418static int mv_qc_defer(struct ata_queued_cmd *qc)
1419{
1420	struct ata_link *link = qc->dev->link;
1421	struct ata_port *ap = link->ap;
1422	struct mv_port_priv *pp = ap->private_data;
1423
1424	/*
1425	 * Don't allow new commands if we're in a delayed EH state
1426	 * for NCQ and/or FIS-based switching.
1427	 */
1428	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1429		return ATA_DEFER_PORT;
1430
1431	/* PIO commands need an exclusive link: no other commands [DMA or PIO]
1432	 * can run concurrently.
1433	 * We set excl_link when we want to send a PIO command in DMA mode
1434	 * or a non-NCQ command in NCQ mode.
1435	 * When we receive a command from that link, and there are no
1436	 * outstanding commands, mark a flag to clear excl_link and let
1437	 * the command go through.
1438	 */
1439	if (unlikely(ap->excl_link)) {
1440		if (link == ap->excl_link) {
1441			if (ap->nr_active_links)
1442				return ATA_DEFER_PORT;
1443			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1444			return 0;
1445		} else
1446			return ATA_DEFER_PORT;
1447	}
1448
1449	/*
1450	 * If the port is completely idle, then allow the new qc.
1451	 */
1452	if (ap->nr_active_links == 0)
1453		return 0;
1454
1455	/*
1456	 * The port is operating in host queuing mode (EDMA) with NCQ
1457	 * enabled, allow multiple NCQ commands.  EDMA also allows
1458	 * queueing multiple DMA commands but libata core currently
1459	 * doesn't allow it.
1460	 */
1461	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1462	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1463		if (ata_is_ncq(qc->tf.protocol))
1464			return 0;
1465		else {
1466			ap->excl_link = link;
1467			return ATA_DEFER_PORT;
1468		}
1469	}
1470
1471	return ATA_DEFER_PORT;
1472}
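/*
 * Editorial walk-through of the deferral rules above: with NCQ EDMA
 * active, a non-NCQ command (e.g. PIO) reaches the final branch, sets
 * ap->excl_link and is deferred.  Once the port drains
 * (nr_active_links == 0), the retried command passes the excl_link
 * test, is flagged ATA_QCFLAG_CLEAR_EXCL, and is allowed to issue.
 */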
1473
1474static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1475{
1476	struct mv_port_priv *pp = ap->private_data;
1477	void __iomem *port_mmio;
1478
1479	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1480	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1481	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1482
1483	ltmode   = *old_ltmode & ~LTMODE_BIT8;
1484	haltcond = *old_haltcond | EDMA_ERR_DEV;
1485
1486	if (want_fbs) {
1487		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1488		ltmode = *old_ltmode | LTMODE_BIT8;
1489		if (want_ncq)
1490			haltcond &= ~EDMA_ERR_DEV;
1491		else
1492			fiscfg |=  FISCFG_WAIT_DEV_ERR;
1493	} else {
1494		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1495	}
1496
1497	port_mmio = mv_ap_base(ap);
1498	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1499	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1500	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1501}
1502
1503static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1504{
1505	struct mv_host_priv *hpriv = ap->host->private_data;
1506	u32 old, new;
1507
1508	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1509	old = readl(hpriv->base + GPIO_PORT_CTL);
1510	if (want_ncq)
1511		new = old | (1 << 22);
1512	else
1513		new = old & ~(1 << 22);
1514	if (new != old)
1515		writel(new, hpriv->base + GPIO_PORT_CTL);
1516}
1517
1518/*
1519 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1520 *	@ap: Port being initialized; @enable_bmdma: enable (1) or disable (0)
1521 *
1522 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
1523 *
1524 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
1525 *	of basic DMA on the GEN_IIE versions of the chips.
1526 *
1527 *	This bit survives EDMA resets, and must be set for basic DMA
1528 *	to function, and should be cleared when EDMA is active.
1529 */
1530static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1531{
1532	struct mv_port_priv *pp = ap->private_data;
1533	u32 new, *old = &pp->cached.unknown_rsvd;
1534
1535	if (enable_bmdma)
1536		new = *old | 1;
1537	else
1538		new = *old & ~1;
1539	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1540}
1541
1542/*
1543 * SOC chips have an issue whereby the HDD LEDs don't always blink
1544 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1545 * of the SOC takes care of it, generating a steady blink rate when
1546 * any drive on the chip is active.
1547 *
1548 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1549 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1550 *
1551 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1552 * LED operation works then, and provides better (more accurate) feedback.
1553 *
1554 * Note that this code assumes that an SOC never has more than one HC onboard.
1555 */
1556static void mv_soc_led_blink_enable(struct ata_port *ap)
1557{
1558	struct ata_host *host = ap->host;
1559	struct mv_host_priv *hpriv = host->private_data;
1560	void __iomem *hc_mmio;
1561	u32 led_ctrl;
1562
1563	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1564		return;
1565	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1566	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1567	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1568	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1569}
1570
1571static void mv_soc_led_blink_disable(struct ata_port *ap)
1572{
1573	struct ata_host *host = ap->host;
1574	struct mv_host_priv *hpriv = host->private_data;
1575	void __iomem *hc_mmio;
1576	u32 led_ctrl;
1577	unsigned int port;
1578
1579	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1580		return;
1581
1582	/* disable led-blink only if no ports are using NCQ */
1583	for (port = 0; port < hpriv->n_ports; port++) {
1584		struct ata_port *this_ap = host->ports[port];
1585		struct mv_port_priv *pp = this_ap->private_data;
1586
1587		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1588			return;
1589	}
1590
1591	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1592	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1593	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1594	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1595}
1596
1597static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1598{
1599	u32 cfg;
1600	struct mv_port_priv *pp    = ap->private_data;
1601	struct mv_host_priv *hpriv = ap->host->private_data;
1602	void __iomem *port_mmio    = mv_ap_base(ap);
1603
1604	/* set up non-NCQ EDMA configuration */
1605	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1606	pp->pp_flags &=
1607	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1608
1609	if (IS_GEN_I(hpriv))
1610		cfg |= (1 << 8);	/* enab config burst size mask */
1611
1612	else if (IS_GEN_II(hpriv)) {
1613		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1614		mv_60x1_errata_sata25(ap, want_ncq);
1615
1616	} else if (IS_GEN_IIE(hpriv)) {
1617		int want_fbs = sata_pmp_attached(ap);
1618		/*
1619		 * Possible future enhancement:
1620		 *
1621		 * The chip can use FBS with non-NCQ, if we allow it,
1622		 * But first we need to have the error handling in place
1623		 * for this mode (datasheet section 7.3.15.4.2.3).
1624		 * So disallow non-NCQ FBS for now.
1625		 */
1626		want_fbs &= want_ncq;
1627
1628		mv_config_fbs(ap, want_ncq, want_fbs);
1629
1630		if (want_fbs) {
1631			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1632			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1633		}
1634
1635		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1636		if (want_edma) {
1637			cfg |= (1 << 22); /* enab 4-entry host queue cache */
1638			if (!IS_SOC(hpriv))
1639				cfg |= (1 << 18); /* enab early completion */
1640		}
1641		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1642			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1643		mv_bmdma_enable_iie(ap, !want_edma);
1644
1645		if (IS_SOC(hpriv)) {
1646			if (want_ncq)
1647				mv_soc_led_blink_enable(ap);
1648			else
1649				mv_soc_led_blink_disable(ap);
1650		}
1651	}
1652
1653	if (want_ncq) {
1654		cfg |= EDMA_CFG_NCQ;
1655		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1656	}
1657
1658	writelfl(cfg, port_mmio + EDMA_CFG);
1659}
1660
1661static void mv_port_free_dma_mem(struct ata_port *ap)
1662{
1663	struct mv_host_priv *hpriv = ap->host->private_data;
1664	struct mv_port_priv *pp = ap->private_data;
1665	int tag;
1666
1667	if (pp->crqb) {
1668		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1669		pp->crqb = NULL;
1670	}
1671	if (pp->crpb) {
1672		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1673		pp->crpb = NULL;
1674	}
1675	/*
1676	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1677	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1678	 */
1679	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1680		if (pp->sg_tbl[tag]) {
1681			if (tag == 0 || !IS_GEN_I(hpriv))
1682				dma_pool_free(hpriv->sg_tbl_pool,
1683					      pp->sg_tbl[tag],
1684					      pp->sg_tbl_dma[tag]);
1685			pp->sg_tbl[tag] = NULL;
1686		}
1687	}
1688}
1689
1690/**
1691 *      mv_port_start - Port specific init/start routine.
1692 *      @ap: ATA channel to manipulate
1693 *
1694 *      Allocate and point to DMA memory, init port private memory,
1695 *      zero indices.
1696 *
1697 *      LOCKING:
1698 *      Inherited from caller.
1699 */
1700static int mv_port_start(struct ata_port *ap)
1701{
1702	struct device *dev = ap->host->dev;
1703	struct mv_host_priv *hpriv = ap->host->private_data;
1704	struct mv_port_priv *pp;
1705	unsigned long flags;
1706	int tag;
1707
1708	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1709	if (!pp)
1710		return -ENOMEM;
1711	ap->private_data = pp;
1712
1713	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1714	if (!pp->crqb)
1715		return -ENOMEM;
1716
1717	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1718	if (!pp->crpb)
1719		goto out_port_free_dma_mem;
1720
1721	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1722	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1723		ap->flags |= ATA_FLAG_AN;
1724	/*
1725	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1726	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1727	 */
1728	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1729		if (tag == 0 || !IS_GEN_I(hpriv)) {
1730			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1731					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1732			if (!pp->sg_tbl[tag])
1733				goto out_port_free_dma_mem;
1734		} else {
1735			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1736			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1737		}
1738	}
1739
1740	spin_lock_irqsave(ap->lock, flags);
1741	mv_save_cached_regs(ap);
1742	mv_edma_cfg(ap, 0, 0);
1743	spin_unlock_irqrestore(ap->lock, flags);
1744
1745	return 0;
1746
1747out_port_free_dma_mem:
1748	mv_port_free_dma_mem(ap);
1749	return -ENOMEM;
1750}
1751
1752/**
1753 *      mv_port_stop - Port specific cleanup/stop routine.
1754 *      @ap: ATA channel to manipulate
1755 *
1756 *      Stop DMA, cleanup port memory.
1757 *
1758 *      LOCKING:
1759 *      This routine uses the host lock to protect the DMA stop.
1760 */
1761static void mv_port_stop(struct ata_port *ap)
1762{
1763	unsigned long flags;
1764
1765	spin_lock_irqsave(ap->lock, flags);
1766	mv_stop_edma(ap);
1767	mv_enable_port_irqs(ap, 0);
1768	spin_unlock_irqrestore(ap->lock, flags);
1769	mv_port_free_dma_mem(ap);
1770}
1771
1772/**
1773 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1774 *      @qc: queued command whose SG list to source from
1775 *
1776 *      Populate the SG list and mark the last entry.
1777 *
1778 *      LOCKING:
1779 *      Inherited from caller.
1780 */
1781static void mv_fill_sg(struct ata_queued_cmd *qc)
1782{
1783	struct mv_port_priv *pp = qc->ap->private_data;
1784	struct scatterlist *sg;
1785	struct mv_sg *mv_sg, *last_sg = NULL;
1786	unsigned int si;
1787
1788	mv_sg = pp->sg_tbl[qc->hw_tag];
1789	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1790		dma_addr_t addr = sg_dma_address(sg);
1791		u32 sg_len = sg_dma_len(sg);
1792
1793		while (sg_len) {
1794			u32 offset = addr & 0xffff;
1795			u32 len = sg_len;
1796
1797			if (offset + len > 0x10000)
1798				len = 0x10000 - offset;
1799
1800			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1801			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1802			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1803			mv_sg->reserved = 0;
1804
1805			sg_len -= len;
1806			addr += len;
1807
1808			last_sg = mv_sg;
1809			mv_sg++;
1810		}
1811	}
1812
1813	if (likely(last_sg))
1814		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1815	mb(); /* ensure data structure is visible to the chipset */
1816}
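
/*
 * Worked example (editor's illustration): a single DMA segment of
 * 0x18000 bytes at bus address 0x1f000 crosses a 64KB boundary, so
 * the splitting loop above emits three ePRD entries:
 *
 *	addr 0x1f000  len 0x1000	stops at the 0x20000 boundary
 *	addr 0x20000  len 0x10000	stored as 0 in flags_size, since
 *					len & 0xffff == 0; presumably the
 *					EDMA reads 0 as 64KB, as standard
 *					BMDMA PRD engines do
 *	addr 0x30000  len 0x7000	remainder
 *
 * Note also the "(addr >> 16) >> 16" idiom for addr_hi: it extracts
 * the upper 32 bits without a shift-by-32, which would be undefined
 * behaviour when dma_addr_t is only 32 bits wide.
 */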
1817
1818static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1819{
1820	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1821		(last ? CRQB_CMD_LAST : 0);
1822	*cmdw = cpu_to_le16(tmp);
1823}
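
/*
 * Example encoding (editor's illustration, assuming the CRQB_CMD_*
 * values defined earlier in this file: ADDR_SHIFT == 8,
 * CS == 0x2 << 11, LAST == 1 << 15): packing command byte 0xc8
 * (READ DMA) as the final CRQB word gives
 *
 *	0xc8 | (ATA_REG_CMD << 8) | CRQB_CMD_CS | CRQB_CMD_LAST
 *	     = 0x00c8 | 0x0700 | 0x1000 | 0x8000 = 0x97c8
 *
 * since ATA_REG_CMD is shadow-register address 7.
 */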
1824
1825/**
1826 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
1827 *	@ap: Port associated with this ATA transaction.
1828 *
1829 *	We need this only for ATAPI bmdma transactions,
1830 *	as otherwise we experience spurious interrupts
1831 *	after libata-sff handles the bmdma interrupts.
1832 */
1833static void mv_sff_irq_clear(struct ata_port *ap)
1834{
1835	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1836}
1837
1838/**
1839 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1840 *	@qc: queued command to check for chipset/DMA compatibility.
1841 *
1842 *	The bmdma engines cannot handle speculative data sizes
1843 *	(bytecount under/over flow).  So only allow DMA for
1844 *	data transfer commands with known data sizes.
1845 *
1846 *	LOCKING:
1847 *	Inherited from caller.
1848 */
1849static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1850{
1851	struct scsi_cmnd *scmd = qc->scsicmd;
1852
1853	if (scmd) {
1854		switch (scmd->cmnd[0]) {
1855		case READ_6:
1856		case READ_10:
1857		case READ_12:
1858		case WRITE_6:
1859		case WRITE_10:
1860		case WRITE_12:
1861		case GPCMD_READ_CD:
1862		case GPCMD_SEND_DVD_STRUCTURE:
1863		case GPCMD_SEND_CUE_SHEET:
1864			return 0; /* DMA is safe */
1865		}
1866	}
1867	return -EOPNOTSUPP; /* use PIO instead */
1868}
1869
1870/**
1871 *	mv_bmdma_setup - Set up BMDMA transaction
1872 *	@qc: queued command to prepare DMA for.
1873 *
1874 *	LOCKING:
1875 *	Inherited from caller.
1876 */
1877static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1878{
1879	struct ata_port *ap = qc->ap;
1880	void __iomem *port_mmio = mv_ap_base(ap);
1881	struct mv_port_priv *pp = ap->private_data;
1882
1883	mv_fill_sg(qc);
1884
1885	/* clear all DMA cmd bits */
1886	writel(0, port_mmio + BMDMA_CMD);
1887
1888	/* load PRD table addr. */
1889	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
1890		port_mmio + BMDMA_PRD_HIGH);
1891	writelfl(pp->sg_tbl_dma[qc->hw_tag],
1892		port_mmio + BMDMA_PRD_LOW);
1893
1894	/* issue r/w command */
1895	ap->ops->sff_exec_command(ap, &qc->tf);
1896}
1897
1898/**
1899 *	mv_bmdma_start - Start a BMDMA transaction
1900 *	@qc: queued command to start DMA on.
1901 *
1902 *	LOCKING:
1903 *	Inherited from caller.
1904 */
1905static void mv_bmdma_start(struct ata_queued_cmd *qc)
1906{
1907	struct ata_port *ap = qc->ap;
1908	void __iomem *port_mmio = mv_ap_base(ap);
1909	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1910	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1911
1912	/* start host DMA transaction */
1913	writelfl(cmd, port_mmio + BMDMA_CMD);
1914}
1915
1916/**
1917 *	mv_bmdma_stop_ap - Stop BMDMA transfer
1918 *	@ap: port to stop
1919 *
1920 *	Clears the ATA_DMA_START flag in the bmdma control register
1921 *
1922 *	LOCKING:
1923 *	Inherited from caller.
1924 */
1925static void mv_bmdma_stop_ap(struct ata_port *ap)
1926{
1927	void __iomem *port_mmio = mv_ap_base(ap);
1928	u32 cmd;
1929
1930	/* clear start/stop bit */
1931	cmd = readl(port_mmio + BMDMA_CMD);
1932	if (cmd & ATA_DMA_START) {
1933		cmd &= ~ATA_DMA_START;
1934		writelfl(cmd, port_mmio + BMDMA_CMD);
1935
1936		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1937		ata_sff_dma_pause(ap);
1938	}
1939}
1940
1941static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1942{
1943	mv_bmdma_stop_ap(qc->ap);
1944}
1945
1946/**
1947 *	mv_bmdma_status - Read BMDMA status
1948 *	@ap: port for which to retrieve DMA status.
1949 *
1950 *	Read and return equivalent of the sff BMDMA status register.
1951 *
1952 *	LOCKING:
1953 *	Inherited from caller.
1954 */
1955static u8 mv_bmdma_status(struct ata_port *ap)
1956{
1957	void __iomem *port_mmio = mv_ap_base(ap);
1958	u32 reg, status;
1959
1960	/*
1961	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1962	 * and the ATA_DMA_INTR bit doesn't exist.
1963	 */
1964	reg = readl(port_mmio + BMDMA_STATUS);
1965	if (reg & ATA_DMA_ACTIVE)
1966		status = ATA_DMA_ACTIVE;
1967	else if (reg & ATA_DMA_ERR)
1968		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1969	else {
1970		/*
1971		 * Just because DMA_ACTIVE is 0 (DMA completed),
1972		 * this does _not_ mean the device is "done".
1973		 * So we should not yet signal ATA_DMA_INTR
1974		 * in some cases, e.g. DSM/TRIM, and perhaps others.
1975		 */
1976		mv_bmdma_stop_ap(ap);
1977		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1978			status = 0;
1979		else
1980			status = ATA_DMA_INTR;
1981	}
1982	return status;
1983}
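
/*
 * Editor's summary of the synthesized status above:
 *
 *	BMDMA_STATUS bits		returned sff status
 *	-----------------		-------------------
 *	ATA_DMA_ACTIVE set		ATA_DMA_ACTIVE
 *	ATA_DMA_ERR set			ATA_DMA_ERR | ATA_DMA_INTR
 *	neither, device still BUSY	0 (not done: e.g. DSM/TRIM)
 *	neither, device not BUSY	ATA_DMA_INTR
 */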
1984
1985static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1986{
1987	struct ata_taskfile *tf = &qc->tf;
1988	/*
1989	 * Workaround for 88SX60x1 FEr SATA#24.
1990	 *
1991	 * Chip may corrupt WRITEs if multi_count >= 4kB.
1992	 * Note that READs are unaffected.
1993	 *
1994	 * It's not clear if this errata really means "4K bytes",
1995	 * or if it always happens for multi_count > 7
1996	 * regardless of device sector_size.
1997	 *
1998	 * So, for safety, any write with multi_count > 7
1999	 * gets converted here into a regular PIO write instead:
2000	 */
2001	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
2002		if (qc->dev->multi_count > 7) {
2003			switch (tf->command) {
2004			case ATA_CMD_WRITE_MULTI:
2005				tf->command = ATA_CMD_PIO_WRITE;
2006				break;
2007			case ATA_CMD_WRITE_MULTI_FUA_EXT:
2008				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
2009				fallthrough;
2010			case ATA_CMD_WRITE_MULTI_EXT:
2011				tf->command = ATA_CMD_PIO_WRITE_EXT;
2012				break;
2013			}
2014		}
2015	}
2016}
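
/*
 * Editor's arithmetic note on the cutoff above: with 512-byte logical
 * sectors, multi_count == 8 is the first value converted, moving
 * 8 * 512 = 4096 bytes per DRQ block, so the two readings of the
 * errata ("4kB" vs "multi_count > 7") coincide for such devices.
 * They diverge only for larger sector sizes, where the driver follows
 * the multi_count > 7 reading.
 */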
2017
2018/**
2019 *      mv_qc_prep - Host specific command preparation.
2020 *      @qc: queued command to prepare
2021 *
2022 *      This routine simply redirects to the general purpose routine
2023 *      if command is not DMA.  Else, it handles prep of the CRQB
2024 *      (command request block), does some sanity checking, and calls
2025 *      the SG load routine.
2026 *
2027 *      LOCKING:
2028 *      Inherited from caller.
2029 */
2030static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
2031{
2032	struct ata_port *ap = qc->ap;
2033	struct mv_port_priv *pp = ap->private_data;
2034	__le16 *cw;
2035	struct ata_taskfile *tf = &qc->tf;
2036	u16 flags = 0;
2037	unsigned in_index;
2038
2039	switch (tf->protocol) {
2040	case ATA_PROT_DMA:
2041		if (tf->command == ATA_CMD_DSM)
2042			return AC_ERR_OK;
2043		fallthrough;
2044	case ATA_PROT_NCQ:
2045		break;	/* continue below */
2046	case ATA_PROT_PIO:
2047		mv_rw_multi_errata_sata24(qc);
2048		return AC_ERR_OK;
2049	default:
2050		return AC_ERR_OK;
2051	}
2052
2053	/* Fill in command request block */
2055	if (!(tf->flags & ATA_TFLAG_WRITE))
2056		flags |= CRQB_FLAG_READ;
2057	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2058	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2059	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2060
2061	/* get current queue index from software */
2062	in_index = pp->req_idx;
2063
2064	pp->crqb[in_index].sg_addr =
2065		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2066	pp->crqb[in_index].sg_addr_hi =
2067		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2068	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2069
2070	cw = &pp->crqb[in_index].ata_cmd[0];
2071
2072	/* Sadly, the CRQB cannot accommodate all registers--there are
2073	 * only 11 bytes...so we must pick and choose required
2074	 * registers based on the command.  So, we drop feature and
2075	 * hob_feature for [RW] DMA commands, but they are needed for
2076	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
2077	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2078	 */
2079	switch (tf->command) {
2080	case ATA_CMD_READ:
2081	case ATA_CMD_READ_EXT:
2082	case ATA_CMD_WRITE:
2083	case ATA_CMD_WRITE_EXT:
2084	case ATA_CMD_WRITE_FUA_EXT:
2085		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2086		break;
2087	case ATA_CMD_FPDMA_READ:
2088	case ATA_CMD_FPDMA_WRITE:
2089		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2090		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2091		break;
2092	default:
2093		/* The only other commands EDMA supports in non-queued and
2094		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2095		 * of which are defined/used by Linux.  If we get here, this
2096		 * driver needs work.
2097		 */
2098		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
2099				tf->command);
2100		return AC_ERR_INVALID;
2101	}
2102	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2103	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2104	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2105	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2106	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2107	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2108	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2109	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2110	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
2111
2112	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2113		return AC_ERR_OK;
2114	mv_fill_sg(qc);
2115
2116	return AC_ERR_OK;
2117}
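
/*
 * Editor's summary of the register packing above:
 *
 *	command class		dropped			kept first
 *	-------------		-------			----------
 *	[RW] DMA (EXT/FUA)	feature/hob_feature	hob_nsect
 *	FPDMA READ/WRITE	hob_nsect		hob_feature, feature
 *						(true sector count for NCQ;
 *						 nsect carries only the tag)
 *
 * Both variants then continue with nsect, the six LBA bytes, device,
 * and finally the command byte flagged with CRQB_CMD_LAST.
 */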
2118
2119/**
2120 *      mv_qc_prep_iie - Host specific command preparation.
2121 *      @qc: queued command to prepare
2122 *
2123 *      This routine simply redirects to the general purpose routine
2124 *      if command is not DMA.  Else, it handles prep of the CRQB
2125 *      (command request block), does some sanity checking, and calls
2126 *      the SG load routine.
2127 *
2128 *      LOCKING:
2129 *      Inherited from caller.
2130 */
2131static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
2132{
2133	struct ata_port *ap = qc->ap;
2134	struct mv_port_priv *pp = ap->private_data;
2135	struct mv_crqb_iie *crqb;
2136	struct ata_taskfile *tf = &qc->tf;
2137	unsigned in_index;
2138	u32 flags = 0;
2139
2140	if ((tf->protocol != ATA_PROT_DMA) &&
2141	    (tf->protocol != ATA_PROT_NCQ))
2142		return AC_ERR_OK;
2143	if (tf->command == ATA_CMD_DSM)
2144		return AC_ERR_OK;  /* use bmdma for this */
2145
2146	/* Fill in Gen IIE command request block */
2147	if (!(tf->flags & ATA_TFLAG_WRITE))
2148		flags |= CRQB_FLAG_READ;
2149
2150	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2151	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2152	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
2153	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2154
2155	/* get current queue index from software */
2156	in_index = pp->req_idx;
2157
2158	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2159	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2160	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2161	crqb->flags = cpu_to_le32(flags);
2162
2163	crqb->ata_cmd[0] = cpu_to_le32(
2164			(tf->command << 16) |
2165			(tf->feature << 24)
2166		);
2167	crqb->ata_cmd[1] = cpu_to_le32(
2168			(tf->lbal << 0) |
2169			(tf->lbam << 8) |
2170			(tf->lbah << 16) |
2171			(tf->device << 24)
2172		);
2173	crqb->ata_cmd[2] = cpu_to_le32(
2174			(tf->hob_lbal << 0) |
2175			(tf->hob_lbam << 8) |
2176			(tf->hob_lbah << 16) |
2177			(tf->hob_feature << 24)
2178		);
2179	crqb->ata_cmd[3] = cpu_to_le32(
2180			(tf->nsect << 0) |
2181			(tf->hob_nsect << 8)
2182		);
2183
2184	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2185		return AC_ERR_OK;
2186	mv_fill_sg(qc);
2187
2188	return AC_ERR_OK;
2189}
2190
2191/**
2192 *	mv_sff_check_status - fetch device status, if valid
2193 *	@ap: ATA port to fetch status from
2194 *
2195 *	When using command issue via mv_qc_issue_fis(),
2196 *	the initial ATA_BUSY state does not show up in the
2197 *	ATA status (shadow) register.  This can confuse libata!
2198 *
2199 *	So we have a hook here to fake ATA_BUSY for that situation,
2200 *	until the first time a BUSY, DRQ, or ERR bit is seen.
2201 *
2202 *	The rest of the time, it simply returns the ATA status register.
2203 */
2204static u8 mv_sff_check_status(struct ata_port *ap)
2205{
2206	u8 stat = ioread8(ap->ioaddr.status_addr);
2207	struct mv_port_priv *pp = ap->private_data;
2208
2209	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2210		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2211			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2212		else
2213			stat = ATA_BUSY;
2214	}
2215	return stat;
2216}
2217
2218/**
2219 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2220 *	@ap: ATA port to send a FIS
2221 *	@fis: fis to be sent
2222 *	@nwords: number of 32-bit words in the fis
2223 */
2224static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2225{
2226	void __iomem *port_mmio = mv_ap_base(ap);
2227	u32 ifctl, old_ifctl, ifstat;
2228	int i, timeout = 200, final_word = nwords - 1;
2229
2230	/* Initiate FIS transmission mode */
2231	old_ifctl = readl(port_mmio + SATA_IFCTL);
2232	ifctl = 0x100 | (old_ifctl & 0xf);
2233	writelfl(ifctl, port_mmio + SATA_IFCTL);
2234
2235	/* Send all words of the FIS except for the final word */
2236	for (i = 0; i < final_word; ++i)
2237		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2238
2239	/* Flag end-of-transmission, and then send the final word */
2240	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2241	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2242
2243	/*
2244	 * Wait for FIS transmission to complete.
2245	 * This typically takes just a single iteration.
2246	 */
2247	do {
2248		ifstat = readl(port_mmio + SATA_IFSTAT);
2249	} while (!(ifstat & 0x1000) && --timeout);
2250
2251	/* Restore original port configuration */
2252	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2253
2254	/* See if it worked */
2255	if ((ifstat & 0x3000) != 0x1000) {
2256		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2257			      __func__, ifstat);
2258		return AC_ERR_OTHER;
2259	}
2260	return 0;
2261}
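
/*
 * Editor's illustration: the buffer that mv_qc_issue_fis() sends
 * through this routine is a 20-byte host-to-device Register FIS,
 * i.e. five 32-bit little-endian words as built by ata_tf_to_fis():
 *
 *	fis[0] = 0x27 | (pmp << 8) | (1 << 15)		FIS type, PM port,
 *	       | (command << 16) | (feature << 24);	C bit, cmd, feature
 *	fis[1] = lbal | (lbam << 8) | (lbah << 16) | (device << 24);
 *	fis[2] = hob_lbal | (hob_lbam << 8) | (hob_lbah << 16)
 *	       | (hob_feature << 24);
 *	fis[3] = nsect | (hob_nsect << 8) | (ctl << 24);
 *	fis[4] = 0;					reserved
 *
 * Shown for reference only; this is the standard SATA layout, not
 * driver code.
 */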
2262
2263/**
2264 *	mv_qc_issue_fis - Issue a command directly as a FIS
2265 *	@qc: queued command to start
2266 *
2267 *	Note that the ATA shadow registers are not updated
2268 *	after command issue, so the device will appear "READY"
2269 *	if polled, even while it is BUSY processing the command.
2270 *
2271 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
2272 *
2273 *	Note: we don't get updated shadow regs on *completion*
2274 *	of non-data commands. So avoid sending them via this function,
2275 *	as they will appear to have completed immediately.
2276 *
2277 *	GEN_IIE has special registers that we could get the result tf from,
2278 *	but earlier chipsets do not.  For now, we ignore those registers.
2279 */
2280static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2281{
2282	struct ata_port *ap = qc->ap;
2283	struct mv_port_priv *pp = ap->private_data;
2284	struct ata_link *link = qc->dev->link;
2285	u32 fis[5];
2286	int err = 0;
2287
2288	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2289	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2290	if (err)
2291		return err;
2292
2293	switch (qc->tf.protocol) {
2294	case ATAPI_PROT_PIO:
2295		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2296		fallthrough;
2297	case ATAPI_PROT_NODATA:
2298		ap->hsm_task_state = HSM_ST_FIRST;
2299		break;
2300	case ATA_PROT_PIO:
2301		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2302		if (qc->tf.flags & ATA_TFLAG_WRITE)
2303			ap->hsm_task_state = HSM_ST_FIRST;
2304		else
2305			ap->hsm_task_state = HSM_ST;
2306		break;
2307	default:
2308		ap->hsm_task_state = HSM_ST_LAST;
2309		break;
2310	}
2311
2312	if (qc->tf.flags & ATA_TFLAG_POLLING)
2313		ata_sff_queue_pio_task(link, 0);
2314	return 0;
2315}
2316
2317/**
2318 *      mv_qc_issue - Initiate a command to the host
2319 *      @qc: queued command to start
2320 *
2321 *      This routine simply redirects to the general purpose routine
2322 *      if command is not DMA.  Else, it sanity checks our local
2323 *      caches of the request producer/consumer indices then enables
2324 *      DMA and bumps the request producer index.
2325 *
2326 *      LOCKING:
2327 *      Inherited from caller.
2328 */
2329static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2330{
2331	static int limit_warnings = 10;
2332	struct ata_port *ap = qc->ap;
2333	void __iomem *port_mmio = mv_ap_base(ap);
2334	struct mv_port_priv *pp = ap->private_data;
2335	u32 in_index;
2336	unsigned int port_irqs;
2337
2338	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2339
2340	switch (qc->tf.protocol) {
2341	case ATA_PROT_DMA:
2342		if (qc->tf.command == ATA_CMD_DSM) {
2343			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
2344				return AC_ERR_OTHER;
2345			break;  /* use bmdma for this */
2346		}
2347		fallthrough;
2348	case ATA_PROT_NCQ:
2349		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2350		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2351		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2352
2353		/* Write the request in pointer to kick the EDMA to life */
2354		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2355					port_mmio + EDMA_REQ_Q_IN_PTR);
2356		return 0;
2357
2358	case ATA_PROT_PIO:
2359		/*
2360		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2361		 *
2362		 * Someday, we might implement special polling workarounds
2363		 * for these, but it all seems rather unnecessary since we
2364		 * normally use only DMA for commands which transfer more
2365		 * than a single block of data.
2366		 *
2367		 * Much of the time, this could just work regardless.
2368		 * So for now, just log the incident, and allow the attempt.
2369		 */
2370		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2371			--limit_warnings;
2372			ata_link_warn(qc->dev->link, DRV_NAME
2373				      ": attempting PIO w/multiple DRQ: "
2374				      "this may fail due to h/w errata\n");
2375		}
2376		fallthrough;
2377	case ATA_PROT_NODATA:
2378	case ATAPI_PROT_PIO:
2379	case ATAPI_PROT_NODATA:
2380		if (ap->flags & ATA_FLAG_PIO_POLLING)
2381			qc->tf.flags |= ATA_TFLAG_POLLING;
2382		break;
2383	}
2384
2385	if (qc->tf.flags & ATA_TFLAG_POLLING)
2386		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
2387	else
2388		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
2389
2390	/*
2391	 * We're about to send a non-EDMA capable command to the
2392	 * port.  Turn off EDMA so there won't be problems accessing
2393	 * shadow block, etc registers.
2394	 */
2395	mv_stop_edma(ap);
2396	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2397	mv_pmp_select(ap, qc->dev->link->pmp);
2398
2399	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2400		struct mv_host_priv *hpriv = ap->host->private_data;
2401		/*
2402		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2403		 *
2404		 * After any NCQ error, the READ_LOG_EXT command
2405		 * from libata-eh *must* use mv_qc_issue_fis().
2406		 * Otherwise it might fail, due to chip errata.
2407		 *
2408		 * Rather than special-case it, we'll just *always*
2409		 * use this method here for READ_LOG_EXT, making for
2410		 * easier testing.
2411		 */
2412		if (IS_GEN_II(hpriv))
2413			return mv_qc_issue_fis(qc);
2414	}
2415	return ata_bmdma_qc_issue(qc);
2416}
2417
2418static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2419{
2420	struct mv_port_priv *pp = ap->private_data;
2421	struct ata_queued_cmd *qc;
2422
2423	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2424		return NULL;
2425	qc = ata_qc_from_tag(ap, ap->link.active_tag);
2426	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2427		return qc;
2428	return NULL;
2429}
2430
2431static void mv_pmp_error_handler(struct ata_port *ap)
2432{
2433	unsigned int pmp, pmp_map;
2434	struct mv_port_priv *pp = ap->private_data;
2435
2436	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2437		/*
2438		 * Perform NCQ error analysis on failed PMPs
2439		 * before we freeze the port entirely.
2440		 *
2441		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2442		 */
2443		pmp_map = pp->delayed_eh_pmp_map;
2444		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2445		for (pmp = 0; pmp_map != 0; pmp++) {
2446			unsigned int this_pmp = (1 << pmp);
2447			if (pmp_map & this_pmp) {
2448				struct ata_link *link = &ap->pmp_link[pmp];
2449				pmp_map &= ~this_pmp;
2450				ata_eh_analyze_ncq_error(link);
2451			}
2452		}
2453		ata_port_freeze(ap);
2454	}
2455	sata_pmp_error_handler(ap);
2456}
2457
2458static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2459{
2460	void __iomem *port_mmio = mv_ap_base(ap);
2461
2462	return readl(port_mmio + SATA_TESTCTL) >> 16;
2463}
2464
2465static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2466{
2467	unsigned int pmp;
2468
2469	/*
2470	 * Initialize EH info for PMPs which saw device errors
2471	 */
2472	for (pmp = 0; pmp_map != 0; pmp++) {
2473		unsigned int this_pmp = (1 << pmp);
2474		if (pmp_map & this_pmp) {
2475			struct ata_link *link = &ap->pmp_link[pmp];
2476			struct ata_eh_info *ehi = &link->eh_info;
2477
2478			pmp_map &= ~this_pmp;
2479			ata_ehi_clear_desc(ehi);
2480			ata_ehi_push_desc(ehi, "dev err");
2481			ehi->err_mask |= AC_ERR_DEV;
2482			ehi->action |= ATA_EH_RESET;
2483			ata_link_abort(link);
2484		}
2485	}
2486}
2487
2488static int mv_req_q_empty(struct ata_port *ap)
2489{
2490	void __iomem *port_mmio = mv_ap_base(ap);
2491	u32 in_ptr, out_ptr;
2492
2493	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2494			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2495	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2496			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2497	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
2498}
2499
2500static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2501{
2502	struct mv_port_priv *pp = ap->private_data;
2503	int failed_links;
2504	unsigned int old_map, new_map;
2505
2506	/*
2507	 * Device error during FBS+NCQ operation:
2508	 *
2509	 * Set a port flag to prevent further I/O being enqueued.
2510	 * Leave the EDMA running to drain outstanding commands from this port.
2511	 * Perform the post-mortem/EH only when all responses are complete.
2512	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2513	 */
2514	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2515		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2516		pp->delayed_eh_pmp_map = 0;
2517	}
2518	old_map = pp->delayed_eh_pmp_map;
2519	new_map = old_map | mv_get_err_pmp_map(ap);
2520
2521	if (old_map != new_map) {
2522		pp->delayed_eh_pmp_map = new_map;
2523		mv_pmp_eh_prep(ap, new_map & ~old_map);
2524	}
2525	failed_links = hweight16(new_map);
2526
2527	ata_port_info(ap,
2528		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
2529		      __func__, pp->delayed_eh_pmp_map,
2530		      ap->qc_active, failed_links,
2531		      ap->nr_active_links);
2532
2533	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2534		mv_process_crpb_entries(ap, pp);
2535		mv_stop_edma(ap);
2536		mv_eh_freeze(ap);
2537		ata_port_info(ap, "%s: done\n", __func__);
2538		return 1;	/* handled */
2539	}
2540	ata_port_info(ap, "%s: waiting\n", __func__);
2541	return 1;	/* handled */
2542}
2543
2544static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2545{
2546	/*
2547	 * Possible future enhancement:
2548	 *
2549	 * FBS+non-NCQ operation is not yet implemented.
2550	 * See related notes in mv_edma_cfg().
2551	 *
2552	 * Device error during FBS+non-NCQ operation:
2553	 *
2554	 * We need to snapshot the shadow registers for each failed command.
2555	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2556	 */
2557	return 0;	/* not handled */
2558}
2559
2560static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2561{
2562	struct mv_port_priv *pp = ap->private_data;
2563
2564	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2565		return 0;	/* EDMA was not active: not handled */
2566	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2567		return 0;	/* FBS was not active: not handled */
2568
2569	if (!(edma_err_cause & EDMA_ERR_DEV))
2570		return 0;	/* non DEV error: not handled */
2571	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2572	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2573		return 0;	/* other problems: not handled */
2574
2575	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2576		/*
2577		 * EDMA should NOT have self-disabled for this case.
2578		 * If it did, then something is wrong elsewhere,
2579		 * and we cannot handle it here.
2580		 */
2581		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2582			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2583				      __func__, edma_err_cause, pp->pp_flags);
2584			return 0; /* not handled */
2585		}
2586		return mv_handle_fbs_ncq_dev_err(ap);
2587	} else {
2588		/*
2589		 * EDMA should have self-disabled for this case.
2590		 * If it did not, then something is wrong elsewhere,
2591		 * and we cannot handle it here.
2592		 */
2593		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2594			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2595				      __func__, edma_err_cause, pp->pp_flags);
2596			return 0; /* not handled */
2597		}
2598		return mv_handle_fbs_non_ncq_dev_err(ap);
2599	}
2600	return 0;	/* not handled */
2601}
2602
2603static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2604{
2605	struct ata_eh_info *ehi = &ap->link.eh_info;
2606	char *when = "idle";
2607
2608	ata_ehi_clear_desc(ehi);
2609	if (edma_was_enabled) {
2610		when = "EDMA enabled";
2611	} else {
2612		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2613		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2614			when = "polling";
2615	}
2616	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2617	ehi->err_mask |= AC_ERR_OTHER;
2618	ehi->action   |= ATA_EH_RESET;
2619	ata_port_freeze(ap);
2620}
2621
2622/**
2623 *      mv_err_intr - Handle error interrupts on the port
2624 *      @ap: ATA channel to manipulate
2625 *
2626 *      Most cases require a full reset of the chip's state machine,
2627 *      which also performs a COMRESET.
2628 *      Also, if the port disabled DMA, update our cached copy to match.
2629 *
2630 *      LOCKING:
2631 *      Inherited from caller.
2632 */
2633static void mv_err_intr(struct ata_port *ap)
2634{
2635	void __iomem *port_mmio = mv_ap_base(ap);
2636	u32 edma_err_cause, eh_freeze_mask, serr = 0;
2637	u32 fis_cause = 0;
2638	struct mv_port_priv *pp = ap->private_data;
2639	struct mv_host_priv *hpriv = ap->host->private_data;
2640	unsigned int action = 0, err_mask = 0;
2641	struct ata_eh_info *ehi = &ap->link.eh_info;
2642	struct ata_queued_cmd *qc;
2643	int abort = 0;
2644
2645	/*
2646	 * Read and clear the SError and err_cause bits.
2647	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2648	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2649	 */
2650	sata_scr_read(&ap->link, SCR_ERROR, &serr);
2651	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2652
2653	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2654	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2655		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2656		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2657	}
2658	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2659
2660	if (edma_err_cause & EDMA_ERR_DEV) {
2661		/*
2662		 * Device errors during FIS-based switching operation
2663		 * require special handling.
2664		 */
2665		if (mv_handle_dev_err(ap, edma_err_cause))
2666			return;
2667	}
2668
2669	qc = mv_get_active_qc(ap);
2670	ata_ehi_clear_desc(ehi);
2671	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2672			  edma_err_cause, pp->pp_flags);
2673
2674	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2675		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2676		if (fis_cause & FIS_IRQ_CAUSE_AN) {
2677			u32 ec = edma_err_cause &
2678			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2679			sata_async_notification(ap);
2680			if (!ec)
2681				return; /* Just an AN; no need for the nukes */
2682			ata_ehi_push_desc(ehi, "SDB notify");
2683		}
2684	}
2685	/*
2686	 * All generations share these EDMA error cause bits:
2687	 */
2688	if (edma_err_cause & EDMA_ERR_DEV) {
2689		err_mask |= AC_ERR_DEV;
2690		action |= ATA_EH_RESET;
2691		ata_ehi_push_desc(ehi, "dev error");
2692	}
2693	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2694			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2695			EDMA_ERR_INTRL_PAR)) {
2696		err_mask |= AC_ERR_ATA_BUS;
2697		action |= ATA_EH_RESET;
2698		ata_ehi_push_desc(ehi, "parity error");
2699	}
2700	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2701		ata_ehi_hotplugged(ehi);
2702		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2703			"dev disconnect" : "dev connect");
2704		action |= ATA_EH_RESET;
2705	}
2706
2707	/*
2708	 * Gen-I has a different SELF_DIS bit,
2709	 * different FREEZE bits, and no SERR bit:
2710	 */
2711	if (IS_GEN_I(hpriv)) {
2712		eh_freeze_mask = EDMA_EH_FREEZE_5;
2713		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2714			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2715			ata_ehi_push_desc(ehi, "EDMA self-disable");
2716		}
2717	} else {
2718		eh_freeze_mask = EDMA_EH_FREEZE;
2719		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2720			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2721			ata_ehi_push_desc(ehi, "EDMA self-disable");
2722		}
2723		if (edma_err_cause & EDMA_ERR_SERR) {
2724			ata_ehi_push_desc(ehi, "SError=%08x", serr);
2725			err_mask |= AC_ERR_ATA_BUS;
2726			action |= ATA_EH_RESET;
2727		}
2728	}
2729
2730	if (!err_mask) {
2731		err_mask = AC_ERR_OTHER;
2732		action |= ATA_EH_RESET;
2733	}
2734
2735	ehi->serror |= serr;
2736	ehi->action |= action;
2737
2738	if (qc)
2739		qc->err_mask |= err_mask;
2740	else
2741		ehi->err_mask |= err_mask;
2742
2743	if (err_mask == AC_ERR_DEV) {
2744		/*
2745		 * Cannot do ata_port_freeze() here,
2746		 * because it would kill PIO access,
2747		 * which is needed for further diagnosis.
2748		 */
2749		mv_eh_freeze(ap);
2750		abort = 1;
2751	} else if (edma_err_cause & eh_freeze_mask) {
2752		/*
2753		 * Note to self: ata_port_freeze() calls ata_port_abort()
2754		 */
2755		ata_port_freeze(ap);
2756	} else {
2757		abort = 1;
2758	}
2759
2760	if (abort) {
2761		if (qc)
2762			ata_link_abort(qc->dev->link);
2763		else
2764			ata_port_abort(ap);
2765	}
2766}
2767
2768static bool mv_process_crpb_response(struct ata_port *ap,
2769		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2770{
2771	u8 ata_status;
2772	u16 edma_status = le16_to_cpu(response->flags);
2773
2774	/*
2775	 * edma_status from a response queue entry:
2776	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2777	 *   MSB is saved ATA status from command completion.
2778	 */
2779	if (!ncq_enabled) {
2780		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2781		if (err_cause) {
2782			/*
2783			 * Error will be seen/handled by
2784			 * mv_err_intr().  So do nothing at all here.
2785			 */
2786			return false;
2787		}
2788	}
2789	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2790	if (!ac_err_mask(ata_status))
2791		return true;
2792	/* else: leave it for mv_err_intr() */
2793	return false;
2794}
2795
2796static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2797{
2798	void __iomem *port_mmio = mv_ap_base(ap);
2799	struct mv_host_priv *hpriv = ap->host->private_data;
2800	u32 in_index;
2801	bool work_done = false;
2802	u32 done_mask = 0;
2803	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2804
2805	/* Get the hardware queue position index */
2806	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2807			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2808
2809	/* Process new responses since the last time we looked */
2810	while (in_index != pp->resp_idx) {
2811		unsigned int tag;
2812		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2813
2814		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2815
2816		if (IS_GEN_I(hpriv)) {
2817			/* 50xx: no NCQ, only one command active at a time */
2818			tag = ap->link.active_tag;
2819		} else {
2820			/* Gen II/IIE: get command tag from CRPB entry */
2821			tag = le16_to_cpu(response->id) & 0x1f;
2822		}
2823		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2824			done_mask |= 1 << tag;
2825		work_done = true;
2826	}
2827
2828	if (work_done) {
2829		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2830
2831		/* Update the software queue position index in hardware */
2832		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2833			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2834			 port_mmio + EDMA_RSP_Q_OUT_PTR);
2835	}
2836}
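
/*
 * Editor's illustration of the completion step above: if tags 0, 1
 * and 3 were active (ata_qc_get_active() == 0xb) and the CRPB scan
 * produced done_mask == 0x2, then
 *
 *	ata_qc_complete_multiple(ap, 0xb ^ 0x2);	i.e. 0x9
 *
 * completes tag 1 and leaves tags 0 and 3 outstanding, since that
 * helper takes the mask of tags still active *after* this pass.
 */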
2837
2838static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2839{
2840	struct mv_port_priv *pp;
2841	int edma_was_enabled;
2842
2843	/*
2844	 * Grab a snapshot of the EDMA_EN flag setting,
2845	 * so that we have a consistent view for this port,
2846	 * even if one of the routines we call changes it.
2847	 */
2848	pp = ap->private_data;
2849	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2850	/*
2851	 * Process completed CRPB response(s) before other events.
2852	 */
2853	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2854		mv_process_crpb_entries(ap, pp);
2855		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2856			mv_handle_fbs_ncq_dev_err(ap);
2857	}
2858	/*
2859	 * Handle chip-reported errors, or continue on to handle PIO.
2860	 */
2861	if (unlikely(port_cause & ERR_IRQ)) {
2862		mv_err_intr(ap);
2863	} else if (!edma_was_enabled) {
2864		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2865		if (qc)
2866			ata_bmdma_port_intr(ap, qc);
2867		else
2868			mv_unexpected_intr(ap, edma_was_enabled);
2869	}
2870}
2871
2872/**
2873 *      mv_host_intr - Handle all interrupts on the given host controller
2874 *      @host: host specific structure
2875 *      @main_irq_cause: Main interrupt cause register for the chip.
2876 *
2877 *      LOCKING:
2878 *      Inherited from caller.
2879 */
2880static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2881{
2882	struct mv_host_priv *hpriv = host->private_data;
2883	void __iomem *mmio = hpriv->base, *hc_mmio;
2884	unsigned int handled = 0, port;
2885
2886	/* If asserted, clear the "all ports" IRQ coalescing bit */
2887	if (main_irq_cause & ALL_PORTS_COAL_DONE)
2888		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2889
2890	for (port = 0; port < hpriv->n_ports; port++) {
2891		struct ata_port *ap = host->ports[port];
2892		unsigned int p, shift, hardport, port_cause;
2893
2894		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2895		/*
2896		 * Each hc within the host has its own hc_irq_cause register,
2897		 * where the interrupting ports bits get ack'd.
2898		 */
2899		if (hardport == 0) {	/* first port on this hc? */
2900			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2901			u32 port_mask, ack_irqs;
2902			/*
2903			 * Skip this entire hc if nothing pending for any ports
2904			 */
2905			if (!hc_cause) {
2906				port += MV_PORTS_PER_HC - 1;
2907				continue;
2908			}
2909			/*
2910			 * We don't need/want to read the hc_irq_cause register,
2911			 * because doing so hurts performance, and
2912			 * main_irq_cause already gives us everything we need.
2913			 *
2914			 * But we do have to *write* to the hc_irq_cause to ack
2915			 * the ports that we are handling this time through.
2916			 *
2917			 * This requires that we create a bitmap for those
2918			 * ports which interrupted us, and use that bitmap
2919			 * to ack (only) those ports via hc_irq_cause.
2920			 */
2921			ack_irqs = 0;
2922			if (hc_cause & PORTS_0_3_COAL_DONE)
2923				ack_irqs = HC_COAL_IRQ;
2924			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2925				if ((port + p) >= hpriv->n_ports)
2926					break;
2927				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2928				if (hc_cause & port_mask)
2929					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2930			}
2931			hc_mmio = mv_hc_base_from_port(mmio, port);
2932			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2933			handled = 1;
2934		}
2935		/*
2936		 * Handle interrupts signalled for this port:
2937		 */
2938		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2939		if (port_cause)
2940			mv_port_intr(ap, port_cause);
2941	}
2942	return handled;
2943}
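
/*
 * Editor's illustration of the ack bitmap built above: main_irq_cause
 * carries a DONE_IRQ/ERR_IRQ pair per port, while hc_irq_cause wants
 * a DMA_IRQ/DEV_IRQ pair per hardport.  If, say, only hardport 1 of
 * an hc is pending:
 *
 *	hc_cause  has (DONE_IRQ | ERR_IRQ) << (1 * 2) set, so
 *	ack_irqs |= (DMA_IRQ | DEV_IRQ) << 1;
 *
 * and the single writelfl(~ack_irqs) acks just that port, avoiding a
 * costly read of HC_IRQ_CAUSE.
 */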
2944
2945static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2946{
2947	struct mv_host_priv *hpriv = host->private_data;
2948	struct ata_port *ap;
2949	struct ata_queued_cmd *qc;
2950	struct ata_eh_info *ehi;
2951	unsigned int i, err_mask, printed = 0;
2952	u32 err_cause;
2953
2954	err_cause = readl(mmio + hpriv->irq_cause_offset);
2955
2956	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2957
2958	dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
2959	mv_dump_all_regs(mmio, to_pci_dev(host->dev));
2960
2961	writelfl(0, mmio + hpriv->irq_cause_offset);
2962
2963	for (i = 0; i < host->n_ports; i++) {
2964		ap = host->ports[i];
2965		if (!ata_link_offline(&ap->link)) {
2966			ehi = &ap->link.eh_info;
2967			ata_ehi_clear_desc(ehi);
2968			if (!printed++)
2969				ata_ehi_push_desc(ehi,
2970					"PCI err cause 0x%08x", err_cause);
2971			err_mask = AC_ERR_HOST_BUS;
2972			ehi->action = ATA_EH_RESET;
2973			qc = ata_qc_from_tag(ap, ap->link.active_tag);
2974			if (qc)
2975				qc->err_mask |= err_mask;
2976			else
2977				ehi->err_mask |= err_mask;
2978
2979			ata_port_freeze(ap);
2980		}
2981	}
2982	return 1;	/* handled */
2983}
2984
2985/**
2986 *      mv_interrupt - Main interrupt event handler
2987 *      @irq: unused
2988 *      @dev_instance: private data; in this case the host structure
2989 *
2990 *      Read the read only register to determine if any host
2991 *      controllers have pending interrupts.  If so, call lower level
2992 *      routine to handle.  Also check for PCI errors which are only
2993 *      reported here.
2994 *
2995 *      LOCKING:
2996 *      This routine holds the host lock while processing pending
2997 *      interrupts.
2998 */
2999static irqreturn_t mv_interrupt(int irq, void *dev_instance)
3000{
3001	struct ata_host *host = dev_instance;
3002	struct mv_host_priv *hpriv = host->private_data;
3003	unsigned int handled = 0;
3004	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3005	u32 main_irq_cause, pending_irqs;
3006
3007	spin_lock(&host->lock);
3008
3009	/* for MSI:  block new interrupts while in here */
3010	if (using_msi)
3011		mv_write_main_irq_mask(0, hpriv);
3012
3013	main_irq_cause = readl(hpriv->main_irq_cause_addr);
3014	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
3015	/*
3016	 * Deal with cases where we either have nothing pending, or have read
3017	 * a bogus register value which can indicate HW removal or PCI fault.
3018	 */
3019	if (pending_irqs && main_irq_cause != 0xffffffffU) {
3020		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3021			handled = mv_pci_error(host, hpriv->base);
3022		else
3023			handled = mv_host_intr(host, pending_irqs);
3024	}
3025
3026	/* for MSI: unmask; interrupt cause bits will retrigger now */
3027	if (using_msi)
3028		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3029
3030	spin_unlock(&host->lock);
3031
3032	return IRQ_RETVAL(handled);
3033}
3034
3035static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3036{
3037	unsigned int ofs;
3038
3039	switch (sc_reg_in) {
3040	case SCR_STATUS:
3041	case SCR_ERROR:
3042	case SCR_CONTROL:
3043		ofs = sc_reg_in * sizeof(u32);
3044		break;
3045	default:
3046		ofs = 0xffffffffU;
3047		break;
3048	}
3049	return ofs;
3050}
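
/*
 * Editor's note: the supported SCR registers occupy consecutive
 * 32-bit slots in the 50xx PHY block, so the offset is index * 4:
 *
 *	SCR_STATUS  (0) -> 0x0
 *	SCR_ERROR   (1) -> 0x4
 *	SCR_CONTROL (2) -> 0x8
 *
 * Anything else (e.g. SCR_ACTIVE, SCR_NOTIFICATION) is unsupported
 * here and flagged with the 0xffffffffU sentinel checked by the
 * callers below.
 */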
3051
3052static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3053{
3054	struct mv_host_priv *hpriv = link->ap->host->private_data;
3055	void __iomem *mmio = hpriv->base;
3056	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3057	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3058
3059	if (ofs != 0xffffffffU) {
3060		*val = readl(addr + ofs);
3061		return 0;
3062	} else
3063		return -EINVAL;
3064}
3065
3066static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3067{
3068	struct mv_host_priv *hpriv = link->ap->host->private_data;
3069	void __iomem *mmio = hpriv->base;
3070	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3071	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3072
3073	if (ofs != 0xffffffffU) {
3074		writelfl(val, addr + ofs);
3075		return 0;
3076	} else
3077		return -EINVAL;
3078}
3079
3080static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3081{
3082	struct pci_dev *pdev = to_pci_dev(host->dev);
3083	int early_5080;
3084
3085	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3086
3087	if (!early_5080) {
3088		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3089		tmp |= (1 << 0);
3090		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3091	}
3092
3093	mv_reset_pci_bus(host, mmio);
3094}
3095
3096static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3097{
3098	writel(0x0fcfffff, mmio + FLASH_CTL);
3099}
3100
3101static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3102			   void __iomem *mmio)
3103{
3104	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3105	u32 tmp;
3106
3107	tmp = readl(phy_mmio + MV5_PHY_MODE);
3108
3109	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
3110	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
3111}
3112
3113static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3114{
3115	u32 tmp;
3116
3117	writel(0, mmio + GPIO_PORT_CTL);
3118
3119	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3120
3121	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3122	tmp |= ~(1 << 0);
3123	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3124}
3125
3126static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3127			   unsigned int port)
3128{
3129	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3130	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3131	u32 tmp;
3132	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3133
3134	if (fix_apm_sq) {
3135		tmp = readl(phy_mmio + MV5_LTMODE);
3136		tmp |= (1 << 19);
3137		writel(tmp, phy_mmio + MV5_LTMODE);
3138
3139		tmp = readl(phy_mmio + MV5_PHY_CTL);
3140		tmp &= ~0x3;
3141		tmp |= 0x1;
3142		writel(tmp, phy_mmio + MV5_PHY_CTL);
3143	}
3144
3145	tmp = readl(phy_mmio + MV5_PHY_MODE);
3146	tmp &= ~mask;
3147	tmp |= hpriv->signal[port].pre;
3148	tmp |= hpriv->signal[port].amps;
3149	writel(tmp, phy_mmio + MV5_PHY_MODE);
3150}
3151
3152
3153#undef ZERO
3154#define ZERO(reg) writel(0, port_mmio + (reg))
3155static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3156			     unsigned int port)
3157{
3158	void __iomem *port_mmio = mv_port_base(mmio, port);
3159
3160	mv_reset_channel(hpriv, mmio, port);
3161
3162	ZERO(0x028);	/* command */
3163	writel(0x11f, port_mmio + EDMA_CFG);
3164	ZERO(0x004);	/* timer */
3165	ZERO(0x008);	/* irq err cause */
3166	ZERO(0x00c);	/* irq err mask */
3167	ZERO(0x010);	/* rq bah */
3168	ZERO(0x014);	/* rq inp */
3169	ZERO(0x018);	/* rq outp */
3170	ZERO(0x01c);	/* respq bah */
3171	ZERO(0x024);	/* respq outp */
3172	ZERO(0x020);	/* respq inp */
3173	ZERO(0x02c);	/* test control */
3174	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3175}
3176#undef ZERO
3177
3178#define ZERO(reg) writel(0, hc_mmio + (reg))
3179static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3180			unsigned int hc)
3181{
3182	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3183	u32 tmp;
3184
3185	ZERO(0x00c);
3186	ZERO(0x010);
3187	ZERO(0x014);
3188	ZERO(0x018);
3189
3190	tmp = readl(hc_mmio + 0x20);
3191	tmp &= 0x1c1c1c1c;
3192	tmp |= 0x03030303;
3193	writel(tmp, hc_mmio + 0x20);
3194}
3195#undef ZERO
3196
3197static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
3198			unsigned int n_hc)
3199{
3200	struct mv_host_priv *hpriv = host->private_data;
3201	unsigned int hc, port;
3202
3203	for (hc = 0; hc < n_hc; hc++) {
3204		for (port = 0; port < MV_PORTS_PER_HC; port++)
3205			mv5_reset_hc_port(hpriv, mmio,
3206					  (hc * MV_PORTS_PER_HC) + port);
3207
3208		mv5_reset_one_hc(hpriv, mmio, hc);
3209	}
3210
3211	return 0;
3212}
3213
3214#undef ZERO
3215#define ZERO(reg) writel(0, mmio + (reg))
3216static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3217{
3218	struct mv_host_priv *hpriv = host->private_data;
3219	u32 tmp;
3220
3221	tmp = readl(mmio + MV_PCI_MODE);
3222	tmp &= 0xff00ffff;
3223	writel(tmp, mmio + MV_PCI_MODE);
3224
3225	ZERO(MV_PCI_DISC_TIMER);
3226	ZERO(MV_PCI_MSI_TRIGGER);
3227	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3228	ZERO(MV_PCI_SERR_MASK);
3229	ZERO(hpriv->irq_cause_offset);
3230	ZERO(hpriv->irq_mask_offset);
3231	ZERO(MV_PCI_ERR_LOW_ADDRESS);
3232	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3233	ZERO(MV_PCI_ERR_ATTRIBUTE);
3234	ZERO(MV_PCI_ERR_COMMAND);
3235}
3236#undef ZERO
3237
3238static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3239{
3240	u32 tmp;
3241
3242	mv5_reset_flash(hpriv, mmio);
3243
3244	tmp = readl(mmio + GPIO_PORT_CTL);
3245	tmp &= 0x3;
3246	tmp |= (1 << 5) | (1 << 6);
3247	writel(tmp, mmio + GPIO_PORT_CTL);
3248}
3249
3250/*
3251 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @host: ATA host containing this HBA
3252 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused by this routine)
3253 *
3254 *      This routine only applies to 6xxx parts.
3255 *
3256 *      LOCKING:
3257 *      Inherited from caller.
3258 */
3259static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
3260			unsigned int n_hc)
3261{
3262	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3263	int i, rc = 0;
3264	u32 t;
3265
3266	/* Following procedure defined in PCI "main command and status
3267	 * register" table.
3268	 */
3269	t = readl(reg);
3270	writel(t | STOP_PCI_MASTER, reg);
3271
3272	for (i = 0; i < 1000; i++) {
3273		udelay(1);
3274		t = readl(reg);
3275		if (PCI_MASTER_EMPTY & t)
3276			break;
3277	}
3278	if (!(PCI_MASTER_EMPTY & t)) {
3279		dev_err(host->dev, "PCI master won't flush\n");
3280		rc = 1;
3281		goto done;
3282	}
3283
3284	/* set reset */
3285	i = 5;
3286	do {
3287		writel(t | GLOB_SFT_RST, reg);
3288		t = readl(reg);
3289		udelay(1);
3290	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
3291
3292	if (!(GLOB_SFT_RST & t)) {
3293		dev_err(host->dev, "can't set global reset\n");
3294		rc = 1;
3295		goto done;
3296	}
3297
3298	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
3299	i = 5;
3300	do {
3301		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3302		t = readl(reg);
3303		udelay(1);
3304	} while ((GLOB_SFT_RST & t) && (i-- > 0));
3305
3306	if (GLOB_SFT_RST & t) {
3307		dev_err(host->dev, "can't clear global reset\n");
3308		rc = 1;
3309	}
3310done:
3311	return rc;
3312}
3313
3314static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3315			   void __iomem *mmio)
3316{
3317	void __iomem *port_mmio;
3318	u32 tmp;
3319
3320	tmp = readl(mmio + RESET_CFG);
3321	if ((tmp & (1 << 0)) == 0) {
3322		hpriv->signal[idx].amps = 0x7 << 8;
3323		hpriv->signal[idx].pre = 0x1 << 5;
3324		return;
3325	}
3326
3327	port_mmio = mv_port_base(mmio, idx);
3328	tmp = readl(port_mmio + PHY_MODE2);
3329
3330	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3331	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3332}
3333
3334static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3335{
3336	writel(0x00000060, mmio + GPIO_PORT_CTL);
3337}
3338
3339static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3340			   unsigned int port)
3341{
3342	void __iomem *port_mmio = mv_port_base(mmio, port);
3343
3344	u32 hp_flags = hpriv->hp_flags;
3345	int fix_phy_mode2 =
3346		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3347	int fix_phy_mode4 =
3348		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3349	u32 m2, m3;
3350
3351	if (fix_phy_mode2) {
3352		m2 = readl(port_mmio + PHY_MODE2);
3353		m2 &= ~(1 << 16);
3354		m2 |= (1 << 31);
3355		writel(m2, port_mmio + PHY_MODE2);
3356
3357		udelay(200);
3358
3359		m2 = readl(port_mmio + PHY_MODE2);
3360		m2 &= ~((1 << 16) | (1 << 31));
3361		writel(m2, port_mmio + PHY_MODE2);
3362
3363		udelay(200);
3364	}
3365
3366	/*
3367	 * Gen-II/IIe PHY_MODE3 errata RM#2:
3368	 * Achieves better receiver noise performance than the h/w default:
3369	 */
3370	m3 = readl(port_mmio + PHY_MODE3);
3371	m3 = (m3 & 0x1f) | (0x5555601 << 5);
3372
3373	/* Guideline 88F5182 (GL# SATA-S11) */
3374	if (IS_SOC(hpriv))
3375		m3 &= ~0x1c;
3376
3377	if (fix_phy_mode4) {
3378		u32 m4 = readl(port_mmio + PHY_MODE4);
3379		/*
3380		 * Enforce reserved-bit restrictions on GenIIe devices only.
3381		 * For earlier chipsets, force only the internal config field
3382		 *  (workaround for errata FEr SATA#10 part 1).
3383		 */
3384		if (IS_GEN_IIE(hpriv))
3385			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3386		else
3387			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3388		writel(m4, port_mmio + PHY_MODE4);
3389	}
3390	/*
3391	 * Workaround for 60x1-B2 errata SATA#13:
3392	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3393	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3394	 * Or ensure we use writelfl() when writing PHY_MODE4.
3395	 */
3396	writel(m3, port_mmio + PHY_MODE3);
3397
3398	/* Revert values of pre-emphasis and signal amps to the saved ones */
3399	m2 = readl(port_mmio + PHY_MODE2);
3400
3401	m2 &= ~MV_M2_PREAMP_MASK;
3402	m2 |= hpriv->signal[port].amps;
3403	m2 |= hpriv->signal[port].pre;
3404	m2 &= ~(1 << 16);
3405
3406	/* according to mvSata 3.6.1, some IIE values are fixed */
3407	if (IS_GEN_IIE(hpriv)) {
3408		m2 &= ~0xC30FF01F;
3409		m2 |= 0x0000900F;
3410	}
3411
3412	writel(m2, port_mmio + PHY_MODE2);
3413}
3414
3415/* TODO: use the generic LED interface to configure the SATA Presence */
3416/* & Activity LEDs on the board */
3417static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3418				      void __iomem *mmio)
3419{
3420	return;
3421}
3422
3423static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3424			   void __iomem *mmio)
3425{
3426	void __iomem *port_mmio;
3427	u32 tmp;
3428
3429	port_mmio = mv_port_base(mmio, idx);
3430	tmp = readl(port_mmio + PHY_MODE2);
3431
3432	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3433	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3434}
3435
3436#undef ZERO
3437#define ZERO(reg) writel(0, port_mmio + (reg))
3438static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3439					void __iomem *mmio, unsigned int port)
3440{
3441	void __iomem *port_mmio = mv_port_base(mmio, port);
3442
3443	mv_reset_channel(hpriv, mmio, port);
3444
3445	ZERO(0x028);		/* command */
3446	writel(0x101f, port_mmio + EDMA_CFG);
3447	ZERO(0x004);		/* timer */
3448	ZERO(0x008);		/* irq err cause */
3449	ZERO(0x00c);		/* irq err mask */
3450	ZERO(0x010);		/* rq bah */
3451	ZERO(0x014);		/* rq inp */
3452	ZERO(0x018);		/* rq outp */
3453	ZERO(0x01c);		/* respq bah */
3454	ZERO(0x024);		/* respq outp */
3455	ZERO(0x020);		/* respq inp */
3456	ZERO(0x02c);		/* test control */
3457	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3458}
3459
3460#undef ZERO
3461
3462#define ZERO(reg) writel(0, hc_mmio + (reg))
3463static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3464				       void __iomem *mmio)
3465{
3466	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3467
3468	ZERO(0x00c);
3469	ZERO(0x010);
3470	ZERO(0x014);
3471
3472}
3473
3474#undef ZERO
3475
3476static int mv_soc_reset_hc(struct ata_host *host,
3477				  void __iomem *mmio, unsigned int n_hc)
3478{
3479	struct mv_host_priv *hpriv = host->private_data;
3480	unsigned int port;
3481
3482	for (port = 0; port < hpriv->n_ports; port++)
3483		mv_soc_reset_hc_port(hpriv, mmio, port);
3484
3485	mv_soc_reset_one_hc(hpriv, mmio);
3486
3487	return 0;
3488}
3489
3490static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3491				      void __iomem *mmio)
3492{
3493	return;
3494}
3495
3496static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3497{
3498	return;
3499}
3500
3501static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3502				  void __iomem *mmio, unsigned int port)
3503{
3504	void __iomem *port_mmio = mv_port_base(mmio, port);
3505	u32	reg;
3506
3507	reg = readl(port_mmio + PHY_MODE3);
3508	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
3509	reg |= (0x1 << 27);
3510	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
3511	reg |= (0x1 << 29);
3512	writel(reg, port_mmio + PHY_MODE3);
3513
3514	reg = readl(port_mmio + PHY_MODE4);
3515	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3516	reg |= (0x1 << 16);
3517	writel(reg, port_mmio + PHY_MODE4);
3518
3519	reg = readl(port_mmio + PHY_MODE9_GEN2);
3520	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3521	reg |= 0x8;
3522	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3523	writel(reg, port_mmio + PHY_MODE9_GEN2);
3524
3525	reg = readl(port_mmio + PHY_MODE9_GEN1);
3526	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3527	reg |= 0x8;
3528	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3529	writel(reg, port_mmio + PHY_MODE9_GEN1);
3530}
3531
3532/*
3533 *	soc_is_65n - check whether the SoC is a 65 nm device
3534 *
3535 *	Detect the SoC type by reading the PHYCFG_OFS register: it exists
3536 *	only on 65 nm devices, where it should contain a non-zero value;
3537 *	reading it on older devices returns 0.
3538 */
3539static bool soc_is_65n(struct mv_host_priv *hpriv)
3540{
3541	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3542
3543	if (readl(port0_mmio + PHYCFG_OFS))
3544		return true;
3545	return false;
3546}
3547
3548static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3549{
3550	u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3551
3552	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
3553	if (want_gen2i)
3554		ifcfg |= (1 << 7);		/* enable gen2i speed */
3555	writelfl(ifcfg, port_mmio + SATA_IFCFG);
3556}
3557
3558static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3559			     unsigned int port_no)
3560{
3561	void __iomem *port_mmio = mv_port_base(mmio, port_no);
3562
3563	/*
3564	 * The datasheet warns against setting EDMA_RESET when EDMA is active
3565	 * (but doesn't say what the problem might be).  So we first try
3566	 * to disable the EDMA engine before doing the EDMA_RESET operation.
3567	 */
3568	mv_stop_edma_engine(port_mmio);
3569	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3570
3571	if (!IS_GEN_I(hpriv)) {
3572		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3573		mv_setup_ifcfg(port_mmio, 1);
3574	}
3575	/*
3576	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3577	 * link, and physical layers.  It resets all SATA interface registers
3578	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3579	 */
3580	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3581	udelay(25);	/* allow reset propagation */
3582	writelfl(0, port_mmio + EDMA_CMD);
3583
3584	hpriv->ops->phy_errata(hpriv, mmio, port_no);
3585
3586	if (IS_GEN_I(hpriv))
3587		usleep_range(500, 1000);
3588}
3589
3590static void mv_pmp_select(struct ata_port *ap, int pmp)
3591{
3592	if (sata_pmp_supported(ap)) {
3593		void __iomem *port_mmio = mv_ap_base(ap);
3594		u32 reg = readl(port_mmio + SATA_IFCTL);
3595		int old = reg & 0xf;
3596
3597		if (old != pmp) {
3598			reg = (reg & ~0xf) | pmp;
3599			writelfl(reg, port_mmio + SATA_IFCTL);
3600		}
3601	}
3602}
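/*
 * (SATA_IFCTL bits 3:0 hold the port-multiplier device address used by
 *  subsequent non-EDMA commands; the "old != pmp" test above avoids a
 *  relatively slow MMIO write when the desired PMP is already selected.)
 */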
3603
3604static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3605				unsigned long deadline)
3606{
3607	mv_pmp_select(link->ap, sata_srst_pmp(link));
3608	return sata_std_hardreset(link, class, deadline);
3609}
3610
3611static int mv_softreset(struct ata_link *link, unsigned int *class,
3612				unsigned long deadline)
3613{
3614	mv_pmp_select(link->ap, sata_srst_pmp(link));
3615	return ata_sff_softreset(link, class, deadline);
3616}
3617
3618static int mv_hardreset(struct ata_link *link, unsigned int *class,
3619			unsigned long deadline)
3620{
3621	struct ata_port *ap = link->ap;
3622	struct mv_host_priv *hpriv = ap->host->private_data;
3623	struct mv_port_priv *pp = ap->private_data;
3624	void __iomem *mmio = hpriv->base;
3625	int rc, attempts = 0, extra = 0;
3626	u32 sstatus;
3627	bool online;
3628
3629	mv_reset_channel(hpriv, mmio, ap->port_no);
3630	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3631	pp->pp_flags &=
3632	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3633
3634	/* Workaround for errata FEr SATA#10 (part 2) */
3635	do {
3636		const unsigned int *timing =
3637				sata_ehc_deb_timing(&link->eh_context);
3638
3639		rc = sata_link_hardreset(link, timing, deadline + extra,
3640					 &online, NULL);
3641		rc = online ? -EAGAIN : rc;
3642		if (rc)
3643			return rc;
3644		sata_scr_read(link, SCR_STATUS, &sstatus);
3645		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3646			/* Force 1.5gb/s link speed and try again */
3647			mv_setup_ifcfg(mv_ap_base(ap), 0);
3648			if (time_after(jiffies + HZ, deadline))
3649				extra = HZ; /* only extend it once, max */
3650		}
3651	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
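	/*
	 * SStatus values tested above, per the standard SATA SCR0 fields
	 * (DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8):
	 *   0x000 - no device detected;
	 *   0x113 - device present, phy communication up at 1.5 Gb/s;
	 *   0x123 - device present, phy communication up at 3.0 Gb/s;
	 *   0x121 - device detected but phy communication not established,
	 *           the failure mode that the FEr SATA#10 workaround above
	 *           retries (eventually at a forced 1.5 Gb/s).
	 */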
3652	mv_save_cached_regs(ap);
3653	mv_edma_cfg(ap, 0, 0);
3654
3655	return rc;
3656}
3657
3658static void mv_eh_freeze(struct ata_port *ap)
3659{
3660	mv_stop_edma(ap);
3661	mv_enable_port_irqs(ap, 0);
3662}
3663
3664static void mv_eh_thaw(struct ata_port *ap)
3665{
3666	struct mv_host_priv *hpriv = ap->host->private_data;
3667	unsigned int port = ap->port_no;
3668	unsigned int hardport = mv_hardport_from_port(port);
3669	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3670	void __iomem *port_mmio = mv_ap_base(ap);
3671	u32 hc_irq_cause;
3672
3673	/* clear EDMA errors on this port */
3674	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3675
3676	/* clear pending irq events */
3677	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3678	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
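	/*
	 * (HC_IRQ_CAUSE is apparently cleared by writing 0 bits; see
	 *  mv_init_host(), which writes plain 0 to clear everything.
	 *  Writing the complement of this port's DEV/DMA bits therefore
	 *  clears only those two events and leaves the other ports'
	 *  pending bits alone.)
	 */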
3679
3680	mv_enable_port_irqs(ap, ERR_IRQ);
3681}
3682
3683/**
3684 *      mv_port_init - Perform some early initialization on a single port.
3685 *      @port: libata data structure storing shadow register addresses
3686 *      @port_mmio: base address of the port
3687 *
3688 *      Initialize shadow register mmio addresses, clear outstanding
3689 *      interrupts on the port, and unmask interrupts for the future
3690 *      start of the port.
3691 *
3692 *      LOCKING:
3693 *      Inherited from caller.
3694 */
3695static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
3696{
3697	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3698
3699	/* PIO related setup
3700	 */
3701	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3702	port->error_addr =
3703		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3704	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3705	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3706	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3707	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3708	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3709	port->status_addr =
3710		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3711	/* special case: control/altstatus doesn't have ATA_REG_ address */
3712	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
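	/*
	 * (The shadow register block exposes each taskfile register on its
	 *  own 32-bit word, hence the sizeof(u32) stride used above.)
	 */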
3713
3714	/* Clear any currently outstanding port interrupt conditions */
3715	serr = port_mmio + mv_scr_offset(SCR_ERROR);
3716	writelfl(readl(serr), serr);
3717	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3718
3719	/* unmask all non-transient EDMA error interrupts */
3720	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3721}
3722
3723static unsigned int mv_in_pcix_mode(struct ata_host *host)
3724{
3725	struct mv_host_priv *hpriv = host->private_data;
3726	void __iomem *mmio = hpriv->base;
3727	u32 reg;
3728
3729	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3730		return 0;	/* not PCI-X capable */
3731	reg = readl(mmio + MV_PCI_MODE);
3732	if ((reg & MV_PCI_MODE_MASK) == 0)
3733		return 0;	/* conventional PCI mode */
3734	return 1;	/* chip is in PCI-X mode */
3735}
3736
3737static int mv_pci_cut_through_okay(struct ata_host *host)
3738{
3739	struct mv_host_priv *hpriv = host->private_data;
3740	void __iomem *mmio = hpriv->base;
3741	u32 reg;
3742
3743	if (!mv_in_pcix_mode(host)) {
3744		reg = readl(mmio + MV_PCI_COMMAND);
3745		if (reg & MV_PCI_COMMAND_MRDTRIG)
3746			return 0; /* not okay */
3747	}
3748	return 1; /* okay */
3749}
3750
3751static void mv_60x1b2_errata_pci7(struct ata_host *host)
3752{
3753	struct mv_host_priv *hpriv = host->private_data;
3754	void __iomem *mmio = hpriv->base;
3755
3756	/* workaround for 60x1-B2 errata PCI#7 */
3757	if (mv_in_pcix_mode(host)) {
3758		u32 reg = readl(mmio + MV_PCI_COMMAND);
3759		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3760	}
3761}
3762
3763static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3764{
3765	struct pci_dev *pdev = to_pci_dev(host->dev);
3766	struct mv_host_priv *hpriv = host->private_data;
3767	u32 hp_flags = hpriv->hp_flags;
3768
3769	switch (board_idx) {
3770	case chip_5080:
3771		hpriv->ops = &mv5xxx_ops;
3772		hp_flags |= MV_HP_GEN_I;
3773
3774		switch (pdev->revision) {
3775		case 0x1:
3776			hp_flags |= MV_HP_ERRATA_50XXB0;
3777			break;
3778		case 0x3:
3779			hp_flags |= MV_HP_ERRATA_50XXB2;
3780			break;
3781		default:
3782			dev_warn(&pdev->dev,
3783				 "Applying 50XXB2 workarounds to unknown rev\n");
3784			hp_flags |= MV_HP_ERRATA_50XXB2;
3785			break;
3786		}
3787		break;
3788
3789	case chip_504x:
3790	case chip_508x:
3791		hpriv->ops = &mv5xxx_ops;
3792		hp_flags |= MV_HP_GEN_I;
3793
3794		switch (pdev->revision) {
3795		case 0x0:
3796			hp_flags |= MV_HP_ERRATA_50XXB0;
3797			break;
3798		case 0x3:
3799			hp_flags |= MV_HP_ERRATA_50XXB2;
3800			break;
3801		default:
3802			dev_warn(&pdev->dev,
3803				 "Applying B2 workarounds to unknown rev\n");
3804			hp_flags |= MV_HP_ERRATA_50XXB2;
3805			break;
3806		}
3807		break;
3808
3809	case chip_604x:
3810	case chip_608x:
3811		hpriv->ops = &mv6xxx_ops;
3812		hp_flags |= MV_HP_GEN_II;
3813
3814		switch (pdev->revision) {
3815		case 0x7:
3816			mv_60x1b2_errata_pci7(host);
3817			hp_flags |= MV_HP_ERRATA_60X1B2;
3818			break;
3819		case 0x9:
3820			hp_flags |= MV_HP_ERRATA_60X1C0;
3821			break;
3822		default:
3823			dev_warn(&pdev->dev,
3824				 "Applying B2 workarounds to unknown rev\n");
3825			hp_flags |= MV_HP_ERRATA_60X1B2;
3826			break;
3827		}
3828		break;
3829
3830	case chip_7042:
3831		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3832		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3833		    (pdev->device == 0x2300 || pdev->device == 0x2310))
3834		{
3835			/*
3836			 * Highpoint RocketRAID PCIe 23xx series cards:
3837			 *
3838			 * Unconfigured drives are treated as "Legacy"
3839			 * by the BIOS, and it overwrites sector 8 with
3840			 * a "Lgcy" metadata block prior to Linux boot.
3841			 *
3842			 * Configured drives (RAID or JBOD) leave sector 8
3843			 * alone, but instead overwrite a high numbered
3844			 * sector for the RAID metadata.  This sector can
3845			 * be determined exactly, by truncating the physical
3846			 * drive capacity to a nice even GB value.
3847			 *
3848			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
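			 *
			 * Example, with a purely hypothetical drive for
			 * illustration: n_sectors = 1953525168 (a 1 TB
			 * disk) gives 1953525168 & ~0xfffff = 1953497088;
			 * the mask rounds down to a 2^20-sector (512 MiB)
			 * boundary, leaving roughly the final 14 MiB of
			 * the drive for the metadata.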
3849			 *
3850			 * Warn the user, lest they think we're just buggy.
3851			 */
3852			dev_warn(&pdev->dev, "Highpoint RocketRAID"
3853				" BIOS CORRUPTS DATA on all attached drives,"
3854				" regardless of whether/how they are configured."
3855				" BEWARE!\n");
3856			dev_warn(&pdev->dev, "For data safety, do not"
3857				" use sectors 8-9 on \"Legacy\" drives,"
3858				" and avoid the final two gigabytes on"
3859				" all RocketRAID BIOS initialized drives.\n");
3860		}
3861		fallthrough;
3862	case chip_6042:
3863		hpriv->ops = &mv6xxx_ops;
3864		hp_flags |= MV_HP_GEN_IIE;
3865		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3866			hp_flags |= MV_HP_CUT_THROUGH;
3867
3868		switch (pdev->revision) {
3869		case 0x2: /* Rev.B0: the first/only public release */
3870			hp_flags |= MV_HP_ERRATA_60X1C0;
3871			break;
3872		default:
3873			dev_warn(&pdev->dev,
3874				 "Applying 60X1C0 workarounds to unknown rev\n");
3875			hp_flags |= MV_HP_ERRATA_60X1C0;
3876			break;
3877		}
3878		break;
3879	case chip_soc:
3880		if (soc_is_65n(hpriv))
3881			hpriv->ops = &mv_soc_65n_ops;
3882		else
3883			hpriv->ops = &mv_soc_ops;
3884		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3885			MV_HP_ERRATA_60X1C0;
3886		break;
3887
3888	default:
3889		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
3890		return -EINVAL;
3891	}
3892
3893	hpriv->hp_flags = hp_flags;
3894	if (hp_flags & MV_HP_PCIE) {
3895		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
3896		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
3897		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
3898	} else {
3899		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
3900		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
3901		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
3902	}
3903
3904	return 0;
3905}
3906
3907/**
3908 *      mv_init_host - Perform some early initialization of the host.
3909 *	@host: ATA host to initialize
3910 *
3911 *      If possible, do an early global reset of the host.  Then do
3912 *      our port init and clear/unmask all/relevant host interrupts.
3913 *
3914 *      LOCKING:
3915 *      Inherited from caller.
3916 */
3917static int mv_init_host(struct ata_host *host)
3918{
3919	int rc = 0, n_hc, port, hc;
3920	struct mv_host_priv *hpriv = host->private_data;
3921	void __iomem *mmio = hpriv->base;
3922
3923	rc = mv_chip_id(host, hpriv->board_idx);
3924	if (rc)
3925		goto done;
3926
3927	if (IS_SOC(hpriv)) {
3928		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3929		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
3930	} else {
3931		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3932		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
3933	}
3934
3935	/* initialize shadow irq mask with register's value */
3936	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3937
3938	/* global interrupt mask: 0 == mask everything */
3939	mv_set_main_irq_mask(host, ~0, 0);
3940
3941	n_hc = mv_get_hc_count(host->ports[0]->flags);
3942
3943	for (port = 0; port < host->n_ports; port++)
3944		if (hpriv->ops->read_preamp)
3945			hpriv->ops->read_preamp(hpriv, port, mmio);
3946
3947	rc = hpriv->ops->reset_hc(host, mmio, n_hc);
3948	if (rc)
3949		goto done;
3950
3951	hpriv->ops->reset_flash(hpriv, mmio);
3952	hpriv->ops->reset_bus(host, mmio);
3953	hpriv->ops->enable_leds(hpriv, mmio);
3954
3955	for (port = 0; port < host->n_ports; port++) {
3956		struct ata_port *ap = host->ports[port];
3957		void __iomem *port_mmio = mv_port_base(mmio, port);
3958
3959		mv_port_init(&ap->ioaddr, port_mmio);
3960	}
3961
3962	for (hc = 0; hc < n_hc; hc++) {
3963		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3964
3965		dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
3966			"(before clear)=0x%08x\n", hc,
3967			readl(hc_mmio + HC_CFG),
3968			readl(hc_mmio + HC_IRQ_CAUSE));
3969
3970		/* Clear any currently outstanding hc interrupt conditions */
3971		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3972	}
3973
3974	if (!IS_SOC(hpriv)) {
3975		/* Clear any currently outstanding host interrupt conditions */
3976		writelfl(0, mmio + hpriv->irq_cause_offset);
3977
3978		/* and unmask interrupt generation for host regs */
3979		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3980	}
3981
3982	/*
3983	 * enable only global host interrupts for now.
3984	 * The per-port interrupts get done later as ports are set up.
3985	 */
3986	mv_set_main_irq_mask(host, 0, PCI_ERR);
3987	mv_set_irq_coalescing(host, irq_coalescing_io_count,
3988				    irq_coalescing_usecs);
3989done:
3990	return rc;
3991}
3992
3993static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3994{
3995	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3996							     MV_CRQB_Q_SZ, 0);
3997	if (!hpriv->crqb_pool)
3998		return -ENOMEM;
3999
4000	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4001							     MV_CRPB_Q_SZ, 0);
4002	if (!hpriv->crpb_pool)
4003		return -ENOMEM;
4004
4005	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4006							     MV_SG_TBL_SZ, 0);
4007	if (!hpriv->sg_tbl_pool)
4008		return -ENOMEM;
4009
4010	return 0;
4011}
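/*
 * Each pool above is created with align == size, so every allocation
 * comes back naturally aligned to its own length: 1 KB for the CRQB
 * ring, 256 B for the CRPB ring, and 4 KB for SG tables, which meets
 * the controller's queue/ePRD alignment requirements.
 */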
4012
4013static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4014				 const struct mbus_dram_target_info *dram)
4015{
4016	int i;
4017
4018	for (i = 0; i < 4; i++) {
4019		writel(0, hpriv->base + WINDOW_CTRL(i));
4020		writel(0, hpriv->base + WINDOW_BASE(i));
4021	}
4022
4023	for (i = 0; i < dram->num_cs; i++) {
4024		const struct mbus_dram_window *cs = dram->cs + i;
4025
4026		writel(((cs->size - 1) & 0xffff0000) |
4027			(cs->mbus_attr << 8) |
4028			(dram->mbus_dram_target_id << 4) | 1,
4029			hpriv->base + WINDOW_CTRL(i));
4030		writel(cs->base, hpriv->base + WINDOW_BASE(i));
4031	}
4032}
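/*
 * Bit layout of each WINDOW_CTRL value written above (a sketch based on
 * the Orion-family mbus conventions, for reference only):
 *   bit      0 - window enable
 *   bits   7:4 - mbus DRAM target id
 *   bits  15:8 - target attributes (select the DRAM chip-select)
 *   bits 31:16 - bits 31:16 of (window size - 1), i.e. windows are
 *                sized and aligned in 64 KB units
 */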
4033
4034/**
4035 *      mv_platform_probe - handle a positive probe of an SoC Marvell
4036 *      host
4037 *      @pdev: platform device found
4038 *
4039 *      LOCKING:
4040 *      Inherited from caller.
4041 */
4042static int mv_platform_probe(struct platform_device *pdev)
4043{
4044	const struct mv_sata_platform_data *mv_platform_data;
4045	const struct mbus_dram_target_info *dram;
4046	const struct ata_port_info *ppi[] =
4047	    { &mv_port_info[chip_soc], NULL };
4048	struct ata_host *host;
4049	struct mv_host_priv *hpriv;
4050	struct resource *res;
4051	int n_ports = 0, irq = 0;
4052	int rc;
4053	int port;
4054
4055	ata_print_version_once(&pdev->dev, DRV_VERSION);
4056
4057	/*
4058	 * Simple resource validation ...
4059	 */
4060	if (unlikely(pdev->num_resources != 1)) {
4061		dev_err(&pdev->dev, "invalid number of resources\n");
4062		return -EINVAL;
4063	}
4064
4065	/*
4066	 * Get the register base first
4067	 */
4068	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4069	if (res == NULL)
4070		return -EINVAL;
4071
4072	/* allocate host */
4073	if (pdev->dev.of_node) {
4074		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
4075					   &n_ports);
4076		if (rc) {
4077			dev_err(&pdev->dev,
4078				"error parsing nr-ports property: %d\n", rc);
4079			return rc;
4080		}
4081
4082		if (n_ports <= 0) {
4083			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
4084				n_ports);
4085			return -EINVAL;
4086		}
4087
4088		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4089	} else {
4090		mv_platform_data = dev_get_platdata(&pdev->dev);
4091		n_ports = mv_platform_data->n_ports;
4092		irq = platform_get_irq(pdev, 0);
4093	}
4094	if (irq < 0)
4095		return irq;
4096	if (!irq)
4097		return -EINVAL;
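	/*
	 * (irq_of_parse_and_map() returns 0 on failure, whereas
	 *  platform_get_irq() returns a negative errno, so both the
	 *  negative and the zero cases above must be rejected.)
	 */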
4098
4099	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4100	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4101
4102	if (!host || !hpriv)
4103		return -ENOMEM;
4104	hpriv->port_clks = devm_kcalloc(&pdev->dev,
4105					n_ports, sizeof(struct clk *),
4106					GFP_KERNEL);
4107	if (!hpriv->port_clks)
4108		return -ENOMEM;
4109	hpriv->port_phys = devm_kcalloc(&pdev->dev,
4110					n_ports, sizeof(struct phy *),
4111					GFP_KERNEL);
4112	if (!hpriv->port_phys)
4113		return -ENOMEM;
4114	host->private_data = hpriv;
4115	hpriv->board_idx = chip_soc;
4116
4117	host->iomap = NULL;
4118	hpriv->base = devm_ioremap(&pdev->dev, res->start,
4119				   resource_size(res));
4120	if (!hpriv->base)
4121		return -ENOMEM;
4122
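	/*
	 * The SoC resource maps only the SATA host-controller block, while
	 * the register offsets used throughout this driver (mv_port_base()
	 * and friends) assume the PCI-style address map in which SATAHC0
	 * begins at offset 0x20000 (SATAHC0_REG_BASE); biasing the base
	 * pointer down keeps those shared helpers valid.
	 */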
4123	hpriv->base -= SATAHC0_REG_BASE;
4124
4125	hpriv->clk = clk_get(&pdev->dev, NULL);
4126	if (IS_ERR(hpriv->clk)) {
4127		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4128	} else {
4129		rc = clk_prepare_enable(hpriv->clk);
4130		if (rc)
4131			goto err;
4132	}
4133
4134	for (port = 0; port < n_ports; port++) {
4135		char port_number[16];
4136		sprintf(port_number, "%d", port);
4137		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4138		if (!IS_ERR(hpriv->port_clks[port]))
4139			clk_prepare_enable(hpriv->port_clks[port]);
4140
4141		sprintf(port_number, "port%d", port);
4142		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4143							       port_number);
4144		if (IS_ERR(hpriv->port_phys[port])) {
4145			rc = PTR_ERR(hpriv->port_phys[port]);
4146			hpriv->port_phys[port] = NULL;
4147			if (rc != -EPROBE_DEFER)
4148				dev_warn(&pdev->dev, "error getting phy %d", rc);
4149
4150			/* Cleanup only the initialized ports */
4151			hpriv->n_ports = port;
4152			goto err;
4153		} else
4154			phy_power_on(hpriv->port_phys[port]);
4155	}
4156
4157	/* All the ports have been initialized */
4158	hpriv->n_ports = n_ports;
4159
4160	/*
4161	 * (Re-)program MBUS remapping windows if we are asked to.
4162	 */
4163	dram = mv_mbus_dram_info();
4164	if (dram)
4165		mv_conf_mbus_windows(hpriv, dram);
4166
4167	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4168	if (rc)
4169		goto err;
4170
4171	/*
4172	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
4173	 * updated in the LP_PHY_CTL register.
4174	 */
4175	if (pdev->dev.of_node &&
4176		of_device_is_compatible(pdev->dev.of_node,
4177					"marvell,armada-370-sata"))
4178		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4179
4180	/* initialize adapter */
4181	rc = mv_init_host(host);
4182	if (rc)
4183		goto err;
4184
4185	dev_info(&pdev->dev, "slots %u ports %d\n",
4186		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4187
4188	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4189	if (!rc)
4190		return 0;
4191
4192err:
4193	if (!IS_ERR(hpriv->clk)) {
4194		clk_disable_unprepare(hpriv->clk);
4195		clk_put(hpriv->clk);
4196	}
4197	for (port = 0; port < hpriv->n_ports; port++) {
4198		if (!IS_ERR(hpriv->port_clks[port])) {
4199			clk_disable_unprepare(hpriv->port_clks[port]);
4200			clk_put(hpriv->port_clks[port]);
4201		}
4202		phy_power_off(hpriv->port_phys[port]);
4203	}
4204
4205	return rc;
4206}
4207
4208/**
4209 *      mv_platform_remove - unplug a platform interface
4211 *      @pdev: platform device
4212 *
4213 *      A platform bus SATA device has been unplugged. Perform the needed
4214 *      cleanup. Also called on module unload for any active devices.
4215 */
4216static void mv_platform_remove(struct platform_device *pdev)
4217{
4218	struct ata_host *host = platform_get_drvdata(pdev);
4219	struct mv_host_priv *hpriv = host->private_data;
4220	int port;
4221	ata_host_detach(host);
4222
4223	if (!IS_ERR(hpriv->clk)) {
4224		clk_disable_unprepare(hpriv->clk);
4225		clk_put(hpriv->clk);
4226	}
4227	for (port = 0; port < host->n_ports; port++) {
4228		if (!IS_ERR(hpriv->port_clks[port])) {
4229			clk_disable_unprepare(hpriv->port_clks[port]);
4230			clk_put(hpriv->port_clks[port]);
4231		}
4232		phy_power_off(hpriv->port_phys[port]);
4233	}
4234}
4235
4236#ifdef CONFIG_PM_SLEEP
4237static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4238{
4239	struct ata_host *host = platform_get_drvdata(pdev);
4240
4241	if (host)
4242		ata_host_suspend(host, state);
4243	return 0;
4244}
4245
4246static int mv_platform_resume(struct platform_device *pdev)
4247{
4248	struct ata_host *host = platform_get_drvdata(pdev);
4249	const struct mbus_dram_target_info *dram;
4250	int ret;
4251
4252	if (host) {
4253		struct mv_host_priv *hpriv = host->private_data;
4254
4255		/*
4256		 * (Re-)program MBUS remapping windows if we are asked to.
4257		 */
4258		dram = mv_mbus_dram_info();
4259		if (dram)
4260			mv_conf_mbus_windows(hpriv, dram);
4261
4262		/* initialize adapter */
4263		ret = mv_init_host(host);
4264		if (ret) {
4265			dev_err(&pdev->dev, "Error during HW init\n");
4266			return ret;
4267		}
4268		ata_host_resume(host);
4269	}
4270
4271	return 0;
4272}
4273#else
4274#define mv_platform_suspend NULL
4275#define mv_platform_resume NULL
4276#endif
4277
4278#ifdef CONFIG_OF
4279static const struct of_device_id mv_sata_dt_ids[] = {
4280	{ .compatible = "marvell,armada-370-sata", },
4281	{ .compatible = "marvell,orion-sata", },
4282	{ /* sentinel */ }
4283};
4284MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4285#endif
4286
4287static struct platform_driver mv_platform_driver = {
4288	.probe		= mv_platform_probe,
4289	.remove_new	= mv_platform_remove,
4290	.suspend	= mv_platform_suspend,
4291	.resume		= mv_platform_resume,
4292	.driver		= {
4293		.name = DRV_NAME,
4294		.of_match_table = of_match_ptr(mv_sata_dt_ids),
4295	},
4296};
4297
4298
4299#ifdef CONFIG_PCI
4300static int mv_pci_init_one(struct pci_dev *pdev,
4301			   const struct pci_device_id *ent);
4302#ifdef CONFIG_PM_SLEEP
4303static int mv_pci_device_resume(struct pci_dev *pdev);
4304#endif
4305
4306
4307static struct pci_driver mv_pci_driver = {
4308	.name			= DRV_NAME,
4309	.id_table		= mv_pci_tbl,
4310	.probe			= mv_pci_init_one,
4311	.remove			= ata_pci_remove_one,
4312#ifdef CONFIG_PM_SLEEP
4313	.suspend		= ata_pci_device_suspend,
4314	.resume			= mv_pci_device_resume,
4315#endif
4316
4317};
4318
4319/**
4320 *      mv_print_info - Dump key info to kernel log for perusal.
4321 *      @host: ATA host to print info about
4322 *
4323 *      FIXME: complete this.
4324 *
4325 *      LOCKING:
4326 *      Inherited from caller.
4327 */
4328static void mv_print_info(struct ata_host *host)
4329{
4330	struct pci_dev *pdev = to_pci_dev(host->dev);
4331	struct mv_host_priv *hpriv = host->private_data;
4332	u8 scc;
4333	const char *scc_s, *gen;
4334
4335	/* Read the PCI class code to report whether the chip presents
4336	 * itself as a SCSI or a RAID class device
4337	 */
4338	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4339	if (scc == 0)
4340		scc_s = "SCSI";
4341	else if (scc == 0x01)
4342		scc_s = "RAID";
4343	else
4344		scc_s = "?";
4345
4346	if (IS_GEN_I(hpriv))
4347		gen = "I";
4348	else if (IS_GEN_II(hpriv))
4349		gen = "II";
4350	else if (IS_GEN_IIE(hpriv))
4351		gen = "IIE";
4352	else
4353		gen = "?";
4354
4355	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4356		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4357		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4358}
4359
4360/**
4361 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
4362 *      @pdev: PCI device found
4363 *      @ent: PCI device ID entry for the matched host
4364 *
4365 *      LOCKING:
4366 *      Inherited from caller.
4367 */
4368static int mv_pci_init_one(struct pci_dev *pdev,
4369			   const struct pci_device_id *ent)
4370{
4371	unsigned int board_idx = (unsigned int)ent->driver_data;
4372	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4373	struct ata_host *host;
4374	struct mv_host_priv *hpriv;
4375	int n_ports, port, rc;
4376
4377	ata_print_version_once(&pdev->dev, DRV_VERSION);
4378
4379	/* allocate host */
4380	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4381
4382	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4383	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4384	if (!host || !hpriv)
4385		return -ENOMEM;
4386	host->private_data = hpriv;
4387	hpriv->n_ports = n_ports;
4388	hpriv->board_idx = board_idx;
4389
4390	/* acquire resources */
4391	rc = pcim_enable_device(pdev);
4392	if (rc)
4393		return rc;
4394
4395	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4396	if (rc == -EBUSY)
4397		pcim_pin_device(pdev);
4398	if (rc)
4399		return rc;
4400	host->iomap = pcim_iomap_table(pdev);
4401	hpriv->base = host->iomap[MV_PRIMARY_BAR];
4402
4403	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4404	if (rc) {
4405		dev_err(&pdev->dev, "DMA enable failed\n");
4406		return rc;
4407	}
4408
4409	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4410	if (rc)
4411		return rc;
4412
4413	for (port = 0; port < host->n_ports; port++) {
4414		struct ata_port *ap = host->ports[port];
4415		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4416		unsigned int offset = port_mmio - hpriv->base;
4417
4418		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4419		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4420	}
4421
4422	/* initialize adapter */
4423	rc = mv_init_host(host);
4424	if (rc)
4425		return rc;
4426
4427	/* Enable message-signaled interrupts, if requested */
4428	if (msi && pci_enable_msi(pdev) == 0)
4429		hpriv->hp_flags |= MV_HP_FLAG_MSI;
4430
4431	mv_dump_pci_cfg(pdev, 0x68);
4432	mv_print_info(host);
4433
4434	pci_set_master(pdev);
4435	pci_try_set_mwi(pdev);
4436	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4437				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4438}
4439
4440#ifdef CONFIG_PM_SLEEP
4441static int mv_pci_device_resume(struct pci_dev *pdev)
4442{
4443	struct ata_host *host = pci_get_drvdata(pdev);
4444	int rc;
4445
4446	rc = ata_pci_device_do_resume(pdev);
4447	if (rc)
4448		return rc;
4449
4450	/* initialize adapter */
4451	rc = mv_init_host(host);
4452	if (rc)
4453		return rc;
4454
4455	ata_host_resume(host);
4456
4457	return 0;
4458}
4459#endif
4460#endif
4461
4462static int __init mv_init(void)
4463{
4464	int rc = -ENODEV;
4465#ifdef CONFIG_PCI
4466	rc = pci_register_driver(&mv_pci_driver);
4467	if (rc < 0)
4468		return rc;
4469#endif
4470	rc = platform_driver_register(&mv_platform_driver);
4471
4472#ifdef CONFIG_PCI
4473	if (rc < 0)
4474		pci_unregister_driver(&mv_pci_driver);
4475#endif
4476	return rc;
4477}
4478
4479static void __exit mv_exit(void)
4480{
4481#ifdef CONFIG_PCI
4482	pci_unregister_driver(&mv_pci_driver);
4483#endif
4484	platform_driver_unregister(&mv_platform_driver);
4485}
4486
4487MODULE_AUTHOR("Brett Russ");
4488MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4489MODULE_LICENSE("GPL v2");
4490MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4491MODULE_VERSION(DRV_VERSION);
4492MODULE_ALIAS("platform:" DRV_NAME);
4493
4494module_init(mv_init);
4495module_exit(mv_exit);
 869 * hardport is the other output, in range 0..3.
 870 *
 871 * Note that port and hardport may be the same variable in some cases.
 872 */
 873#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
 874{								\
 875	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
 876	hardport = mv_hardport_from_port(port);			\
 877	shift   += hardport * 2;				\
 878}
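
     /*
      * Worked example (illustrative): for port == 5 on an 8-port chip,
      * mv_hc_from_port(5) == 1 and mv_hardport_from_port(5) == 1, so
      * shift == HC_SHIFT + 2, and this port's DONE_IRQ/ERR_IRQ bits
      * start at that bit position in the main_irq_cause and
      * main_irq_mask registers (cf. mv_enable_port_irqs() below).
      */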
 879
 880static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 881{
 882	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
 883}
 884
 885static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
 886						 unsigned int port)
 887{
 888	return mv_hc_base(base, mv_hc_from_port(port));
 889}
 890
 891static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 892{
 893	return  mv_hc_base_from_port(base, port) +
 894		MV_SATAHC_ARBTR_REG_SZ +
 895		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 896}
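
     /*
      * Illustrative note: a port's registers therefore live at
      * hc_base + MV_SATAHC_ARBTR_REG_SZ + hardport * MV_PORT_REG_SZ;
      * e.g. port 5 (HC 1, hardport 1) is one MV_PORT_REG_SZ window
      * past the arbiter region of HC 1.
      */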
 897
 898static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
 899{
 900	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
 901	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
 902
 903	return hc_mmio + ofs;
 904}
 905
 906static inline void __iomem *mv_host_base(struct ata_host *host)
 907{
 908	struct mv_host_priv *hpriv = host->private_data;
 909	return hpriv->base;
 910}
 911
 912static inline void __iomem *mv_ap_base(struct ata_port *ap)
 913{
 914	return mv_port_base(mv_host_base(ap->host), ap->port_no);
 915}
 916
 917static inline int mv_get_hc_count(unsigned long port_flags)
 918{
 919	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 920}
 921
 922/**
 923 *      mv_save_cached_regs - (re-)initialize cached port registers
 924 *      @ap: the port whose registers we are caching
 925 *
 926 *	Initialize the local cache of port registers,
 927 *	so that reading them over and over again can
 928 *	be avoided on the hotter paths of this driver.
 929 *	This saves a few microseconds each time we switch
 930	 *	to/from EDMA mode to perform (e.g.) a drive cache flush.
 931 */
 932static void mv_save_cached_regs(struct ata_port *ap)
 933{
 934	void __iomem *port_mmio = mv_ap_base(ap);
 935	struct mv_port_priv *pp = ap->private_data;
 936
 937	pp->cached.fiscfg = readl(port_mmio + FISCFG);
 938	pp->cached.ltmode = readl(port_mmio + LTMODE);
 939	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
 940	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
 941}
 942
 943/**
 944 *      mv_write_cached_reg - write to a cached port register
 945 *      @addr: hardware address of the register
 946 *      @old: pointer to cached value of the register
 947 *      @new: new value for the register
 948 *
 949 *	Write a new value to a cached register,
 950 *	but only if the value is different from before.
 951 */
 952static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
 953{
 954	if (new != *old) {
 955		unsigned long laddr;
 956		*old = new;
 957		/*
 958		 * Workaround for 88SX60x1-B2 FEr SATA#13:
 959		 * Read-after-write is needed to prevent generating 64-bit
 960		 * write cycles on the PCI bus for SATA interface registers
 961		 * at offsets ending in 0x4 or 0xc.
 962		 *
 963		 * Looks like a lot of fuss, but it avoids an unnecessary
 964		 * +1 usec read-after-write delay for unaffected registers.
 965		 */
 966		laddr = (long)addr & 0xffff;
 967		if (laddr >= 0x300 && laddr <= 0x33c) {
 968			laddr &= 0x000f;
 969			if (laddr == 0x4 || laddr == 0xc) {
 970				writelfl(new, addr); /* read after write */
 971				return;
 972			}
 973		}
 974		writel(new, addr); /* unaffected by the errata */
 975	}
 976}
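
     /*
      * For illustration: the per-port SATA interface registers occupy
      * offsets 0x300..0x33c, so a write to (e.g.) 0x304 or 0x30c takes
      * the flushed writelfl() path above, while 0x300 or 0x308 gets a
      * plain posted writel().
      */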
 977
 978static void mv_set_edma_ptrs(void __iomem *port_mmio,
 979			     struct mv_host_priv *hpriv,
 980			     struct mv_port_priv *pp)
 981{
 982	u32 index;
 983
 984	/*
 985	 * initialize request queue
 986	 */
 987	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
 988	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
 989
 990	WARN_ON(pp->crqb_dma & 0x3ff);
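     	/*
     	 * The "(x >> 16) >> 16" idiom below yields the upper 32 bits of
     	 * a dma_addr_t without shifting by 32, which would be undefined
     	 * behaviour when dma_addr_t is only 32 bits wide.
     	 */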
 991	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
 992	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
 993		 port_mmio + EDMA_REQ_Q_IN_PTR);
 994	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
 995
 996	/*
 997	 * initialize response queue
 998	 */
 999	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
1000	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1001
1002	WARN_ON(pp->crpb_dma & 0xff);
1003	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1004	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1005	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1006		 port_mmio + EDMA_RSP_Q_OUT_PTR);
1007}
1008
1009static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1010{
1011	/*
1012	 * When writing to the main_irq_mask in hardware,
1013	 * we must ensure exclusivity between the interrupt coalescing bits
1014	 * and the corresponding individual port DONE_IRQ bits.
1015	 *
1016	 * Note that this register is really an "IRQ enable" register,
1017	 * not an "IRQ mask" register as Marvell's naming might suggest.
1018	 */
1019	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1020		mask &= ~DONE_IRQ_0_3;
1021	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1022		mask &= ~DONE_IRQ_4_7;
1023	writelfl(mask, hpriv->main_irq_mask_addr);
1024}
1025
1026static void mv_set_main_irq_mask(struct ata_host *host,
1027				 u32 disable_bits, u32 enable_bits)
1028{
1029	struct mv_host_priv *hpriv = host->private_data;
1030	u32 old_mask, new_mask;
1031
1032	old_mask = hpriv->main_irq_mask;
1033	new_mask = (old_mask & ~disable_bits) | enable_bits;
1034	if (new_mask != old_mask) {
1035		hpriv->main_irq_mask = new_mask;
1036		mv_write_main_irq_mask(new_mask, hpriv);
1037	}
1038}
1039
1040static void mv_enable_port_irqs(struct ata_port *ap,
1041				     unsigned int port_bits)
1042{
1043	unsigned int shift, hardport, port = ap->port_no;
1044	u32 disable_bits, enable_bits;
1045
1046	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1047
1048	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1049	enable_bits  = port_bits << shift;
1050	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1051}
1052
1053static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1054					  void __iomem *port_mmio,
1055					  unsigned int port_irqs)
1056{
1057	struct mv_host_priv *hpriv = ap->host->private_data;
1058	int hardport = mv_hardport_from_port(ap->port_no);
1059	void __iomem *hc_mmio = mv_hc_base_from_port(
1060				mv_host_base(ap->host), ap->port_no);
1061	u32 hc_irq_cause;
1062
1063	/* clear EDMA event indicators, if any */
1064	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1065
1066	/* clear pending irq events */
1067	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1068	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1069
1070	/* clear FIS IRQ Cause */
1071	if (IS_GEN_IIE(hpriv))
1072		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1073
1074	mv_enable_port_irqs(ap, port_irqs);
1075}
1076
1077static void mv_set_irq_coalescing(struct ata_host *host,
1078				  unsigned int count, unsigned int usecs)
1079{
1080	struct mv_host_priv *hpriv = host->private_data;
1081	void __iomem *mmio = hpriv->base, *hc_mmio;
1082	u32 coal_enable = 0;
1083	unsigned long flags;
1084	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1085	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1086							ALL_PORTS_COAL_DONE;
1087
1088	/* Disable IRQ coalescing if either threshold is zero */
1089	if (!usecs || !count) {
1090		clks = count = 0;
1091	} else {
1092		/* Respect maximum limits of the hardware */
1093		clks = usecs * COAL_CLOCKS_PER_USEC;
1094		if (clks > MAX_COAL_TIME_THRESHOLD)
1095			clks = MAX_COAL_TIME_THRESHOLD;
1096		if (count > MAX_COAL_IO_COUNT)
1097			count = MAX_COAL_IO_COUNT;
1098	}
1099
1100	spin_lock_irqsave(&host->lock, flags);
1101	mv_set_main_irq_mask(host, coal_disable, 0);
1102
1103	if (is_dual_hc && !IS_GEN_I(hpriv)) {
1104		/*
1105		 * GEN_II/GEN_IIE with dual host controllers:
1106		 * one set of global thresholds for the entire chip.
1107		 */
1108		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
1109		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1110		/* clear leftover coal IRQ bit */
1111		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1112		if (count)
1113			coal_enable = ALL_PORTS_COAL_DONE;
1114		clks = count = 0; /* force clearing of regular regs below */
1115	}
1116
1117	/*
1118	 * All chips: independent thresholds for each HC on the chip.
1119	 */
1120	hc_mmio = mv_hc_base_from_port(mmio, 0);
1121	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1122	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1123	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1124	if (count)
1125		coal_enable |= PORTS_0_3_COAL_DONE;
1126	if (is_dual_hc) {
1127		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1128		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1129		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1130		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1131		if (count)
1132			coal_enable |= PORTS_4_7_COAL_DONE;
1133	}
1134
1135	mv_set_main_irq_mask(host, 0, coal_enable);
1136	spin_unlock_irqrestore(&host->lock, flags);
1137}
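
     /*
      * Worked example (illustrative): irq_coalescing_usecs=100 with
      * irq_coalescing_io_count=32 programs clks = 100 *
      * COAL_CLOCKS_PER_USEC = 15000, so a coalesced interrupt fires
      * after 32 completed commands, or once a completion has been held
      * for 100 usecs, whichever comes first.
      */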
1138
1139/**
1140 *      mv_start_edma - Enable eDMA engine
 1141 *      @ap: ATA channel to manipulate
 1142 *      @port_mmio: port registers base address
 1143 *      @pp: port private data
 1144 *      @protocol: taskfile protocol of the command being started
 1145 *
      *      If eDMA is already running in the wrong mode (NCQ vs.
      *      non-NCQ), it is stopped first, then reconfigured and
      *      re-enabled in the requested mode.
1146 *
1147 *      LOCKING:
1148 *      Inherited from caller.
1149 */
1150static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1151			 struct mv_port_priv *pp, u8 protocol)
1152{
1153	int want_ncq = (protocol == ATA_PROT_NCQ);
1154
1155	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1156		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1157		if (want_ncq != using_ncq)
1158			mv_stop_edma(ap);
1159	}
1160	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1161		struct mv_host_priv *hpriv = ap->host->private_data;
1162
1163		mv_edma_cfg(ap, want_ncq, 1);
1164
1165		mv_set_edma_ptrs(port_mmio, hpriv, pp);
1166		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1167
1168		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1169		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1170	}
1171}
1172
1173static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1174{
1175	void __iomem *port_mmio = mv_ap_base(ap);
1176	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1177	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1178	int i;
1179
1180	/*
1181	 * Wait for the EDMA engine to finish transactions in progress.
1182	 * No idea what a good "timeout" value might be, but measurements
1183	 * indicate that it often requires hundreds of microseconds
 1184	 * with two drives in use.  So we use the 15msec value above
1185	 * as a rough guess at what even more drives might require.
1186	 */
1187	for (i = 0; i < timeout; ++i) {
1188		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1189		if ((edma_stat & empty_idle) == empty_idle)
1190			break;
1191		udelay(per_loop);
1192	}
1193	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
1194}
1195
1196/**
1197 *      mv_stop_edma_engine - Disable eDMA engine
1198 *      @port_mmio: io base address
1199 *
1200 *      LOCKING:
1201 *      Inherited from caller.
1202 */
1203static int mv_stop_edma_engine(void __iomem *port_mmio)
1204{
1205	int i;
1206
1207	/* Disable eDMA.  The disable bit auto clears. */
1208	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1209
1210	/* Wait for the chip to confirm eDMA is off. */
1211	for (i = 10000; i > 0; i--) {
1212		u32 reg = readl(port_mmio + EDMA_CMD);
1213		if (!(reg & EDMA_EN))
1214			return 0;
1215		udelay(10);
1216	}
1217	return -EIO;
1218}
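
     /*
      * The loop above polls for up to 10000 * 10us = 100ms before
      * giving up and returning -EIO.
      */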
1219
1220static int mv_stop_edma(struct ata_port *ap)
1221{
1222	void __iomem *port_mmio = mv_ap_base(ap);
1223	struct mv_port_priv *pp = ap->private_data;
1224	int err = 0;
1225
1226	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1227		return 0;
1228	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1229	mv_wait_for_edma_empty_idle(ap);
1230	if (mv_stop_edma_engine(port_mmio)) {
1231		ata_port_err(ap, "Unable to stop eDMA\n");
1232		err = -EIO;
1233	}
1234	mv_edma_cfg(ap, 0, 0);
1235	return err;
1236}
1237
1238#ifdef ATA_DEBUG
1239static void mv_dump_mem(void __iomem *start, unsigned bytes)
1240{
1241	int b, w;
1242	for (b = 0; b < bytes; ) {
1243		DPRINTK("%p: ", start + b);
1244		for (w = 0; b < bytes && w < 4; w++) {
1245			printk("%08x ", readl(start + b));
1246			b += sizeof(u32);
1247		}
1248		printk("\n");
1249	}
1250}
1251#endif
1252
1253static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1254{
1255#ifdef ATA_DEBUG
1256	int b, w;
1257	u32 dw;
1258	for (b = 0; b < bytes; ) {
1259		DPRINTK("%02x: ", b);
1260		for (w = 0; b < bytes && w < 4; w++) {
1261			(void) pci_read_config_dword(pdev, b, &dw);
1262			printk("%08x ", dw);
1263			b += sizeof(u32);
1264		}
1265		printk("\n");
1266	}
1267#endif
1268}
1269static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1270			     struct pci_dev *pdev)
1271{
1272#ifdef ATA_DEBUG
1273	void __iomem *hc_base = mv_hc_base(mmio_base,
1274					   port >> MV_PORT_HC_SHIFT);
1275	void __iomem *port_base;
1276	int start_port, num_ports, p, start_hc, num_hcs, hc;
1277
 1278	if (port < 0) {
 1279		start_hc = start_port = 0;
 1280		num_ports = 8;		/* should be benign for 4 port devs */
1281		num_hcs = 2;
1282	} else {
1283		start_hc = port >> MV_PORT_HC_SHIFT;
1284		start_port = port;
1285		num_ports = num_hcs = 1;
1286	}
1287	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1288		num_ports > 1 ? num_ports - 1 : start_port);
1289
 1290	if (pdev) {
1291		DPRINTK("PCI config space regs:\n");
1292		mv_dump_pci_cfg(pdev, 0x68);
1293	}
1294	DPRINTK("PCI regs:\n");
1295	mv_dump_mem(mmio_base+0xc00, 0x3c);
1296	mv_dump_mem(mmio_base+0xd00, 0x34);
1297	mv_dump_mem(mmio_base+0xf00, 0x4);
1298	mv_dump_mem(mmio_base+0x1d00, 0x6c);
1299	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1300		hc_base = mv_hc_base(mmio_base, hc);
1301		DPRINTK("HC regs (HC %i):\n", hc);
1302		mv_dump_mem(hc_base, 0x1c);
1303	}
1304	for (p = start_port; p < start_port + num_ports; p++) {
1305		port_base = mv_port_base(mmio_base, p);
1306		DPRINTK("EDMA regs (port %i):\n", p);
1307		mv_dump_mem(port_base, 0x54);
1308		DPRINTK("SATA regs (port %i):\n", p);
1309		mv_dump_mem(port_base+0x300, 0x60);
1310	}
1311#endif
1312}
1313
1314static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1315{
1316	unsigned int ofs;
1317
1318	switch (sc_reg_in) {
1319	case SCR_STATUS:
1320	case SCR_CONTROL:
1321	case SCR_ERROR:
1322		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1323		break;
1324	case SCR_ACTIVE:
1325		ofs = SATA_ACTIVE;   /* active is not with the others */
1326		break;
1327	default:
1328		ofs = 0xffffffffU;
1329		break;
1330	}
1331	return ofs;
1332}
1333
1334static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1335{
1336	unsigned int ofs = mv_scr_offset(sc_reg_in);
1337
1338	if (ofs != 0xffffffffU) {
1339		*val = readl(mv_ap_base(link->ap) + ofs);
1340		return 0;
1341	} else
1342		return -EINVAL;
1343}
1344
1345static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1346{
1347	unsigned int ofs = mv_scr_offset(sc_reg_in);
1348
1349	if (ofs != 0xffffffffU) {
1350		void __iomem *addr = mv_ap_base(link->ap) + ofs;
1351		if (sc_reg_in == SCR_CONTROL) {
1352			/*
1353			 * Workaround for 88SX60x1 FEr SATA#26:
1354			 *
1355			 * COMRESETs have to take care not to accidentally
1356			 * put the drive to sleep when writing SCR_CONTROL.
1357			 * Setting bits 12..15 prevents this problem.
1358			 *
 1359			 * So if we see an outbound COMRESET, set those bits.
1360			 * Ditto for the followup write that clears the reset.
1361			 *
1362			 * The proprietary driver does this for
1363			 * all chip versions, and so do we.
1364			 */
1365			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1366				val |= 0xf000;
1367		}
1368		writelfl(val, addr);
1369		return 0;
1370	} else
1371		return -EINVAL;
1372}
1373
1374static void mv6_dev_config(struct ata_device *adev)
1375{
1376	/*
1377	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1378	 *
1379	 * Gen-II does not support NCQ over a port multiplier
1380	 *  (no FIS-based switching).
1381	 */
1382	if (adev->flags & ATA_DFLAG_NCQ) {
1383		if (sata_pmp_attached(adev->link->ap)) {
1384			adev->flags &= ~ATA_DFLAG_NCQ;
1385			ata_dev_info(adev,
1386				"NCQ disabled for command-based switching\n");
1387		}
1388	}
1389}
1390
1391static int mv_qc_defer(struct ata_queued_cmd *qc)
1392{
1393	struct ata_link *link = qc->dev->link;
1394	struct ata_port *ap = link->ap;
1395	struct mv_port_priv *pp = ap->private_data;
1396
1397	/*
1398	 * Don't allow new commands if we're in a delayed EH state
1399	 * for NCQ and/or FIS-based switching.
1400	 */
1401	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1402		return ATA_DEFER_PORT;
1403
 1404	/* PIO commands need an exclusive link: no other commands [DMA or PIO]
 1405	 * can run concurrently.
 1406	 * We set excl_link when we want to send a PIO command in DMA mode,
 1407	 * or a non-NCQ command in NCQ mode.
 1408	 * When we later receive a command from that link, and there are no
 1409	 * outstanding commands, we mark a flag to clear excl_link and let
 1410	 * the command go through.
1411	 */
1412	if (unlikely(ap->excl_link)) {
1413		if (link == ap->excl_link) {
1414			if (ap->nr_active_links)
1415				return ATA_DEFER_PORT;
1416			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1417			return 0;
1418		} else
1419			return ATA_DEFER_PORT;
1420	}
1421
1422	/*
1423	 * If the port is completely idle, then allow the new qc.
1424	 */
1425	if (ap->nr_active_links == 0)
1426		return 0;
1427
1428	/*
1429	 * The port is operating in host queuing mode (EDMA) with NCQ
1430	 * enabled, allow multiple NCQ commands.  EDMA also allows
1431	 * queueing multiple DMA commands but libata core currently
1432	 * doesn't allow it.
1433	 */
1434	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1435	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1436		if (ata_is_ncq(qc->tf.protocol))
1437			return 0;
1438		else {
1439			ap->excl_link = link;
1440			return ATA_DEFER_PORT;
1441		}
1442	}
1443
1444	return ATA_DEFER_PORT;
1445}
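
     /*
      * Illustrative scenario for the excl_link logic above: while an
      * NCQ stream is active under EDMA, a non-NCQ command (e.g. a
      * cache flush) sets excl_link and is deferred.  Once all
      * outstanding commands drain (nr_active_links == 0), the retried
      * command is issued with ATA_QCFLAG_CLEAR_EXCL, restoring normal
      * queueing afterwards.
      */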
1446
1447static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1448{
1449	struct mv_port_priv *pp = ap->private_data;
1450	void __iomem *port_mmio;
1451
1452	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1453	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1454	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1455
1456	ltmode   = *old_ltmode & ~LTMODE_BIT8;
1457	haltcond = *old_haltcond | EDMA_ERR_DEV;
1458
1459	if (want_fbs) {
1460		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1461		ltmode = *old_ltmode | LTMODE_BIT8;
1462		if (want_ncq)
1463			haltcond &= ~EDMA_ERR_DEV;
1464		else
1465			fiscfg |=  FISCFG_WAIT_DEV_ERR;
1466	} else {
1467		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1468	}
1469
1470	port_mmio = mv_ap_base(ap);
1471	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1472	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1473	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1474}
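
     /*
      * Summary of the cases above (illustrative):
      *   FBS + NCQ:     FISCFG_SINGLE_SYNC and LTMODE_BIT8 set; EDMA
      *                  no longer halts on device errors.
      *   FBS + non-NCQ: as above, plus FISCFG_WAIT_DEV_ERR, with EDMA
      *                  still halting on device errors.
      *   no FBS:        both special FISCFG bits cleared.
      */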
1475
1476static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1477{
1478	struct mv_host_priv *hpriv = ap->host->private_data;
1479	u32 old, new;
1480
1481	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1482	old = readl(hpriv->base + GPIO_PORT_CTL);
1483	if (want_ncq)
1484		new = old | (1 << 22);
1485	else
1486		new = old & ~(1 << 22);
1487	if (new != old)
1488		writel(new, hpriv->base + GPIO_PORT_CTL);
1489}
1490
1491/**
 1492 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 1493 *	@ap: Port being initialized
      *	@enable_bmdma: non-zero enables basic DMA; zero disables it
1494 *
1495 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
1496 *
1497 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
1498 *	of basic DMA on the GEN_IIE versions of the chips.
1499 *
1500 *	This bit survives EDMA resets, and must be set for basic DMA
1501 *	to function, and should be cleared when EDMA is active.
1502 */
1503static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1504{
1505	struct mv_port_priv *pp = ap->private_data;
1506	u32 new, *old = &pp->cached.unknown_rsvd;
1507
1508	if (enable_bmdma)
1509		new = *old | 1;
1510	else
1511		new = *old & ~1;
1512	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1513}
1514
1515/*
1516 * SOC chips have an issue whereby the HDD LEDs don't always blink
1517 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1518 * of the SOC takes care of it, generating a steady blink rate when
1519 * any drive on the chip is active.
1520 *
1521 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1522 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1523 *
1524 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1525 * LED operation works then, and provides better (more accurate) feedback.
1526 *
1527 * Note that this code assumes that an SOC never has more than one HC onboard.
1528 */
1529static void mv_soc_led_blink_enable(struct ata_port *ap)
1530{
1531	struct ata_host *host = ap->host;
1532	struct mv_host_priv *hpriv = host->private_data;
1533	void __iomem *hc_mmio;
1534	u32 led_ctrl;
1535
1536	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1537		return;
1538	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1539	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1540	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1541	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1542}
1543
1544static void mv_soc_led_blink_disable(struct ata_port *ap)
1545{
1546	struct ata_host *host = ap->host;
1547	struct mv_host_priv *hpriv = host->private_data;
1548	void __iomem *hc_mmio;
1549	u32 led_ctrl;
1550	unsigned int port;
1551
1552	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1553		return;
1554
1555	/* disable led-blink only if no ports are using NCQ */
1556	for (port = 0; port < hpriv->n_ports; port++) {
1557		struct ata_port *this_ap = host->ports[port];
1558		struct mv_port_priv *pp = this_ap->private_data;
1559
1560		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1561			return;
1562	}
1563
1564	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1565	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1566	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1567	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1568}
1569
1570static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1571{
1572	u32 cfg;
1573	struct mv_port_priv *pp    = ap->private_data;
1574	struct mv_host_priv *hpriv = ap->host->private_data;
1575	void __iomem *port_mmio    = mv_ap_base(ap);
1576
1577	/* set up non-NCQ EDMA configuration */
1578	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1579	pp->pp_flags &=
1580	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1581
1582	if (IS_GEN_I(hpriv))
1583		cfg |= (1 << 8);	/* enab config burst size mask */
1584
1585	else if (IS_GEN_II(hpriv)) {
1586		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1587		mv_60x1_errata_sata25(ap, want_ncq);
1588
1589	} else if (IS_GEN_IIE(hpriv)) {
1590		int want_fbs = sata_pmp_attached(ap);
1591		/*
1592		 * Possible future enhancement:
1593		 *
1594		 * The chip can use FBS with non-NCQ, if we allow it,
 1595		 * but first we need to have the error handling in place
1596		 * for this mode (datasheet section 7.3.15.4.2.3).
1597		 * So disallow non-NCQ FBS for now.
1598		 */
1599		want_fbs &= want_ncq;
1600
1601		mv_config_fbs(ap, want_ncq, want_fbs);
1602
1603		if (want_fbs) {
1604			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1605			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1606		}
1607
1608		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1609		if (want_edma) {
1610			cfg |= (1 << 22); /* enab 4-entry host queue cache */
1611			if (!IS_SOC(hpriv))
1612				cfg |= (1 << 18); /* enab early completion */
1613		}
1614		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1615			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1616		mv_bmdma_enable_iie(ap, !want_edma);
1617
1618		if (IS_SOC(hpriv)) {
1619			if (want_ncq)
1620				mv_soc_led_blink_enable(ap);
1621			else
1622				mv_soc_led_blink_disable(ap);
1623		}
1624	}
1625
1626	if (want_ncq) {
1627		cfg |= EDMA_CFG_NCQ;
1628		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1629	}
1630
1631	writelfl(cfg, port_mmio + EDMA_CFG);
1632}
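
     /*
      * Example (illustrative): a GEN_IIE port with a port multiplier
      * attached and NCQ requested ends up with EDMA_CFG_Q_DEPTH,
      * EDMA_CFG_EDMA_FBS, EDMA_CFG_NCQ, and the PM-field/host-queue
      * bits set in EDMA_CFG, plus MV_PP_FLAG_FBS_EN and
      * MV_PP_FLAG_NCQ_EN in pp_flags.
      */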
1633
1634static void mv_port_free_dma_mem(struct ata_port *ap)
1635{
1636	struct mv_host_priv *hpriv = ap->host->private_data;
1637	struct mv_port_priv *pp = ap->private_data;
1638	int tag;
1639
1640	if (pp->crqb) {
1641		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1642		pp->crqb = NULL;
1643	}
1644	if (pp->crpb) {
1645		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1646		pp->crpb = NULL;
1647	}
1648	/*
1649	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1650	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1651	 */
1652	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1653		if (pp->sg_tbl[tag]) {
1654			if (tag == 0 || !IS_GEN_I(hpriv))
1655				dma_pool_free(hpriv->sg_tbl_pool,
1656					      pp->sg_tbl[tag],
1657					      pp->sg_tbl_dma[tag]);
1658			pp->sg_tbl[tag] = NULL;
1659		}
1660	}
1661}
1662
1663/**
1664 *      mv_port_start - Port specific init/start routine.
1665 *      @ap: ATA channel to manipulate
1666 *
1667 *      Allocate and point to DMA memory, init port private memory,
1668 *      zero indices.
1669 *
1670 *      LOCKING:
1671 *      Inherited from caller.
1672 */
1673static int mv_port_start(struct ata_port *ap)
1674{
1675	struct device *dev = ap->host->dev;
1676	struct mv_host_priv *hpriv = ap->host->private_data;
1677	struct mv_port_priv *pp;
1678	unsigned long flags;
1679	int tag;
1680
1681	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1682	if (!pp)
1683		return -ENOMEM;
1684	ap->private_data = pp;
1685
1686	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1687	if (!pp->crqb)
1688		return -ENOMEM;
1689	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1690
1691	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1692	if (!pp->crpb)
1693		goto out_port_free_dma_mem;
1694	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1695
1696	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1697	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1698		ap->flags |= ATA_FLAG_AN;
1699	/*
1700	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1701	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1702	 */
1703	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1704		if (tag == 0 || !IS_GEN_I(hpriv)) {
1705			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1706					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1707			if (!pp->sg_tbl[tag])
1708				goto out_port_free_dma_mem;
1709		} else {
1710			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1711			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1712		}
1713	}
1714
1715	spin_lock_irqsave(ap->lock, flags);
1716	mv_save_cached_regs(ap);
1717	mv_edma_cfg(ap, 0, 0);
1718	spin_unlock_irqrestore(ap->lock, flags);
1719
1720	return 0;
1721
1722out_port_free_dma_mem:
1723	mv_port_free_dma_mem(ap);
1724	return -ENOMEM;
1725}
1726
1727/**
1728 *      mv_port_stop - Port specific cleanup/stop routine.
1729 *      @ap: ATA channel to manipulate
1730 *
1731 *      Stop DMA, cleanup port memory.
1732 *
1733 *      LOCKING:
1734 *      This routine uses the host lock to protect the DMA stop.
1735 */
1736static void mv_port_stop(struct ata_port *ap)
1737{
1738	unsigned long flags;
1739
1740	spin_lock_irqsave(ap->lock, flags);
1741	mv_stop_edma(ap);
1742	mv_enable_port_irqs(ap, 0);
1743	spin_unlock_irqrestore(ap->lock, flags);
1744	mv_port_free_dma_mem(ap);
1745}
1746
1747/**
1748 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1749 *      @qc: queued command whose SG list to source from
1750 *
1751 *      Populate the SG list and mark the last entry.
1752 *
1753 *      LOCKING:
1754 *      Inherited from caller.
1755 */
1756static void mv_fill_sg(struct ata_queued_cmd *qc)
1757{
1758	struct mv_port_priv *pp = qc->ap->private_data;
1759	struct scatterlist *sg;
1760	struct mv_sg *mv_sg, *last_sg = NULL;
1761	unsigned int si;
1762
1763	mv_sg = pp->sg_tbl[qc->tag];
1764	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1765		dma_addr_t addr = sg_dma_address(sg);
1766		u32 sg_len = sg_dma_len(sg);
1767
1768		while (sg_len) {
1769			u32 offset = addr & 0xffff;
1770			u32 len = sg_len;
1771
1772			if (offset + len > 0x10000)
1773				len = 0x10000 - offset;
1774
1775			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1776			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1777			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1778			mv_sg->reserved = 0;
1779
1780			sg_len -= len;
1781			addr += len;
1782
1783			last_sg = mv_sg;
1784			mv_sg++;
1785		}
1786	}
1787
1788	if (likely(last_sg))
1789		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1790	mb(); /* ensure data structure is visible to the chipset */
1791}
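
     /*
      * Worked example (illustrative): an sg segment at DMA address
      * 0x1fff0 with length 0x20 straddles a 64K boundary, so the loop
      * above emits two ePRDs: 0x1fff0/0x10 and 0x20000/0x10.  A full
      * 64KB entry keeps only the low 16 bits of len, i.e. a length
      * field of 0, which the EDMA presumably treats as 64KB per the
      * usual PRD convention.
      */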
1792
1793static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1794{
1795	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1796		(last ? CRQB_CMD_LAST : 0);
1797	*cmdw = cpu_to_le16(tmp);
1798}
1799
1800/**
1801 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
1802 *	@ap: Port associated with this ATA transaction.
1803 *
1804 *	We need this only for ATAPI bmdma transactions,
1805 *	as otherwise we experience spurious interrupts
1806 *	after libata-sff handles the bmdma interrupts.
1807 */
1808static void mv_sff_irq_clear(struct ata_port *ap)
1809{
1810	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1811}
1812
1813/**
1814 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1815 *	@qc: queued command to check for chipset/DMA compatibility.
1816 *
1817 *	The bmdma engines cannot handle speculative data sizes
1818 *	(bytecount under/over flow).  So only allow DMA for
1819 *	data transfer commands with known data sizes.
1820 *
1821 *	LOCKING:
1822 *	Inherited from caller.
1823 */
1824static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1825{
1826	struct scsi_cmnd *scmd = qc->scsicmd;
1827
1828	if (scmd) {
1829		switch (scmd->cmnd[0]) {
1830		case READ_6:
1831		case READ_10:
1832		case READ_12:
1833		case WRITE_6:
1834		case WRITE_10:
1835		case WRITE_12:
1836		case GPCMD_READ_CD:
1837		case GPCMD_SEND_DVD_STRUCTURE:
1838		case GPCMD_SEND_CUE_SHEET:
1839			return 0; /* DMA is safe */
1840		}
1841	}
1842	return -EOPNOTSUPP; /* use PIO instead */
1843}
1844
1845/**
1846 *	mv_bmdma_setup - Set up BMDMA transaction
1847 *	@qc: queued command to prepare DMA for.
1848 *
1849 *	LOCKING:
1850 *	Inherited from caller.
1851 */
1852static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1853{
1854	struct ata_port *ap = qc->ap;
1855	void __iomem *port_mmio = mv_ap_base(ap);
1856	struct mv_port_priv *pp = ap->private_data;
1857
1858	mv_fill_sg(qc);
1859
1860	/* clear all DMA cmd bits */
1861	writel(0, port_mmio + BMDMA_CMD);
1862
1863	/* load PRD table addr. */
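     	/* (the "(x >> 16) >> 16" below is the same 32-bit-safe
     	 *  upper-half idiom used in mv_set_edma_ptrs())
     	 */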
1864	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1865		port_mmio + BMDMA_PRD_HIGH);
1866	writelfl(pp->sg_tbl_dma[qc->tag],
1867		port_mmio + BMDMA_PRD_LOW);
1868
1869	/* issue r/w command */
1870	ap->ops->sff_exec_command(ap, &qc->tf);
1871}
1872
1873/**
1874 *	mv_bmdma_start - Start a BMDMA transaction
1875 *	@qc: queued command to start DMA on.
1876 *
1877 *	LOCKING:
1878 *	Inherited from caller.
1879 */
1880static void mv_bmdma_start(struct ata_queued_cmd *qc)
1881{
1882	struct ata_port *ap = qc->ap;
1883	void __iomem *port_mmio = mv_ap_base(ap);
1884	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1885	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1886
1887	/* start host DMA transaction */
1888	writelfl(cmd, port_mmio + BMDMA_CMD);
1889}
1890
1891/**
 1892 *	mv_bmdma_stop_ap - Stop BMDMA transfer
 1893 *	@ap: port on which to stop DMA
1894 *
1895 *	Clears the ATA_DMA_START flag in the bmdma control register
1896 *
1897 *	LOCKING:
1898 *	Inherited from caller.
1899 */
1900static void mv_bmdma_stop_ap(struct ata_port *ap)
1901{
1902	void __iomem *port_mmio = mv_ap_base(ap);
1903	u32 cmd;
1904
1905	/* clear start/stop bit */
1906	cmd = readl(port_mmio + BMDMA_CMD);
1907	if (cmd & ATA_DMA_START) {
1908		cmd &= ~ATA_DMA_START;
1909		writelfl(cmd, port_mmio + BMDMA_CMD);
1910
1911		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1912		ata_sff_dma_pause(ap);
1913	}
1914}
1915
1916static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1917{
1918	mv_bmdma_stop_ap(qc->ap);
1919}
1920
1921/**
1922 *	mv_bmdma_status - Read BMDMA status
1923 *	@ap: port for which to retrieve DMA status.
1924 *
1925 *	Read and return equivalent of the sff BMDMA status register.
1926 *
1927 *	LOCKING:
1928 *	Inherited from caller.
1929 */
1930static u8 mv_bmdma_status(struct ata_port *ap)
1931{
1932	void __iomem *port_mmio = mv_ap_base(ap);
1933	u32 reg, status;
1934
1935	/*
1936	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1937	 * and the ATA_DMA_INTR bit doesn't exist.
1938	 */
1939	reg = readl(port_mmio + BMDMA_STATUS);
1940	if (reg & ATA_DMA_ACTIVE)
1941		status = ATA_DMA_ACTIVE;
1942	else if (reg & ATA_DMA_ERR)
1943		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1944	else {
1945		/*
1946		 * Just because DMA_ACTIVE is 0 (DMA completed),
1947		 * this does _not_ mean the device is "done".
1948		 * So we should not yet be signalling ATA_DMA_INTR
 1949		 * in some cases.  E.g. DSM/TRIM, and perhaps others.
1950		 */
1951		mv_bmdma_stop_ap(ap);
1952		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1953			status = 0;
1954		else
1955			status = ATA_DMA_INTR;
1956	}
1957	return status;
1958}
1959
1960static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1961{
1962	struct ata_taskfile *tf = &qc->tf;
1963	/*
1964	 * Workaround for 88SX60x1 FEr SATA#24.
1965	 *
1966	 * Chip may corrupt WRITEs if multi_count >= 4kB.
1967	 * Note that READs are unaffected.
1968	 *
1969	 * It's not clear if this errata really means "4K bytes",
1970	 * or if it always happens for multi_count > 7
1971	 * regardless of device sector_size.
1972	 *
1973	 * So, for safety, any write with multi_count > 7
1974	 * gets converted here into a regular PIO write instead:
1975	 */
1976	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1977		if (qc->dev->multi_count > 7) {
1978			switch (tf->command) {
1979			case ATA_CMD_WRITE_MULTI:
1980				tf->command = ATA_CMD_PIO_WRITE;
1981				break;
1982			case ATA_CMD_WRITE_MULTI_FUA_EXT:
1983				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1984				/* fall through */
1985			case ATA_CMD_WRITE_MULTI_EXT:
1986				tf->command = ATA_CMD_PIO_WRITE_EXT;
1987				break;
1988			}
1989		}
1990	}
1991}
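
     /*
      * E.g. (illustrative): WRITE MULTIPLE EXT with multi_count == 16
      * transfers 8KB per DRQ block on 512-byte-sector drives, which is
      * over the 4KB errata threshold, so it is demoted to PIO WRITE
      * EXT above; writes with multi_count <= 7 pass through unchanged.
      */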
1992
1993/**
1994 *      mv_qc_prep - Host specific command preparation.
1995 *      @qc: queued command to prepare
1996 *
1997 *      This routine simply redirects to the general purpose routine
1998 *      if command is not DMA.  Else, it handles prep of the CRQB
1999 *      (command request block), does some sanity checking, and calls
2000 *      the SG load routine.
2001 *
2002 *      LOCKING:
2003 *      Inherited from caller.
2004 */
2005static void mv_qc_prep(struct ata_queued_cmd *qc)
2006{
2007	struct ata_port *ap = qc->ap;
2008	struct mv_port_priv *pp = ap->private_data;
2009	__le16 *cw;
2010	struct ata_taskfile *tf = &qc->tf;
2011	u16 flags = 0;
2012	unsigned in_index;
2013
2014	switch (tf->protocol) {
2015	case ATA_PROT_DMA:
2016		if (tf->command == ATA_CMD_DSM)
2017			return;
 2018		/* fall through */
2019	case ATA_PROT_NCQ:
2020		break;	/* continue below */
2021	case ATA_PROT_PIO:
2022		mv_rw_multi_errata_sata24(qc);
2023		return;
2024	default:
2025		return;
2026	}
2027
 2028	/* Fill in command request block */
2030	if (!(tf->flags & ATA_TFLAG_WRITE))
2031		flags |= CRQB_FLAG_READ;
2032	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2033	flags |= qc->tag << CRQB_TAG_SHIFT;
2034	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2035
2036	/* get current queue index from software */
2037	in_index = pp->req_idx;
2038
2039	pp->crqb[in_index].sg_addr =
2040		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2041	pp->crqb[in_index].sg_addr_hi =
2042		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2043	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2044
2045	cw = &pp->crqb[in_index].ata_cmd[0];
2046
2047	/* Sadly, the CRQB cannot accommodate all registers--there are
2048	 * only 11 bytes...so we must pick and choose required
2049	 * registers based on the command.  So, we drop feature and
2050	 * hob_feature for [RW] DMA commands, but they are needed for
2051	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
2052	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2053	 */
2054	switch (tf->command) {
2055	case ATA_CMD_READ:
2056	case ATA_CMD_READ_EXT:
2057	case ATA_CMD_WRITE:
2058	case ATA_CMD_WRITE_EXT:
2059	case ATA_CMD_WRITE_FUA_EXT:
2060		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2061		break;
2062	case ATA_CMD_FPDMA_READ:
2063	case ATA_CMD_FPDMA_WRITE:
2064		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2065		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2066		break;
2067	default:
2068		/* The only other commands EDMA supports in non-queued and
2069		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2070		 * of which are defined/used by Linux.  If we get here, this
2071		 * driver needs work.
2072		 *
2073		 * FIXME: modify libata to give qc_prep a return value and
2074		 * return error here.
2075		 */
2076		BUG_ON(tf->command);
2077		break;
2078	}
2079	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2080	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2081	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2082	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2083	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2084	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2085	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2086	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2087	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
2088
2089	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2090		return;
2091	mv_fill_sg(qc);
2092}
2093
2094/**
2095 *      mv_qc_prep_iie - Host specific command preparation.
2096 *      @qc: queued command to prepare
2097 *
2098 *      This routine simply redirects to the general purpose routine
2099 *      if command is not DMA.  Else, it handles prep of the CRQB
2100 *      (command request block), does some sanity checking, and calls
2101 *      the SG load routine.
2102 *
2103 *      LOCKING:
2104 *      Inherited from caller.
2105 */
2106static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2107{
2108	struct ata_port *ap = qc->ap;
2109	struct mv_port_priv *pp = ap->private_data;
2110	struct mv_crqb_iie *crqb;
2111	struct ata_taskfile *tf = &qc->tf;
2112	unsigned in_index;
2113	u32 flags = 0;
2114
2115	if ((tf->protocol != ATA_PROT_DMA) &&
2116	    (tf->protocol != ATA_PROT_NCQ))
2117		return;
2118	if (tf->command == ATA_CMD_DSM)
2119		return;  /* use bmdma for this */
2120
2121	/* Fill in Gen IIE command request block */
2122	if (!(tf->flags & ATA_TFLAG_WRITE))
2123		flags |= CRQB_FLAG_READ;
2124
2125	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2126	flags |= qc->tag << CRQB_TAG_SHIFT;
2127	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
2128	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2129
2130	/* get current queue index from software */
2131	in_index = pp->req_idx;
2132
2133	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2134	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2135	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2136	crqb->flags = cpu_to_le32(flags);
2137
2138	crqb->ata_cmd[0] = cpu_to_le32(
2139			(tf->command << 16) |
2140			(tf->feature << 24)
2141		);
2142	crqb->ata_cmd[1] = cpu_to_le32(
2143			(tf->lbal << 0) |
2144			(tf->lbam << 8) |
2145			(tf->lbah << 16) |
2146			(tf->device << 24)
2147		);
2148	crqb->ata_cmd[2] = cpu_to_le32(
2149			(tf->hob_lbal << 0) |
2150			(tf->hob_lbam << 8) |
2151			(tf->hob_lbah << 16) |
2152			(tf->hob_feature << 24)
2153		);
2154	crqb->ata_cmd[3] = cpu_to_le32(
2155			(tf->nsect << 0) |
2156			(tf->hob_nsect << 8)
2157		);
2158
2159	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2160		return;
2161	mv_fill_sg(qc);
2162}
2163
2164/**
2165 *	mv_sff_check_status - fetch device status, if valid
2166 *	@ap: ATA port to fetch status from
2167 *
2168 *	When using command issue via mv_qc_issue_fis(),
2169 *	the initial ATA_BUSY state does not show up in the
2170 *	ATA status (shadow) register.  This can confuse libata!
2171 *
2172 *	So we have a hook here to fake ATA_BUSY for that situation,
2173 *	until the first time a BUSY, DRQ, or ERR bit is seen.
2174 *
2175 *	The rest of the time, it simply returns the ATA status register.
2176 */
2177static u8 mv_sff_check_status(struct ata_port *ap)
2178{
2179	u8 stat = ioread8(ap->ioaddr.status_addr);
2180	struct mv_port_priv *pp = ap->private_data;
2181
2182	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2183		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2184			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2185		else
2186			stat = ATA_BUSY;
2187	}
2188	return stat;
2189}
2190
2191/**
2192 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
      *	@ap: ATA port used to send the FIS
2193 *	@fis: fis to be sent
2194 *	@nwords: number of 32-bit words in the fis
2195 */
2196static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2197{
2198	void __iomem *port_mmio = mv_ap_base(ap);
2199	u32 ifctl, old_ifctl, ifstat;
2200	int i, timeout = 200, final_word = nwords - 1;
2201
2202	/* Initiate FIS transmission mode */
2203	old_ifctl = readl(port_mmio + SATA_IFCTL);
2204	ifctl = 0x100 | (old_ifctl & 0xf);
2205	writelfl(ifctl, port_mmio + SATA_IFCTL);
2206
2207	/* Send all words of the FIS except for the final word */
2208	for (i = 0; i < final_word; ++i)
2209		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2210
2211	/* Flag end-of-transmission, and then send the final word */
2212	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2213	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2214
2215	/*
2216	 * Wait for FIS transmission to complete.
2217	 * This typically takes just a single iteration.
2218	 */
2219	do {
2220		ifstat = readl(port_mmio + SATA_IFSTAT);
2221	} while (!(ifstat & 0x1000) && --timeout);
2222
2223	/* Restore original port configuration */
2224	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2225
2226	/* See if it worked */
2227	if ((ifstat & 0x3000) != 0x1000) {
2228		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2229			      __func__, ifstat);
2230		return AC_ERR_OTHER;
2231	}
2232	return 0;
2233}
2234
2235/**
2236 *	mv_qc_issue_fis - Issue a command directly as a FIS
2237 *	@qc: queued command to start
2238 *
2239 *	Note that the ATA shadow registers are not updated
2240 *	after command issue, so the device will appear "READY"
2241 *	if polled, even while it is BUSY processing the command.
2242 *
2243 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
2244 *
2245 *	Note: we don't get updated shadow regs on *completion*
2246 *	of non-data commands. So avoid sending them via this function,
2247 *	as they will appear to have completed immediately.
2248 *
 2249 *	GEN_IIE has special registers from which we could read the result tf,
2250 *	but earlier chipsets do not.  For now, we ignore those registers.
2251 */
2252static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2253{
2254	struct ata_port *ap = qc->ap;
2255	struct mv_port_priv *pp = ap->private_data;
2256	struct ata_link *link = qc->dev->link;
2257	u32 fis[5];
2258	int err = 0;
2259
2260	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2261	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2262	if (err)
2263		return err;
2264
2265	switch (qc->tf.protocol) {
2266	case ATAPI_PROT_PIO:
2267		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2268		/* fall through */
2269	case ATAPI_PROT_NODATA:
2270		ap->hsm_task_state = HSM_ST_FIRST;
2271		break;
2272	case ATA_PROT_PIO:
2273		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2274		if (qc->tf.flags & ATA_TFLAG_WRITE)
2275			ap->hsm_task_state = HSM_ST_FIRST;
2276		else
2277			ap->hsm_task_state = HSM_ST;
2278		break;
2279	default:
2280		ap->hsm_task_state = HSM_ST_LAST;
2281		break;
2282	}
2283
2284	if (qc->tf.flags & ATA_TFLAG_POLLING)
2285		ata_sff_queue_pio_task(link, 0);
2286	return 0;
2287}
2288
2289/**
2290 *      mv_qc_issue - Initiate a command to the host
2291 *      @qc: queued command to start
2292 *
2293 *      This routine simply redirects to the general purpose routine
2294 *      if command is not DMA.  Else, it sanity checks our local
2295 *      caches of the request producer/consumer indices then enables
2296 *      DMA and bumps the request producer index.
2297 *
2298 *      LOCKING:
2299 *      Inherited from caller.
2300 */
2301static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2302{
2303	static int limit_warnings = 10;
2304	struct ata_port *ap = qc->ap;
2305	void __iomem *port_mmio = mv_ap_base(ap);
2306	struct mv_port_priv *pp = ap->private_data;
2307	u32 in_index;
2308	unsigned int port_irqs;
2309
2310	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2311
2312	switch (qc->tf.protocol) {
2313	case ATA_PROT_DMA:
2314		if (qc->tf.command == ATA_CMD_DSM) {
2315			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
2316				return AC_ERR_OTHER;
2317			break;  /* use bmdma for this */
2318		}
 2319		/* fall through */
2320	case ATA_PROT_NCQ:
2321		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2322		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2323		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2324
2325		/* Write the request in pointer to kick the EDMA to life */
2326		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2327					port_mmio + EDMA_REQ_Q_IN_PTR);
2328		return 0;
2329
2330	case ATA_PROT_PIO:
2331		/*
2332		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2333		 *
2334		 * Someday, we might implement special polling workarounds
2335		 * for these, but it all seems rather unnecessary since we
2336		 * normally use only DMA for commands which transfer more
2337		 * than a single block of data.
2338		 *
2339		 * Much of the time, this could just work regardless.
2340		 * So for now, just log the incident, and allow the attempt.
2341		 */
2342		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2343			--limit_warnings;
2344			ata_link_warn(qc->dev->link, DRV_NAME
2345				      ": attempting PIO w/multiple DRQ: "
2346				      "this may fail due to h/w errata\n");
2347		}
 2348		/* fall through */
2349	case ATA_PROT_NODATA:
2350	case ATAPI_PROT_PIO:
2351	case ATAPI_PROT_NODATA:
2352		if (ap->flags & ATA_FLAG_PIO_POLLING)
2353			qc->tf.flags |= ATA_TFLAG_POLLING;
2354		break;
2355	}
2356
2357	if (qc->tf.flags & ATA_TFLAG_POLLING)
2358		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
2359	else
2360		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
2361
2362	/*
2363	 * We're about to send a non-EDMA capable command to the
2364	 * port.  Turn off EDMA so there won't be problems accessing
2365	 * shadow block, etc registers.
2366	 */
2367	mv_stop_edma(ap);
2368	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2369	mv_pmp_select(ap, qc->dev->link->pmp);
2370
2371	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2372		struct mv_host_priv *hpriv = ap->host->private_data;
2373		/*
2374		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2375		 *
2376		 * After any NCQ error, the READ_LOG_EXT command
2377		 * from libata-eh *must* use mv_qc_issue_fis().
2378		 * Otherwise it might fail, due to chip errata.
2379		 *
2380		 * Rather than special-case it, we'll just *always*
2381		 * use this method here for READ_LOG_EXT, making for
2382		 * easier testing.
2383		 */
2384		if (IS_GEN_II(hpriv))
2385			return mv_qc_issue_fis(qc);
2386	}
2387	return ata_bmdma_qc_issue(qc);
2388}
2389
2390static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2391{
2392	struct mv_port_priv *pp = ap->private_data;
2393	struct ata_queued_cmd *qc;
2394
2395	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2396		return NULL;
2397	qc = ata_qc_from_tag(ap, ap->link.active_tag);
2398	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2399		return qc;
2400	return NULL;
2401}
2402
2403static void mv_pmp_error_handler(struct ata_port *ap)
2404{
2405	unsigned int pmp, pmp_map;
2406	struct mv_port_priv *pp = ap->private_data;
2407
2408	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2409		/*
2410		 * Perform NCQ error analysis on failed PMPs
2411		 * before we freeze the port entirely.
2412		 *
2413		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2414		 */
2415		pmp_map = pp->delayed_eh_pmp_map;
2416		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2417		for (pmp = 0; pmp_map != 0; pmp++) {
2418			unsigned int this_pmp = (1 << pmp);
2419			if (pmp_map & this_pmp) {
2420				struct ata_link *link = &ap->pmp_link[pmp];
2421				pmp_map &= ~this_pmp;
2422				ata_eh_analyze_ncq_error(link);
2423			}
2424		}
2425		ata_port_freeze(ap);
2426	}
2427	sata_pmp_error_handler(ap);
2428}
2429
2430static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2431{
2432	void __iomem *port_mmio = mv_ap_base(ap);
2433
2434	return readl(port_mmio + SATA_TESTCTL) >> 16;
2435}
2436
2437static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2438{
2439	struct ata_eh_info *ehi;
2440	unsigned int pmp;
2441
2442	/*
2443	 * Initialize EH info for PMPs which saw device errors
2444	 */
2445	ehi = &ap->link.eh_info;
2446	for (pmp = 0; pmp_map != 0; pmp++) {
2447		unsigned int this_pmp = (1 << pmp);
2448		if (pmp_map & this_pmp) {
2449			struct ata_link *link = &ap->pmp_link[pmp];
2450
2451			pmp_map &= ~this_pmp;
2452			ehi = &link->eh_info;
2453			ata_ehi_clear_desc(ehi);
2454			ata_ehi_push_desc(ehi, "dev err");
2455			ehi->err_mask |= AC_ERR_DEV;
2456			ehi->action |= ATA_EH_RESET;
2457			ata_link_abort(link);
2458		}
2459	}
2460}
2461
2462static int mv_req_q_empty(struct ata_port *ap)
2463{
2464	void __iomem *port_mmio = mv_ap_base(ap);
2465	u32 in_ptr, out_ptr;
2466
2467	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2468			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2469	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2470			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2471	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
2472}
2473
2474static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2475{
2476	struct mv_port_priv *pp = ap->private_data;
2477	int failed_links;
2478	unsigned int old_map, new_map;
2479
2480	/*
2481	 * Device error during FBS+NCQ operation:
2482	 *
2483	 * Set a port flag to prevent further I/O being enqueued.
2484	 * Leave the EDMA running to drain outstanding commands from this port.
2485	 * Perform the post-mortem/EH only when all responses are complete.
2486	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2487	 */
2488	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2489		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2490		pp->delayed_eh_pmp_map = 0;
2491	}
2492	old_map = pp->delayed_eh_pmp_map;
2493	new_map = old_map | mv_get_err_pmp_map(ap);
2494
2495	if (old_map != new_map) {
2496		pp->delayed_eh_pmp_map = new_map;
2497		mv_pmp_eh_prep(ap, new_map & ~old_map);
2498	}
2499	failed_links = hweight16(new_map);
2500
2501	ata_port_info(ap,
2502		      "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
2503		      __func__, pp->delayed_eh_pmp_map,
2504		      ap->qc_active, failed_links,
2505		      ap->nr_active_links);
2506
2507	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2508		mv_process_crpb_entries(ap, pp);
2509		mv_stop_edma(ap);
2510		mv_eh_freeze(ap);
2511		ata_port_info(ap, "%s: done\n", __func__);
2512		return 1;	/* handled */
2513	}
2514	ata_port_info(ap, "%s: waiting\n", __func__);
2515	return 1;	/* handled */
2516}
2517
2518static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2519{
2520	/*
2521	 * Possible future enhancement:
2522	 *
2523	 * FBS+non-NCQ operation is not yet implemented.
2524	 * See related notes in mv_edma_cfg().
2525	 *
2526	 * Device error during FBS+non-NCQ operation:
2527	 *
2528	 * We need to snapshot the shadow registers for each failed command.
2529	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2530	 */
2531	return 0;	/* not handled */
2532}
2533
2534static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2535{
2536	struct mv_port_priv *pp = ap->private_data;
2537
2538	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2539		return 0;	/* EDMA was not active: not handled */
2540	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2541		return 0;	/* FBS was not active: not handled */
2542
2543	if (!(edma_err_cause & EDMA_ERR_DEV))
2544		return 0;	/* non DEV error: not handled */
2545	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2546	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2547		return 0;	/* other problems: not handled */
2548
2549	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2550		/*
2551		 * EDMA should NOT have self-disabled for this case.
2552		 * If it did, then something is wrong elsewhere,
2553		 * and we cannot handle it here.
2554		 */
2555		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2556			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2557				      __func__, edma_err_cause, pp->pp_flags);
2558			return 0; /* not handled */
2559		}
2560		return mv_handle_fbs_ncq_dev_err(ap);
2561	} else {
2562		/*
2563		 * EDMA should have self-disabled for this case.
2564		 * If it did not, then something is wrong elsewhere,
2565		 * and we cannot handle it here.
2566		 */
2567		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2568			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2569				      __func__, edma_err_cause, pp->pp_flags);
2570			return 0; /* not handled */
2571		}
2572		return mv_handle_fbs_non_ncq_dev_err(ap);
2573	}
2574	return 0;	/* not handled */
2575}
2576
2577static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2578{
2579	struct ata_eh_info *ehi = &ap->link.eh_info;
2580	char *when = "idle";
2581
2582	ata_ehi_clear_desc(ehi);
2583	if (edma_was_enabled) {
2584		when = "EDMA enabled";
2585	} else {
2586		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2587		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2588			when = "polling";
2589	}
2590	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2591	ehi->err_mask |= AC_ERR_OTHER;
2592	ehi->action   |= ATA_EH_RESET;
2593	ata_port_freeze(ap);
2594}
2595
2596/**
2597 *      mv_err_intr - Handle error interrupts on the port
2598 *      @ap: ATA channel to manipulate
2599 *
2600 *      Most cases require a full reset of the chip's state machine,
2601 *      which also performs a COMRESET.
2602 *      Also, if the port disabled DMA, update our cached copy to match.
2603 *
2604 *      LOCKING:
2605 *      Inherited from caller.
2606 */
2607static void mv_err_intr(struct ata_port *ap)
2608{
2609	void __iomem *port_mmio = mv_ap_base(ap);
2610	u32 edma_err_cause, eh_freeze_mask, serr = 0;
2611	u32 fis_cause = 0;
2612	struct mv_port_priv *pp = ap->private_data;
2613	struct mv_host_priv *hpriv = ap->host->private_data;
2614	unsigned int action = 0, err_mask = 0;
2615	struct ata_eh_info *ehi = &ap->link.eh_info;
2616	struct ata_queued_cmd *qc;
2617	int abort = 0;
2618
2619	/*
2620	 * Read and clear the SError and err_cause bits.
2621	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2622	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2623	 */
2624	sata_scr_read(&ap->link, SCR_ERROR, &serr);
2625	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2626
2627	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2628	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2629		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2630		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2631	}
2632	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2633
2634	if (edma_err_cause & EDMA_ERR_DEV) {
2635		/*
2636		 * Device errors during FIS-based switching operation
2637		 * require special handling.
2638		 */
2639		if (mv_handle_dev_err(ap, edma_err_cause))
2640			return;
2641	}
2642
2643	qc = mv_get_active_qc(ap);
2644	ata_ehi_clear_desc(ehi);
2645	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2646			  edma_err_cause, pp->pp_flags);
2647
2648	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2649		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2650		if (fis_cause & FIS_IRQ_CAUSE_AN) {
2651			u32 ec = edma_err_cause &
2652			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2653			sata_async_notification(ap);
2654			if (!ec)
2655				return; /* Just an AN; no need for the nukes */
2656			ata_ehi_push_desc(ehi, "SDB notify");
2657		}
2658	}
2659	/*
2660	 * All generations share these EDMA error cause bits:
2661	 */
2662	if (edma_err_cause & EDMA_ERR_DEV) {
2663		err_mask |= AC_ERR_DEV;
2664		action |= ATA_EH_RESET;
2665		ata_ehi_push_desc(ehi, "dev error");
2666	}
2667	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2668			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2669			EDMA_ERR_INTRL_PAR)) {
2670		err_mask |= AC_ERR_ATA_BUS;
2671		action |= ATA_EH_RESET;
2672		ata_ehi_push_desc(ehi, "parity error");
2673	}
2674	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2675		ata_ehi_hotplugged(ehi);
2676		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2677			"dev disconnect" : "dev connect");
2678		action |= ATA_EH_RESET;
2679	}
2680
2681	/*
2682	 * Gen-I has a different SELF_DIS bit,
2683	 * different FREEZE bits, and no SERR bit:
2684	 */
2685	if (IS_GEN_I(hpriv)) {
2686		eh_freeze_mask = EDMA_EH_FREEZE_5;
2687		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2688			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2689			ata_ehi_push_desc(ehi, "EDMA self-disable");
2690		}
2691	} else {
2692		eh_freeze_mask = EDMA_EH_FREEZE;
2693		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2694			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2695			ata_ehi_push_desc(ehi, "EDMA self-disable");
2696		}
2697		if (edma_err_cause & EDMA_ERR_SERR) {
2698			ata_ehi_push_desc(ehi, "SError=%08x", serr);
2699			err_mask |= AC_ERR_ATA_BUS;
2700			action |= ATA_EH_RESET;
2701		}
2702	}
2703
2704	if (!err_mask) {
2705		err_mask = AC_ERR_OTHER;
2706		action |= ATA_EH_RESET;
2707	}
2708
2709	ehi->serror |= serr;
2710	ehi->action |= action;
2711
2712	if (qc)
2713		qc->err_mask |= err_mask;
2714	else
2715		ehi->err_mask |= err_mask;
2716
2717	if (err_mask == AC_ERR_DEV) {
2718		/*
2719		 * Cannot do ata_port_freeze() here,
2720		 * because it would kill PIO access,
2721		 * which is needed for further diagnosis.
2722		 */
2723		mv_eh_freeze(ap);
2724		abort = 1;
2725	} else if (edma_err_cause & eh_freeze_mask) {
2726		/*
2727		 * Note to self: ata_port_freeze() calls ata_port_abort()
2728		 */
2729		ata_port_freeze(ap);
2730	} else {
2731		abort = 1;
2732	}
2733
2734	if (abort) {
2735		if (qc)
2736			ata_link_abort(qc->dev->link);
2737		else
2738			ata_port_abort(ap);
2739	}
2740}
2741
2742static bool mv_process_crpb_response(struct ata_port *ap,
2743		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2744{
2745	u8 ata_status;
2746	u16 edma_status = le16_to_cpu(response->flags);
2747
2748	/*
2749	 * edma_status from a response queue entry:
2750	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2751	 *   MSB is saved ATA status from command completion.
2752	 */
2753	if (!ncq_enabled) {
2754		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2755		if (err_cause) {
2756			/*
2757			 * Error will be seen/handled by
2758			 * mv_err_intr().  So do nothing at all here.
2759			 */
2760			return false;
2761		}
2762	}
2763	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2764	if (!ac_err_mask(ata_status))
2765		return true;
2766	/* else: leave it for mv_err_intr() */
2767	return false;
2768}
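/*
 * Example decode (illustration only): assuming CRPB_FLAG_STATUS_SHIFT
 * is 8, a hypothetical CRPB flags word of 0x5000 splits as described
 * above:
 *
 *	err_cause  = 0x5000 & 0xff & ~EDMA_ERR_DEV;	// 0x00: no EDMA error
 *	ata_status = 0x5000 >> CRPB_FLAG_STATUS_SHIFT;	// 0x50: DRDY, no ERR
 *
 * ac_err_mask(0x50) is 0, so this entry completes successfully (true).
 */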
2769
2770static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2771{
2772	void __iomem *port_mmio = mv_ap_base(ap);
2773	struct mv_host_priv *hpriv = ap->host->private_data;
2774	u32 in_index;
2775	bool work_done = false;
2776	u32 done_mask = 0;
2777	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2778
2779	/* Get the hardware queue position index */
2780	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2781			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2782
2783	/* Process any new responses since the last time we looked */
2784	while (in_index != pp->resp_idx) {
2785		unsigned int tag;
2786		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2787
2788		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2789
2790		if (IS_GEN_I(hpriv)) {
2791			/* 50xx: no NCQ, only one command active at a time */
2792			tag = ap->link.active_tag;
2793		} else {
2794			/* Gen II/IIE: get command tag from CRPB entry */
2795			tag = le16_to_cpu(response->id) & 0x1f;
2796		}
2797		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2798			done_mask |= 1 << tag;
2799		work_done = true;
2800	}
2801
2802	if (work_done) {
2803		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2804
2805		/* Update the software queue position index in hardware */
2806		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2807			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2808			 port_mmio + EDMA_RSP_Q_OUT_PTR);
2809	}
2810}
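/*
 * Note on the XOR above (illustration only): ata_qc_complete_multiple()
 * takes the mask of tags still active *after* the completions.  With a
 * hypothetical ap->qc_active of 0x0b (tags 0, 1, 3) and done_mask of
 * 0x02 (tag 1 finished):
 *
 *	ap->qc_active ^ done_mask == 0x09	// tags 0 and 3 remain
 */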
2811
2812static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2813{
2814	struct mv_port_priv *pp;
2815	int edma_was_enabled;
2816
2817	/*
2818	 * Grab a snapshot of the EDMA_EN flag setting,
2819	 * so that we have a consistent view for this port,
2820	 * even if one of the routines we call changes it.
2821	 */
2822	pp = ap->private_data;
2823	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2824	/*
2825	 * Process completed CRPB response(s) before other events.
2826	 */
2827	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2828		mv_process_crpb_entries(ap, pp);
2829		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2830			mv_handle_fbs_ncq_dev_err(ap);
2831	}
2832	/*
2833	 * Handle chip-reported errors, or continue on to handle PIO.
2834	 */
2835	if (unlikely(port_cause & ERR_IRQ)) {
2836		mv_err_intr(ap);
2837	} else if (!edma_was_enabled) {
2838		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2839		if (qc)
2840			ata_bmdma_port_intr(ap, qc);
2841		else
2842			mv_unexpected_intr(ap, edma_was_enabled);
2843	}
2844}
2845
2846/**
2847 *      mv_host_intr - Handle all interrupts on the given host controller
2848 *      @host: host specific structure
2849 *      @main_irq_cause: Main interrupt cause register for the chip.
2850 *
2851 *      LOCKING:
2852 *      Inherited from caller.
2853 */
2854static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2855{
2856	struct mv_host_priv *hpriv = host->private_data;
2857	void __iomem *mmio = hpriv->base, *hc_mmio;
2858	unsigned int handled = 0, port;
2859
2860	/* If asserted, clear the "all ports" IRQ coalescing bit */
2861	if (main_irq_cause & ALL_PORTS_COAL_DONE)
2862		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2863
2864	for (port = 0; port < hpriv->n_ports; port++) {
2865		struct ata_port *ap = host->ports[port];
2866		unsigned int p, shift, hardport, port_cause;
2867
2868		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2869		/*
2870		 * Each hc within the host has its own hc_irq_cause register,
2871	 * where the interrupting ports' bits get ack'd.
2872		 */
2873		if (hardport == 0) {	/* first port on this hc ? */
2874			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2875			u32 port_mask, ack_irqs;
2876			/*
2877			 * Skip this entire hc if nothing pending for any ports
2878			 */
2879			if (!hc_cause) {
2880				port += MV_PORTS_PER_HC - 1;
2881				continue;
2882			}
2883			/*
2884			 * We don't need/want to read the hc_irq_cause register,
2885			 * because doing so hurts performance, and
2886			 * main_irq_cause already gives us everything we need.
2887			 *
2888			 * But we do have to *write* to the hc_irq_cause to ack
2889			 * the ports that we are handling this time through.
2890			 *
2891			 * This requires that we create a bitmap for those
2892			 * ports which interrupted us, and use that bitmap
2893			 * to ack (only) those ports via hc_irq_cause.
2894			 */
2895			ack_irqs = 0;
2896			if (hc_cause & PORTS_0_3_COAL_DONE)
2897				ack_irqs = HC_COAL_IRQ;
2898			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2899				if ((port + p) >= hpriv->n_ports)
2900					break;
2901				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2902				if (hc_cause & port_mask)
2903					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2904			}
2905			hc_mmio = mv_hc_base_from_port(mmio, port);
2906			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2907			handled = 1;
2908		}
2909		/*
2910		 * Handle interrupts signalled for this port:
2911		 */
2912		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2913		if (port_cause)
2914			mv_port_intr(ap, port_cause);
2915	}
2916	return handled;
2917}
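/*
 * Worked example for the ack bitmap above (illustration only): if
 * hardports 0 and 2 of an HC show DONE_IRQ in main_irq_cause, the
 * p-loop accumulates ack_irqs = ((DMA_IRQ | DEV_IRQ) << 0) |
 * ((DMA_IRQ | DEV_IRQ) << 2), and the single writelfl(~ack_irqs) then
 * clears exactly those ports' bits in hc_irq_cause while leaving any
 * other ports' still-pending bits untouched.
 */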
2918
2919static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2920{
2921	struct mv_host_priv *hpriv = host->private_data;
2922	struct ata_port *ap;
2923	struct ata_queued_cmd *qc;
2924	struct ata_eh_info *ehi;
2925	unsigned int i, err_mask, printed = 0;
2926	u32 err_cause;
2927
2928	err_cause = readl(mmio + hpriv->irq_cause_offset);
2929
2930	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2931
2932	DPRINTK("All regs @ PCI error\n");
2933	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2934
2935	writelfl(0, mmio + hpriv->irq_cause_offset);
2936
2937	for (i = 0; i < host->n_ports; i++) {
2938		ap = host->ports[i];
2939		if (!ata_link_offline(&ap->link)) {
2940			ehi = &ap->link.eh_info;
2941			ata_ehi_clear_desc(ehi);
2942			if (!printed++)
2943				ata_ehi_push_desc(ehi,
2944					"PCI err cause 0x%08x", err_cause);
2945			err_mask = AC_ERR_HOST_BUS;
2946			ehi->action = ATA_EH_RESET;
2947			qc = ata_qc_from_tag(ap, ap->link.active_tag);
2948			if (qc)
2949				qc->err_mask |= err_mask;
2950			else
2951				ehi->err_mask |= err_mask;
2952
2953			ata_port_freeze(ap);
2954		}
2955	}
2956	return 1;	/* handled */
2957}
2958
2959/**
2960 *      mv_interrupt - Main interrupt event handler
2961 *      @irq: unused
2962 *      @dev_instance: private data; in this case the host structure
2963 *
2964 *      Read the read-only register to determine if any host
2965 *      controllers have pending interrupts.  If so, call lower level
2966 *      routine to handle.  Also check for PCI errors which are only
2967 *      reported here.
2968 *
2969 *      LOCKING:
2970 *      This routine holds the host lock while processing pending
2971 *      interrupts.
2972 */
2973static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2974{
2975	struct ata_host *host = dev_instance;
2976	struct mv_host_priv *hpriv = host->private_data;
2977	unsigned int handled = 0;
2978	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2979	u32 main_irq_cause, pending_irqs;
2980
2981	spin_lock(&host->lock);
2982
2983	/* for MSI:  block new interrupts while in here */
2984	if (using_msi)
2985		mv_write_main_irq_mask(0, hpriv);
2986
2987	main_irq_cause = readl(hpriv->main_irq_cause_addr);
2988	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
2989	/*
2990	 * Deal with cases where we either have nothing pending, or have read
2991	 * a bogus register value which can indicate HW removal or PCI fault.
2992	 */
2993	if (pending_irqs && main_irq_cause != 0xffffffffU) {
2994		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2995			handled = mv_pci_error(host, hpriv->base);
2996		else
2997			handled = mv_host_intr(host, pending_irqs);
2998	}
2999
3000	/* for MSI: unmask; interrupt cause bits will retrigger now */
3001	if (using_msi)
3002		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3003
3004	spin_unlock(&host->lock);
3005
3006	return IRQ_RETVAL(handled);
3007}
3008
3009static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3010{
3011	unsigned int ofs;
3012
3013	switch (sc_reg_in) {
3014	case SCR_STATUS:
3015	case SCR_ERROR:
3016	case SCR_CONTROL:
3017		ofs = sc_reg_in * sizeof(u32);
3018		break;
3019	default:
3020		ofs = 0xffffffffU;
3021		break;
3022	}
3023	return ofs;
3024}
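/*
 * Resulting map (illustration only): with SCR_STATUS == 0,
 * SCR_ERROR == 1 and SCR_CONTROL == 2 (enum sata_scr in <linux/ata.h>),
 * these registers land at PHY-base offsets 0x0, 0x4 and 0x8; any other
 * SCR id yields the 0xffffffffU sentinel, which the accessors below
 * turn into -EINVAL.
 */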
3025
3026static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3027{
3028	struct mv_host_priv *hpriv = link->ap->host->private_data;
3029	void __iomem *mmio = hpriv->base;
3030	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3031	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3032
3033	if (ofs != 0xffffffffU) {
3034		*val = readl(addr + ofs);
3035		return 0;
3036	} else
3037		return -EINVAL;
3038}
3039
3040static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3041{
3042	struct mv_host_priv *hpriv = link->ap->host->private_data;
3043	void __iomem *mmio = hpriv->base;
3044	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3045	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3046
3047	if (ofs != 0xffffffffU) {
3048		writelfl(val, addr + ofs);
3049		return 0;
3050	} else
3051		return -EINVAL;
3052}
3053
3054static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3055{
3056	struct pci_dev *pdev = to_pci_dev(host->dev);
3057	int early_5080;
3058
3059	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3060
3061	if (!early_5080) {
3062		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3063		tmp |= (1 << 0);
3064		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3065	}
3066
3067	mv_reset_pci_bus(host, mmio);
3068}
3069
3070static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3071{
3072	writel(0x0fcfffff, mmio + FLASH_CTL);
3073}
3074
3075static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3076			   void __iomem *mmio)
3077{
3078	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3079	u32 tmp;
3080
3081	tmp = readl(phy_mmio + MV5_PHY_MODE);
3082
3083	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
3084	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
3085}
3086
3087static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3088{
3089	u32 tmp;
3090
3091	writel(0, mmio + GPIO_PORT_CTL);
3092
3093	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3094
3095	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3096	tmp |= ~(1 << 0);
3097	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3098}
3099
3100static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3101			   unsigned int port)
3102{
3103	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3104	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3105	u32 tmp;
3106	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3107
3108	if (fix_apm_sq) {
3109		tmp = readl(phy_mmio + MV5_LTMODE);
3110		tmp |= (1 << 19);
3111		writel(tmp, phy_mmio + MV5_LTMODE);
3112
3113		tmp = readl(phy_mmio + MV5_PHY_CTL);
3114		tmp &= ~0x3;
3115		tmp |= 0x1;
3116		writel(tmp, phy_mmio + MV5_PHY_CTL);
3117	}
3118
3119	tmp = readl(phy_mmio + MV5_PHY_MODE);
3120	tmp &= ~mask;
3121	tmp |= hpriv->signal[port].pre;
3122	tmp |= hpriv->signal[port].amps;
3123	writel(tmp, phy_mmio + MV5_PHY_MODE);
3124}
3125
3126
3127#undef ZERO
3128#define ZERO(reg) writel(0, port_mmio + (reg))
3129static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3130			     unsigned int port)
3131{
3132	void __iomem *port_mmio = mv_port_base(mmio, port);
3133
3134	mv_reset_channel(hpriv, mmio, port);
3135
3136	ZERO(0x028);	/* command */
3137	writel(0x11f, port_mmio + EDMA_CFG);
3138	ZERO(0x004);	/* timer */
3139	ZERO(0x008);	/* irq err cause */
3140	ZERO(0x00c);	/* irq err mask */
3141	ZERO(0x010);	/* rq bah */
3142	ZERO(0x014);	/* rq inp */
3143	ZERO(0x018);	/* rq outp */
3144	ZERO(0x01c);	/* respq bah */
3145	ZERO(0x024);	/* respq outp */
3146	ZERO(0x020);	/* respq inp */
3147	ZERO(0x02c);	/* test control */
3148	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3149}
3150#undef ZERO
3151
3152#define ZERO(reg) writel(0, hc_mmio + (reg))
3153static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3154			unsigned int hc)
3155{
3156	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3157	u32 tmp;
3158
3159	ZERO(0x00c);
3160	ZERO(0x010);
3161	ZERO(0x014);
3162	ZERO(0x018);
3163
3164	tmp = readl(hc_mmio + 0x20);
3165	tmp &= 0x1c1c1c1c;
3166	tmp |= 0x03030303;
3167	writel(tmp, hc_mmio + 0x20);
3168}
3169#undef ZERO
3170
3171static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3172			unsigned int n_hc)
3173{
3174	unsigned int hc, port;
3175
3176	for (hc = 0; hc < n_hc; hc++) {
3177		for (port = 0; port < MV_PORTS_PER_HC; port++)
3178			mv5_reset_hc_port(hpriv, mmio,
3179					  (hc * MV_PORTS_PER_HC) + port);
3180
3181		mv5_reset_one_hc(hpriv, mmio, hc);
3182	}
3183
3184	return 0;
3185}
3186
3187#undef ZERO
3188#define ZERO(reg) writel(0, mmio + (reg))
3189static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3190{
3191	struct mv_host_priv *hpriv = host->private_data;
3192	u32 tmp;
3193
3194	tmp = readl(mmio + MV_PCI_MODE);
3195	tmp &= 0xff00ffff;
3196	writel(tmp, mmio + MV_PCI_MODE);
3197
3198	ZERO(MV_PCI_DISC_TIMER);
3199	ZERO(MV_PCI_MSI_TRIGGER);
3200	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3201	ZERO(MV_PCI_SERR_MASK);
3202	ZERO(hpriv->irq_cause_offset);
3203	ZERO(hpriv->irq_mask_offset);
3204	ZERO(MV_PCI_ERR_LOW_ADDRESS);
3205	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3206	ZERO(MV_PCI_ERR_ATTRIBUTE);
3207	ZERO(MV_PCI_ERR_COMMAND);
3208}
3209#undef ZERO
3210
3211static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3212{
3213	u32 tmp;
3214
3215	mv5_reset_flash(hpriv, mmio);
3216
3217	tmp = readl(mmio + GPIO_PORT_CTL);
3218	tmp &= 0x3;
3219	tmp |= (1 << 5) | (1 << 6);
3220	writel(tmp, mmio + GPIO_PORT_CTL);
3221}
3222
3223/**
3224 *      mv6_reset_hc - Perform the 6xxx global soft reset
3225 *      @hpriv: host private structure
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers
3226 *
3227 *      This routine only applies to 6xxx parts.
3228 *
3229 *      LOCKING:
3230 *      Inherited from caller.
3231 */
3232static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3233			unsigned int n_hc)
3234{
3235	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3236	int i, rc = 0;
3237	u32 t;
3238
3239	/* Following procedure defined in PCI "main command and status
3240	 * register" table.
3241	 */
3242	t = readl(reg);
3243	writel(t | STOP_PCI_MASTER, reg);
3244
3245	for (i = 0; i < 1000; i++) {
3246		udelay(1);
3247		t = readl(reg);
3248		if (PCI_MASTER_EMPTY & t)
3249			break;
3250	}
3251	if (!(PCI_MASTER_EMPTY & t)) {
3252		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3253		rc = 1;
3254		goto done;
3255	}
3256
3257	/* set reset */
3258	i = 5;
3259	do {
3260		writel(t | GLOB_SFT_RST, reg);
3261		t = readl(reg);
3262		udelay(1);
3263	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
3264
3265	if (!(GLOB_SFT_RST & t)) {
3266		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3267		rc = 1;
3268		goto done;
3269	}
3270
3271	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
3272	i = 5;
3273	do {
3274		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3275		t = readl(reg);
3276		udelay(1);
3277	} while ((GLOB_SFT_RST & t) && (i-- > 0));
3278
3279	if (GLOB_SFT_RST & t) {
3280		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3281		rc = 1;
3282	}
3283done:
3284	return rc;
3285}
3286
3287static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3288			   void __iomem *mmio)
3289{
3290	void __iomem *port_mmio;
3291	u32 tmp;
3292
3293	tmp = readl(mmio + RESET_CFG);
3294	if ((tmp & (1 << 0)) == 0) {
3295		hpriv->signal[idx].amps = 0x7 << 8;
3296		hpriv->signal[idx].pre = 0x1 << 5;
3297		return;
3298	}
3299
3300	port_mmio = mv_port_base(mmio, idx);
3301	tmp = readl(port_mmio + PHY_MODE2);
3302
3303	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3304	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3305}
3306
3307static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3308{
3309	writel(0x00000060, mmio + GPIO_PORT_CTL);
3310}
3311
3312static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3313			   unsigned int port)
3314{
3315	void __iomem *port_mmio = mv_port_base(mmio, port);
3316
3317	u32 hp_flags = hpriv->hp_flags;
3318	int fix_phy_mode2 =
3319		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3320	int fix_phy_mode4 =
3321		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3322	u32 m2, m3;
3323
3324	if (fix_phy_mode2) {
3325		m2 = readl(port_mmio + PHY_MODE2);
3326		m2 &= ~(1 << 16);
3327		m2 |= (1 << 31);
3328		writel(m2, port_mmio + PHY_MODE2);
3329
3330		udelay(200);
3331
3332		m2 = readl(port_mmio + PHY_MODE2);
3333		m2 &= ~((1 << 16) | (1 << 31));
3334		writel(m2, port_mmio + PHY_MODE2);
3335
3336		udelay(200);
3337	}
3338
3339	/*
3340	 * Gen-II/IIe PHY_MODE3 errata RM#2:
3341	 * Achieves better receiver noise performance than the h/w default:
3342	 */
3343	m3 = readl(port_mmio + PHY_MODE3);
3344	m3 = (m3 & 0x1f) | (0x5555601 << 5);
3345
3346	/* Guideline 88F5182 (GL# SATA-S11) */
3347	if (IS_SOC(hpriv))
3348		m3 &= ~0x1c;
3349
3350	if (fix_phy_mode4) {
3351		u32 m4 = readl(port_mmio + PHY_MODE4);
3352		/*
3353		 * Enforce reserved-bit restrictions on GenIIe devices only.
3354		 * For earlier chipsets, force only the internal config field
3355		 *  (workaround for errata FEr SATA#10 part 1).
3356		 */
3357		if (IS_GEN_IIE(hpriv))
3358			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3359		else
3360			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3361		writel(m4, port_mmio + PHY_MODE4);
3362	}
3363	/*
3364	 * Workaround for 60x1-B2 errata SATA#13:
3365	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3366	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3367	 * Or ensure we use writelfl() when writing PHY_MODE4.
3368	 */
3369	writel(m3, port_mmio + PHY_MODE3);
3370
3371	/* Revert values of pre-emphasis and signal amps to the saved ones */
3372	m2 = readl(port_mmio + PHY_MODE2);
3373
3374	m2 &= ~MV_M2_PREAMP_MASK;
3375	m2 |= hpriv->signal[port].amps;
3376	m2 |= hpriv->signal[port].pre;
3377	m2 &= ~(1 << 16);
3378
3379	/* according to mvSata 3.6.1, some IIE values are fixed */
3380	if (IS_GEN_IIE(hpriv)) {
3381		m2 &= ~0xC30FF01F;
3382		m2 |= 0x0000900F;
3383	}
3384
3385	writel(m2, port_mmio + PHY_MODE2);
3386}
3387
3388/* TODO: use the generic LED interface to configure the SATA Presence */
3389/* & Activity LEDs on the board */
3390static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3391				      void __iomem *mmio)
3392{
3393	return;
3394}
3395
3396static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3397			   void __iomem *mmio)
3398{
3399	void __iomem *port_mmio;
3400	u32 tmp;
3401
3402	port_mmio = mv_port_base(mmio, idx);
3403	tmp = readl(port_mmio + PHY_MODE2);
3404
3405	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3406	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3407}
3408
3409#undef ZERO
3410#define ZERO(reg) writel(0, port_mmio + (reg))
3411static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3412					void __iomem *mmio, unsigned int port)
3413{
3414	void __iomem *port_mmio = mv_port_base(mmio, port);
3415
3416	mv_reset_channel(hpriv, mmio, port);
3417
3418	ZERO(0x028);		/* command */
3419	writel(0x101f, port_mmio + EDMA_CFG);
3420	ZERO(0x004);		/* timer */
3421	ZERO(0x008);		/* irq err cause */
3422	ZERO(0x00c);		/* irq err mask */
3423	ZERO(0x010);		/* rq bah */
3424	ZERO(0x014);		/* rq inp */
3425	ZERO(0x018);		/* rq outp */
3426	ZERO(0x01c);		/* respq bah */
3427	ZERO(0x024);		/* respq outp */
3428	ZERO(0x020);		/* respq inp */
3429	ZERO(0x02c);		/* test control */
3430	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3431}
3432
3433#undef ZERO
3434
3435#define ZERO(reg) writel(0, hc_mmio + (reg))
3436static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3437				       void __iomem *mmio)
3438{
3439	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3440
3441	ZERO(0x00c);
3442	ZERO(0x010);
3443	ZERO(0x014);
3444
3445}
3446
3447#undef ZERO
3448
3449static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3450				  void __iomem *mmio, unsigned int n_hc)
3451{
3452	unsigned int port;
3453
3454	for (port = 0; port < hpriv->n_ports; port++)
3455		mv_soc_reset_hc_port(hpriv, mmio, port);
3456
3457	mv_soc_reset_one_hc(hpriv, mmio);
3458
3459	return 0;
3460}
3461
3462static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3463				      void __iomem *mmio)
3464{
3465	return;
3466}
3467
3468static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3469{
3470	return;
3471}
3472
3473static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3474				  void __iomem *mmio, unsigned int port)
3475{
3476	void __iomem *port_mmio = mv_port_base(mmio, port);
3477	u32	reg;
3478
3479	reg = readl(port_mmio + PHY_MODE3);
3480	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
3481	reg |= (0x1 << 27);
3482	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
3483	reg |= (0x1 << 29);
3484	writel(reg, port_mmio + PHY_MODE3);
3485
3486	reg = readl(port_mmio + PHY_MODE4);
3487	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3488	reg |= (0x1 << 16);
3489	writel(reg, port_mmio + PHY_MODE4);
3490
3491	reg = readl(port_mmio + PHY_MODE9_GEN2);
3492	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3493	reg |= 0x8;
3494	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3495	writel(reg, port_mmio + PHY_MODE9_GEN2);
3496
3497	reg = readl(port_mmio + PHY_MODE9_GEN1);
3498	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3499	reg |= 0x8;
3500	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3501	writel(reg, port_mmio + PHY_MODE9_GEN1);
3502}
3503
3504/**
3505 *	soc_is_65n - check if the SoC is a 65 nm device
3506 *	@hpriv: host private structure
 *
3507 *	Detect the type of the SoC by reading the PHYCFG_OFS register,
3508 *	which exists only on the 65 nm devices and holds a non-zero value
3509 *	there; reading it on older devices returns 0.
3510 */
3511static bool soc_is_65n(struct mv_host_priv *hpriv)
3512{
3513	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3514
3515	if (readl(port0_mmio + PHYCFG_OFS))
3516		return true;
3517	return false;
3518}
3519
3520static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3521{
3522	u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3523
3524	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
3525	if (want_gen2i)
3526		ifcfg |= (1 << 7);		/* enable gen2i speed */
3527	writelfl(ifcfg, port_mmio + SATA_IFCFG);
3528}
3529
3530static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3531			     unsigned int port_no)
3532{
3533	void __iomem *port_mmio = mv_port_base(mmio, port_no);
3534
3535	/*
3536	 * The datasheet warns against setting EDMA_RESET when EDMA is active
3537	 * (but doesn't say what the problem might be).  So we first try
3538	 * to disable the EDMA engine before doing the EDMA_RESET operation.
3539	 */
3540	mv_stop_edma_engine(port_mmio);
3541	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3542
3543	if (!IS_GEN_I(hpriv)) {
3544		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3545		mv_setup_ifcfg(port_mmio, 1);
3546	}
3547	/*
3548	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3549	 * link, and physical layers.  It resets all SATA interface registers
3550	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3551	 */
3552	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3553	udelay(25);	/* allow reset propagation */
3554	writelfl(0, port_mmio + EDMA_CMD);
3555
3556	hpriv->ops->phy_errata(hpriv, mmio, port_no);
3557
3558	if (IS_GEN_I(hpriv))
3559		mdelay(1);
3560}
3561
3562static void mv_pmp_select(struct ata_port *ap, int pmp)
3563{
3564	if (sata_pmp_supported(ap)) {
3565		void __iomem *port_mmio = mv_ap_base(ap);
3566		u32 reg = readl(port_mmio + SATA_IFCTL);
3567		int old = reg & 0xf;
3568
3569		if (old != pmp) {
3570			reg = (reg & ~0xf) | pmp;
3571			writelfl(reg, port_mmio + SATA_IFCTL);
3572		}
3573	}
3574}
3575
3576static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3577				unsigned long deadline)
3578{
3579	mv_pmp_select(link->ap, sata_srst_pmp(link));
3580	return sata_std_hardreset(link, class, deadline);
3581}
3582
3583static int mv_softreset(struct ata_link *link, unsigned int *class,
3584				unsigned long deadline)
3585{
3586	mv_pmp_select(link->ap, sata_srst_pmp(link));
3587	return ata_sff_softreset(link, class, deadline);
3588}
3589
3590static int mv_hardreset(struct ata_link *link, unsigned int *class,
3591			unsigned long deadline)
3592{
3593	struct ata_port *ap = link->ap;
3594	struct mv_host_priv *hpriv = ap->host->private_data;
3595	struct mv_port_priv *pp = ap->private_data;
3596	void __iomem *mmio = hpriv->base;
3597	int rc, attempts = 0, extra = 0;
3598	u32 sstatus;
3599	bool online;
3600
3601	mv_reset_channel(hpriv, mmio, ap->port_no);
3602	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3603	pp->pp_flags &=
3604	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3605
3606	/* Workaround for errata FEr SATA#10 (part 2) */
3607	do {
3608		const unsigned long *timing =
3609				sata_ehc_deb_timing(&link->eh_context);
3610
3611		rc = sata_link_hardreset(link, timing, deadline + extra,
3612					 &online, NULL);
3613		rc = online ? -EAGAIN : rc;
3614		if (rc)
3615			return rc;
3616		sata_scr_read(link, SCR_STATUS, &sstatus);
3617		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3618			/* Force 1.5gb/s link speed and try again */
3619			mv_setup_ifcfg(mv_ap_base(ap), 0);
3620			if (time_after(jiffies + HZ, deadline))
3621				extra = HZ; /* only extend it once, max */
3622		}
3623	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3624	mv_save_cached_regs(ap);
3625	mv_edma_cfg(ap, 0, 0);
3626
3627	return rc;
3628}
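/*
 * The magic SStatus values in the loop above decode per the SATA spec
 * as DET[3:0], SPD[7:4], IPM[11:8] (illustration only):
 *
 *	0x113: DET=3 device present, phy comm up, SPD=1 (1.5 Gbps)
 *	0x123: DET=3 device present, phy comm up, SPD=2 (3.0 Gbps)
 *	0x121: DET=1 device detected but no phy comm -- the FEr SATA#10
 *	       symptom that triggers the forced fallback to 1.5 Gbps
 *
 * so the hardreset retries until the link is either down (0x0) or
 * fully established.
 */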
3629
3630static void mv_eh_freeze(struct ata_port *ap)
3631{
3632	mv_stop_edma(ap);
3633	mv_enable_port_irqs(ap, 0);
3634}
3635
3636static void mv_eh_thaw(struct ata_port *ap)
3637{
3638	struct mv_host_priv *hpriv = ap->host->private_data;
3639	unsigned int port = ap->port_no;
3640	unsigned int hardport = mv_hardport_from_port(port);
3641	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3642	void __iomem *port_mmio = mv_ap_base(ap);
3643	u32 hc_irq_cause;
3644
3645	/* clear EDMA errors on this port */
3646	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3647
3648	/* clear pending irq events */
3649	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3650	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3651
3652	mv_enable_port_irqs(ap, ERR_IRQ);
3653}
3654
3655/**
3656 *      mv_port_init - Perform some early initialization on a single port.
3657 *      @port: libata data structure storing shadow register addresses
3658 *      @port_mmio: base address of the port
3659 *
3660 *      Initialize shadow register mmio addresses, clear outstanding
3661 *      interrupts on the port, and unmask interrupts for the future
3662 *      start of the port.
3663 *
3664 *      LOCKING:
3665 *      Inherited from caller.
3666 */
3667static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
3668{
3669	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3670
3671	/* PIO related setup
3672	 */
3673	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3674	port->error_addr =
3675		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3676	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3677	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3678	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3679	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3680	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3681	port->status_addr =
3682		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3683	/* special case: control/altstatus doesn't have ATA_REG_ address */
3684	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3685
3686	/* Clear any currently outstanding port interrupt conditions */
3687	serr = port_mmio + mv_scr_offset(SCR_ERROR);
3688	writelfl(readl(serr), serr);
3689	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3690
3691	/* unmask all non-transient EDMA error interrupts */
3692	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3693
3694	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3695		readl(port_mmio + EDMA_CFG),
3696		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3697		readl(port_mmio + EDMA_ERR_IRQ_MASK));
3698}
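/*
 * Address sketch (illustration only): with ATA_REG_DATA == 0 and
 * ATA_REG_STATUS == 7, the shadow block maps one 32-bit slot per
 * taskfile register:
 *
 *	data_addr   = port_mmio + SHD_BLK + 0x00
 *	status_addr = port_mmio + SHD_BLK + 0x1c	// 7 * sizeof(u32)
 *
 * which is why every assignment above scales the register index by
 * sizeof(u32).
 */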
3699
3700static unsigned int mv_in_pcix_mode(struct ata_host *host)
3701{
3702	struct mv_host_priv *hpriv = host->private_data;
3703	void __iomem *mmio = hpriv->base;
3704	u32 reg;
3705
3706	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3707		return 0;	/* not PCI-X capable */
3708	reg = readl(mmio + MV_PCI_MODE);
3709	if ((reg & MV_PCI_MODE_MASK) == 0)
3710		return 0;	/* conventional PCI mode */
3711	return 1;	/* chip is in PCI-X mode */
3712}
3713
3714static int mv_pci_cut_through_okay(struct ata_host *host)
3715{
3716	struct mv_host_priv *hpriv = host->private_data;
3717	void __iomem *mmio = hpriv->base;
3718	u32 reg;
3719
3720	if (!mv_in_pcix_mode(host)) {
3721		reg = readl(mmio + MV_PCI_COMMAND);
3722		if (reg & MV_PCI_COMMAND_MRDTRIG)
3723			return 0; /* not okay */
3724	}
3725	return 1; /* okay */
3726}
3727
3728static void mv_60x1b2_errata_pci7(struct ata_host *host)
3729{
3730	struct mv_host_priv *hpriv = host->private_data;
3731	void __iomem *mmio = hpriv->base;
3732
3733	/* workaround for 60x1-B2 errata PCI#7 */
3734	if (mv_in_pcix_mode(host)) {
3735		u32 reg = readl(mmio + MV_PCI_COMMAND);
3736		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3737	}
3738}
3739
3740static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3741{
3742	struct pci_dev *pdev = to_pci_dev(host->dev);
3743	struct mv_host_priv *hpriv = host->private_data;
3744	u32 hp_flags = hpriv->hp_flags;
3745
3746	switch (board_idx) {
3747	case chip_5080:
3748		hpriv->ops = &mv5xxx_ops;
3749		hp_flags |= MV_HP_GEN_I;
3750
3751		switch (pdev->revision) {
3752		case 0x1:
3753			hp_flags |= MV_HP_ERRATA_50XXB0;
3754			break;
3755		case 0x3:
3756			hp_flags |= MV_HP_ERRATA_50XXB2;
3757			break;
3758		default:
3759			dev_warn(&pdev->dev,
3760				 "Applying 50XXB2 workarounds to unknown rev\n");
3761			hp_flags |= MV_HP_ERRATA_50XXB2;
3762			break;
3763		}
3764		break;
3765
3766	case chip_504x:
3767	case chip_508x:
3768		hpriv->ops = &mv5xxx_ops;
3769		hp_flags |= MV_HP_GEN_I;
3770
3771		switch (pdev->revision) {
3772		case 0x0:
3773			hp_flags |= MV_HP_ERRATA_50XXB0;
3774			break;
3775		case 0x3:
3776			hp_flags |= MV_HP_ERRATA_50XXB2;
3777			break;
3778		default:
3779			dev_warn(&pdev->dev,
3780				 "Applying B2 workarounds to unknown rev\n");
3781			hp_flags |= MV_HP_ERRATA_50XXB2;
3782			break;
3783		}
3784		break;
3785
3786	case chip_604x:
3787	case chip_608x:
3788		hpriv->ops = &mv6xxx_ops;
3789		hp_flags |= MV_HP_GEN_II;
3790
3791		switch (pdev->revision) {
3792		case 0x7:
3793			mv_60x1b2_errata_pci7(host);
3794			hp_flags |= MV_HP_ERRATA_60X1B2;
3795			break;
3796		case 0x9:
3797			hp_flags |= MV_HP_ERRATA_60X1C0;
3798			break;
3799		default:
3800			dev_warn(&pdev->dev,
3801				 "Applying B2 workarounds to unknown rev\n");
3802			hp_flags |= MV_HP_ERRATA_60X1B2;
3803			break;
3804		}
3805		break;
3806
3807	case chip_7042:
3808		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3809		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3810		    (pdev->device == 0x2300 || pdev->device == 0x2310))
3811		{
3812			/*
3813			 * Highpoint RocketRAID PCIe 23xx series cards:
3814			 *
3815			 * Unconfigured drives are treated as "Legacy"
3816			 * by the BIOS, and it overwrites sector 8 with
3817			 * a "Lgcy" metadata block prior to Linux boot.
3818			 *
3819			 * Configured drives (RAID or JBOD) leave sector 8
3820			 * alone, but instead overwrite a high numbered
3821			 * sector for the RAID metadata.  This sector can
3822			 * be determined exactly, by truncating the physical
3823			 * drive capacity to a nice even GB value.
3824			 *
3825			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3826			 *
3827			 * Warn the user, lest they think we're just buggy.
3828			 */
3829			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3830				" BIOS CORRUPTS DATA on all attached drives,"
3831				" regardless of if/how they are configured."
3832				" BEWARE!\n");
3833			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3834				" use sectors 8-9 on \"Legacy\" drives,"
3835				" and avoid the final two gigabytes on"
3836				" all RocketRAID BIOS initialized drives.\n");
3837		}
3838		/* drop through */
3839	case chip_6042:
3840		hpriv->ops = &mv6xxx_ops;
3841		hp_flags |= MV_HP_GEN_IIE;
3842		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3843			hp_flags |= MV_HP_CUT_THROUGH;
3844
3845		switch (pdev->revision) {
3846		case 0x2: /* Rev.B0: the first/only public release */
3847			hp_flags |= MV_HP_ERRATA_60X1C0;
3848			break;
3849		default:
3850			dev_warn(&pdev->dev,
3851				 "Applying 60X1C0 workarounds to unknown rev\n");
3852			hp_flags |= MV_HP_ERRATA_60X1C0;
3853			break;
3854		}
3855		break;
3856	case chip_soc:
3857		if (soc_is_65n(hpriv))
3858			hpriv->ops = &mv_soc_65n_ops;
3859		else
3860			hpriv->ops = &mv_soc_ops;
3861		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3862			MV_HP_ERRATA_60X1C0;
3863		break;
3864
3865	default:
3866		dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
3867		return 1;
3868	}
3869
3870	hpriv->hp_flags = hp_flags;
3871	if (hp_flags & MV_HP_PCIE) {
3872		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
3873		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
3874		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
3875	} else {
3876		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
3877		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
3878		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
3879	}
3880
3881	return 0;
3882}
3883
3884/**
3885 *      mv_init_host - Perform some early initialization of the host.
3886 *	@host: ATA host to initialize
3887 *
3888 *      If possible, do an early global reset of the host.  Then do
3889 *      our port init and clear/unmask all/relevant host interrupts.
3890 *
3891 *      LOCKING:
3892 *      Inherited from caller.
3893 */
3894static int mv_init_host(struct ata_host *host)
3895{
3896	int rc = 0, n_hc, port, hc;
3897	struct mv_host_priv *hpriv = host->private_data;
3898	void __iomem *mmio = hpriv->base;
3899
3900	rc = mv_chip_id(host, hpriv->board_idx);
3901	if (rc)
3902		goto done;
3903
3904	if (IS_SOC(hpriv)) {
3905		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3906		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
3907	} else {
3908		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3909		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
3910	}
3911
3912	/* initialize shadow irq mask with register's value */
3913	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3914
3915	/* global interrupt mask: 0 == mask everything */
3916	mv_set_main_irq_mask(host, ~0, 0);
3917
3918	n_hc = mv_get_hc_count(host->ports[0]->flags);
3919
3920	for (port = 0; port < host->n_ports; port++)
3921		if (hpriv->ops->read_preamp)
3922			hpriv->ops->read_preamp(hpriv, port, mmio);
3923
3924	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3925	if (rc)
3926		goto done;
3927
3928	hpriv->ops->reset_flash(hpriv, mmio);
3929	hpriv->ops->reset_bus(host, mmio);
3930	hpriv->ops->enable_leds(hpriv, mmio);
3931
3932	for (port = 0; port < host->n_ports; port++) {
3933		struct ata_port *ap = host->ports[port];
3934		void __iomem *port_mmio = mv_port_base(mmio, port);
3935
3936		mv_port_init(&ap->ioaddr, port_mmio);
3937	}
3938
3939	for (hc = 0; hc < n_hc; hc++) {
3940		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3941
3942		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3943			"(before clear)=0x%08x\n", hc,
3944			readl(hc_mmio + HC_CFG),
3945			readl(hc_mmio + HC_IRQ_CAUSE));
3946
3947		/* Clear any currently outstanding hc interrupt conditions */
3948		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3949	}
3950
3951	if (!IS_SOC(hpriv)) {
3952		/* Clear any currently outstanding host interrupt conditions */
3953		writelfl(0, mmio + hpriv->irq_cause_offset);
3954
3955		/* and unmask interrupt generation for host regs */
3956		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3957	}
3958
3959	/*
3960	 * enable only global host interrupts for now.
3961	 * The per-port interrupts get done later as ports are set up.
3962	 */
3963	mv_set_main_irq_mask(host, 0, PCI_ERR);
3964	mv_set_irq_coalescing(host, irq_coalescing_io_count,
3965				    irq_coalescing_usecs);
3966done:
3967	return rc;
3968}
3969
3970static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3971{
3972	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3973							     MV_CRQB_Q_SZ, 0);
3974	if (!hpriv->crqb_pool)
3975		return -ENOMEM;
3976
3977	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3978							     MV_CRPB_Q_SZ, 0);
3979	if (!hpriv->crpb_pool)
3980		return -ENOMEM;
3981
3982	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3983							     MV_SG_TBL_SZ, 0);
3984	if (!hpriv->sg_tbl_pool)
3985		return -ENOMEM;
3986
3987	return 0;
3988}
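/*
 * Usage sketch (illustration only, not driver code): per-port queues
 * are later carved from these pools roughly as
 *
 *	dma_addr_t dma;
 *	void *mem = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &dma);
 *
 * and returned with dma_pool_free().  Because dmam_pool_create() is
 * device-managed, the pools themselves need no explicit teardown on
 * the error paths above.
 */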
3989
3990static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3991				 struct mbus_dram_target_info *dram)
3992{
3993	int i;
3994
3995	for (i = 0; i < 4; i++) {
3996		writel(0, hpriv->base + WINDOW_CTRL(i));
3997		writel(0, hpriv->base + WINDOW_BASE(i));
3998	}
3999
4000	for (i = 0; i < dram->num_cs; i++) {
4001		struct mbus_dram_window *cs = dram->cs + i;
4002
4003		writel(((cs->size - 1) & 0xffff0000) |
4004			(cs->mbus_attr << 8) |
4005			(dram->mbus_dram_target_id << 4) | 1,
4006			hpriv->base + WINDOW_CTRL(i));
4007		writel(cs->base, hpriv->base + WINDOW_BASE(i));
4008	}
4009}
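/*
 * Example encoding (illustration only): a hypothetical 256 MB chip
 * select with mbus_attr 0xe and target id 0 packs into WINDOW_CTRL as
 *
 *	  ((0x10000000 - 1) & 0xffff0000)	// size field: 0x0fff0000
 *	| (0xe << 8)				// mbus attribute
 *	| (0x0 << 4)				// dram target id
 *	| 1					// window enable
 *
 * i.e. 0x0fff0e01, while the window's base address goes separately
 * into WINDOW_BASE.
 */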
4010
4011/**
4012 *      mv_platform_probe - handle a positive probe of an SoC Marvell
4013 *      host
4014 *      @pdev: platform device found
4015 *
4016 *      LOCKING:
4017 *      Inherited from caller.
4018 */
4019static int mv_platform_probe(struct platform_device *pdev)
4020{
4021	const struct mv_sata_platform_data *mv_platform_data;
4022	const struct ata_port_info *ppi[] =
4023	    { &mv_port_info[chip_soc], NULL };
4024	struct ata_host *host;
4025	struct mv_host_priv *hpriv;
4026	struct resource *res;
4027	int n_ports, rc;
4028
4029	ata_print_version_once(&pdev->dev, DRV_VERSION);
4030
4031	/*
4032	 * Simple resource validation ..
4033	 */
4034	if (unlikely(pdev->num_resources != 2)) {
4035		dev_err(&pdev->dev, "invalid number of resources\n");
4036		return -EINVAL;
4037	}
4038
4039	/*
4040	 * Get the register base first
4041	 */
4042	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4043	if (res == NULL)
4044		return -EINVAL;
4045
4046	/* allocate host */
4047	mv_platform_data = pdev->dev.platform_data;
4048	n_ports = mv_platform_data->n_ports;
4049
4050	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4051	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4052
4053	if (!host || !hpriv)
4054		return -ENOMEM;
4055	host->private_data = hpriv;
4056	hpriv->n_ports = n_ports;
4057	hpriv->board_idx = chip_soc;
4058
4059	host->iomap = NULL;
4060	hpriv->base = devm_ioremap(&pdev->dev, res->start,
4061				   resource_size(res));
4062	hpriv->base -= SATAHC0_REG_BASE;
4063
4064#if defined(CONFIG_HAVE_CLK)
4065	hpriv->clk = clk_get(&pdev->dev, NULL);
4066	if (IS_ERR(hpriv->clk))
4067		dev_notice(&pdev->dev, "cannot get clkdev\n");
4068	else
4069		clk_enable(hpriv->clk);
4070#endif
4071
4072	/*
4073	 * (Re-)program MBUS remapping windows if we are asked to.
4074	 */
4075	if (mv_platform_data->dram != NULL)
4076		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4077
4078	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4079	if (rc)
4080		goto err;
4081
4082	/* initialize adapter */
4083	rc = mv_init_host(host);
4084	if (rc)
4085		goto err;
4086
4087	dev_info(&pdev->dev, "slots %u ports %d\n",
4088		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4089
4090	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
4091				 IRQF_SHARED, &mv6_sht);
4092err:
4093#if defined(CONFIG_HAVE_CLK)
4094	if (!IS_ERR(hpriv->clk)) {
4095		clk_disable(hpriv->clk);
4096		clk_put(hpriv->clk);
4097	}
4098#endif
4099
4100	return rc;
4101}
4102
4103/*
4105 *      mv_platform_remove - unplug a platform interface
4106 *      @pdev: platform device
4107 *
4108 *      A platform bus SATA device has been unplugged. Perform the needed
4109 *      cleanup. Also called on module unload for any active devices.
4110 */
4111static int __devexit mv_platform_remove(struct platform_device *pdev)
4112{
4113	struct device *dev = &pdev->dev;
4114	struct ata_host *host = dev_get_drvdata(dev);
4115#if defined(CONFIG_HAVE_CLK)
4116	struct mv_host_priv *hpriv = host->private_data;
4117#endif
4118	ata_host_detach(host);
4119
4120#if defined(CONFIG_HAVE_CLK)
4121	if (!IS_ERR(hpriv->clk)) {
4122		clk_disable(hpriv->clk);
4123		clk_put(hpriv->clk);
4124	}
4125#endif
4126	return 0;
4127}
4128
4129#ifdef CONFIG_PM
4130static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4131{
4132	struct ata_host *host = dev_get_drvdata(&pdev->dev);
4133	if (host)
4134		return ata_host_suspend(host, state);
4135	else
4136		return 0;
4137}
4138
4139static int mv_platform_resume(struct platform_device *pdev)
4140{
4141	struct ata_host *host = dev_get_drvdata(&pdev->dev);
4142	int ret;
4143
4144	if (host) {
4145		struct mv_host_priv *hpriv = host->private_data;
4146		const struct mv_sata_platform_data *mv_platform_data =
4147			pdev->dev.platform_data;
4148		/*
4149		 * (Re-)program MBUS remapping windows if we are asked to.
4150		 */
4151		if (mv_platform_data->dram != NULL)
4152			mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4153
4154		/* initialize adapter */
4155		ret = mv_init_host(host);
4156		if (ret) {
4157			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4158			return ret;
4159		}
4160		ata_host_resume(host);
4161	}
4162
4163	return 0;
4164}
4165#else
4166#define mv_platform_suspend NULL
4167#define mv_platform_resume NULL
4168#endif
4169
4170static struct platform_driver mv_platform_driver = {
4171	.probe			= mv_platform_probe,
4172	.remove			= __devexit_p(mv_platform_remove),
4173	.suspend		= mv_platform_suspend,
4174	.resume			= mv_platform_resume,
4175	.driver			= {
4176				   .name = DRV_NAME,
4177				   .owner = THIS_MODULE,
4178				  },
4179};
4180
4181
4182#ifdef CONFIG_PCI
4183static int mv_pci_init_one(struct pci_dev *pdev,
4184			   const struct pci_device_id *ent);
4185#ifdef CONFIG_PM
4186static int mv_pci_device_resume(struct pci_dev *pdev);
4187#endif
4188
4189
4190static struct pci_driver mv_pci_driver = {
4191	.name			= DRV_NAME,
4192	.id_table		= mv_pci_tbl,
4193	.probe			= mv_pci_init_one,
4194	.remove			= ata_pci_remove_one,
4195#ifdef CONFIG_PM
4196	.suspend		= ata_pci_device_suspend,
4197	.resume			= mv_pci_device_resume,
4198#endif
4199
4200};
4201
4202/* move to PCI layer or libata core? */
4203static int pci_go_64(struct pci_dev *pdev)
4204{
4205	int rc;
4206
4207	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4208		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4209		if (rc) {
4210			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4211			if (rc) {
4212				dev_err(&pdev->dev,
4213					"64-bit DMA enable failed\n");
4214				return rc;
4215			}
4216		}
4217	} else {
4218		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4219		if (rc) {
4220			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
4221			return rc;
4222		}
4223		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4224		if (rc) {
4225			dev_err(&pdev->dev,
4226				"32-bit consistent DMA enable failed\n");
4227			return rc;
4228		}
4229	}
4230
4231	return rc;
4232}
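/*
 * Sidebar (illustration only): newer kernels express this fallback
 * with the dma_set_mask_and_coherent() helper, e.g.
 *
 *	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (rc)
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *
 * the explicit streaming/consistent split above is the older
 * pci_set_dma_mask()-era idiom.
 */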
4233
4234/**
4235 *      mv_print_info - Dump key info to kernel log for perusal.
4236 *      @host: ATA host to print info about
4237 *
4238 *      FIXME: complete this.
4239 *
4240 *      LOCKING:
4241 *      Inherited from caller.
4242 */
4243static void mv_print_info(struct ata_host *host)
4244{
4245	struct pci_dev *pdev = to_pci_dev(host->dev);
4246	struct mv_host_priv *hpriv = host->private_data;
4247	u8 scc;
4248	const char *scc_s, *gen;
4249
4250	/* Use this to determine the HW stepping of the chip so we know
4251	 * what errata to work around
4252	 */
4253	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4254	if (scc == 0)
4255		scc_s = "SCSI";
4256	else if (scc == 0x01)
4257		scc_s = "RAID";
4258	else
4259		scc_s = "?";
4260
4261	if (IS_GEN_I(hpriv))
4262		gen = "I";
4263	else if (IS_GEN_II(hpriv))
4264		gen = "II";
4265	else if (IS_GEN_IIE(hpriv))
4266		gen = "IIE";
4267	else
4268		gen = "?";
4269
4270	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4271		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4272		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4273}
4274
4275/**
4276 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
4277 *      @pdev: PCI device found
4278 *      @ent: PCI device ID entry for the matched host
4279 *
4280 *      LOCKING:
4281 *      Inherited from caller.
4282 */
4283static int mv_pci_init_one(struct pci_dev *pdev,
4284			   const struct pci_device_id *ent)
4285{
4286	unsigned int board_idx = (unsigned int)ent->driver_data;
4287	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4288	struct ata_host *host;
4289	struct mv_host_priv *hpriv;
4290	int n_ports, port, rc;
4291
4292	ata_print_version_once(&pdev->dev, DRV_VERSION);
4293
4294	/* allocate host */
4295	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4296
4297	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4298	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4299	if (!host || !hpriv)
4300		return -ENOMEM;
4301	host->private_data = hpriv;
4302	hpriv->n_ports = n_ports;
4303	hpriv->board_idx = board_idx;
4304
4305	/* acquire resources */
4306	rc = pcim_enable_device(pdev);
4307	if (rc)
4308		return rc;
4309
4310	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4311	if (rc == -EBUSY)
4312		pcim_pin_device(pdev);
4313	if (rc)
4314		return rc;
4315	host->iomap = pcim_iomap_table(pdev);
4316	hpriv->base = host->iomap[MV_PRIMARY_BAR];
4317
4318	rc = pci_go_64(pdev);
4319	if (rc)
4320		return rc;
4321
4322	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4323	if (rc)
4324		return rc;
4325
4326	for (port = 0; port < host->n_ports; port++) {
4327		struct ata_port *ap = host->ports[port];
4328		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4329		unsigned int offset = port_mmio - hpriv->base;
4330
4331		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4332		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4333	}
4334
4335	/* initialize adapter */
4336	rc = mv_init_host(host);
4337	if (rc)
4338		return rc;
4339
4340	/* Enable message-signaled interrupts, if requested */
4341	if (msi && pci_enable_msi(pdev) == 0)
4342		hpriv->hp_flags |= MV_HP_FLAG_MSI;
4343
4344	mv_dump_pci_cfg(pdev, 0x68);
4345	mv_print_info(host);
4346
4347	pci_set_master(pdev);
4348	pci_try_set_mwi(pdev);
4349	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4350				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4351}
4352
4353#ifdef CONFIG_PM
4354static int mv_pci_device_resume(struct pci_dev *pdev)
4355{
4356	struct ata_host *host = dev_get_drvdata(&pdev->dev);
4357	int rc;
4358
4359	rc = ata_pci_device_do_resume(pdev);
4360	if (rc)
4361		return rc;
4362
4363	/* initialize adapter */
4364	rc = mv_init_host(host);
4365	if (rc)
4366		return rc;
4367
4368	ata_host_resume(host);
4369
4370	return 0;
4371}
4372#endif
4373#endif
4374
4375static int mv_platform_probe(struct platform_device *pdev);
4376static int __devexit mv_platform_remove(struct platform_device *pdev);
4377
4378static int __init mv_init(void)
4379{
4380	int rc = -ENODEV;
4381#ifdef CONFIG_PCI
4382	rc = pci_register_driver(&mv_pci_driver);
4383	if (rc < 0)
4384		return rc;
4385#endif
4386	rc = platform_driver_register(&mv_platform_driver);
4387
4388#ifdef CONFIG_PCI
4389	if (rc < 0)
4390		pci_unregister_driver(&mv_pci_driver);
4391#endif
4392	return rc;
4393}
4394
4395static void __exit mv_exit(void)
4396{
4397#ifdef CONFIG_PCI
4398	pci_unregister_driver(&mv_pci_driver);
4399#endif
4400	platform_driver_unregister(&mv_platform_driver);
4401}
4402
4403MODULE_AUTHOR("Brett Russ");
4404MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4405MODULE_LICENSE("GPL");
4406MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4407MODULE_VERSION(DRV_VERSION);
4408MODULE_ALIAS("platform:" DRV_NAME);
4409
4410module_init(mv_init);
4411module_exit(mv_exit);