   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  sata_nv.c - NVIDIA nForce SATA
   4 *
   5 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
   6 *  Copyright 2004 Andrew Chew
   7 *
   8 *  libata documentation is available via 'make {ps|pdf}docs',
   9 *  as Documentation/driver-api/libata.rst
  10 *
  11 *  No hardware documentation available outside of NVIDIA.
  12 *  This driver programs the NVIDIA SATA controller in a similar
  13 *  fashion as with other PCI IDE BMDMA controllers, with a few
  14 *  NV-specific details such as register offsets, SATA phy location,
  15 *  hotplug info, etc.
  16 *
  17 *  CK804/MCP04 controllers support an alternate programming interface
  18 *  similar to the ADMA specification (with some modifications).
  19 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
  20 *  sent through the legacy interface.
  21 */
  22
  23#include <linux/kernel.h>
  24#include <linux/module.h>
  25#include <linux/gfp.h>
  26#include <linux/pci.h>
  27#include <linux/blkdev.h>
  28#include <linux/delay.h>
  29#include <linux/interrupt.h>
  30#include <linux/device.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <linux/libata.h>
  34#include <trace/events/libata.h>
  35
  36#define DRV_NAME			"sata_nv"
  37#define DRV_VERSION			"3.5"
  38
  39#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
  40
  41enum {
  42	NV_MMIO_BAR			= 5,
  43
  44	NV_PORTS			= 2,
  45	NV_PIO_MASK			= ATA_PIO4,
  46	NV_MWDMA_MASK			= ATA_MWDMA2,
  47	NV_UDMA_MASK			= ATA_UDMA6,
  48	NV_PORT0_SCR_REG_OFFSET		= 0x00,
  49	NV_PORT1_SCR_REG_OFFSET		= 0x40,
  50
  51	/* INT_STATUS/ENABLE */
  52	NV_INT_STATUS			= 0x10,
  53	NV_INT_ENABLE			= 0x11,
  54	NV_INT_STATUS_CK804		= 0x440,
  55	NV_INT_ENABLE_CK804		= 0x441,
  56
  57	/* INT_STATUS/ENABLE bits */
  58	NV_INT_DEV			= 0x01,
  59	NV_INT_PM			= 0x02,
  60	NV_INT_ADDED			= 0x04,
  61	NV_INT_REMOVED			= 0x08,
  62
  63	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
  64
  65	NV_INT_ALL			= 0x0f,
  66	NV_INT_MASK			= NV_INT_DEV |
  67					  NV_INT_ADDED | NV_INT_REMOVED,
  68
  69	/* INT_CONFIG */
  70	NV_INT_CONFIG			= 0x12,
  71	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
  72
  73	// For PCI config register 20
  74	NV_MCP_SATA_CFG_20		= 0x50,
  75	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
  76	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
  77	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
  78	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
  79	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
  80
  81	NV_ADMA_MAX_CPBS		= 32,
  82	NV_ADMA_CPB_SZ			= 128,
  83	NV_ADMA_APRD_SZ			= 16,
  84	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
  85					   NV_ADMA_APRD_SZ,
  86	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
  87	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
  88	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
  89					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
  90
  91	/* BAR5 offset to ADMA general registers */
  92	NV_ADMA_GEN			= 0x400,
  93	NV_ADMA_GEN_CTL			= 0x00,
  94	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
  95
  96	/* BAR5 offset to ADMA ports */
  97	NV_ADMA_PORT			= 0x480,
  98
  99	/* size of ADMA port register space  */
 100	NV_ADMA_PORT_SIZE		= 0x100,
 101
 102	/* ADMA port registers */
 103	NV_ADMA_CTL			= 0x40,
 104	NV_ADMA_CPB_COUNT		= 0x42,
 105	NV_ADMA_NEXT_CPB_IDX		= 0x43,
 106	NV_ADMA_STAT			= 0x44,
 107	NV_ADMA_CPB_BASE_LOW		= 0x48,
 108	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
 109	NV_ADMA_APPEND			= 0x50,
 110	NV_ADMA_NOTIFIER		= 0x68,
 111	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
 112
 113	/* NV_ADMA_CTL register bits */
 114	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
 115	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
 116	NV_ADMA_CTL_GO			= (1 << 7),
 117	NV_ADMA_CTL_AIEN		= (1 << 8),
 118	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
 119	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
 120
 121	/* CPB response flag bits */
 122	NV_CPB_RESP_DONE		= (1 << 0),
 123	NV_CPB_RESP_ATA_ERR		= (1 << 3),
 124	NV_CPB_RESP_CMD_ERR		= (1 << 4),
 125	NV_CPB_RESP_CPB_ERR		= (1 << 7),
 126
 127	/* CPB control flag bits */
 128	NV_CPB_CTL_CPB_VALID		= (1 << 0),
 129	NV_CPB_CTL_QUEUE		= (1 << 1),
 130	NV_CPB_CTL_APRD_VALID		= (1 << 2),
 131	NV_CPB_CTL_IEN			= (1 << 3),
 132	NV_CPB_CTL_FPDMA		= (1 << 4),
 133
 134	/* APRD flags */
 135	NV_APRD_WRITE			= (1 << 1),
 136	NV_APRD_END			= (1 << 2),
 137	NV_APRD_CONT			= (1 << 3),
 138
 139	/* NV_ADMA_STAT flags */
 140	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
 141	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
 142	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
 143	NV_ADMA_STAT_CPBERR		= (1 << 4),
 144	NV_ADMA_STAT_SERROR		= (1 << 5),
 145	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
 146	NV_ADMA_STAT_IDLE		= (1 << 8),
 147	NV_ADMA_STAT_LEGACY		= (1 << 9),
 148	NV_ADMA_STAT_STOPPED		= (1 << 10),
 149	NV_ADMA_STAT_DONE		= (1 << 12),
 150	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
 151					  NV_ADMA_STAT_TIMEOUT,
 152
 153	/* port flags */
 154	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
 155	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
 156
 157	/* MCP55 reg offset */
 158	NV_CTL_MCP55			= 0x400,
 159	NV_INT_STATUS_MCP55		= 0x440,
 160	NV_INT_ENABLE_MCP55		= 0x444,
 161	NV_NCQ_REG_MCP55		= 0x448,
 162
 163	/* MCP55 */
 164	NV_INT_ALL_MCP55		= 0xffff,
 165	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
 166	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
 167
  168	/* SWNCQ ENABLE BITS */
 169	NV_CTL_PRI_SWNCQ		= 0x02,
 170	NV_CTL_SEC_SWNCQ		= 0x04,
 171
  172	/* SW NCQ status bits */
 173	NV_SWNCQ_IRQ_DEV		= (1 << 0),
 174	NV_SWNCQ_IRQ_PM			= (1 << 1),
 175	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
 176	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
 177
 178	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
 179	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
 180	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
 181	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
 182
 183	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
 184					  NV_SWNCQ_IRQ_REMOVED,
 185
 186};
 187
 188/* ADMA Physical Region Descriptor - one SG segment */
 189struct nv_adma_prd {
 190	__le64			addr;
 191	__le32			len;
 192	u8			flags;
 193	u8			packet_len;
 194	__le16			reserved;
 195};
 196
 197enum nv_adma_regbits {
 198	CMDEND	= (1 << 15),		/* end of command list */
 199	WNB	= (1 << 14),		/* wait-not-BSY */
 200	IGN	= (1 << 13),		/* ignore this entry */
 201	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
 202	DA2	= (1 << (2 + 8)),
 203	DA1	= (1 << (1 + 8)),
 204	DA0	= (1 << (0 + 8)),
 205};
 206
 207/* ADMA Command Parameter Block
 208   The first 5 SG segments are stored inside the Command Parameter Block itself.
 209   If there are more than 5 segments the remainder are stored in a separate
 210   memory area indicated by next_aprd. */
 211struct nv_adma_cpb {
 212	u8			resp_flags;    /* 0 */
 213	u8			reserved1;     /* 1 */
 214	u8			ctl_flags;     /* 2 */
 215	/* len is length of taskfile in 64 bit words */
 216	u8			len;		/* 3  */
 217	u8			tag;           /* 4 */
 218	u8			next_cpb_idx;  /* 5 */
 219	__le16			reserved2;     /* 6-7 */
 220	__le16			tf[12];        /* 8-31 */
 221	struct nv_adma_prd	aprd[5];       /* 32-111 */
 222	__le64			next_aprd;     /* 112-119 */
 223	__le64			reserved3;     /* 120-127 */
 224};
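     /*
      * Layout arithmetic implied by the ADMA constants above: each command tag
      * owns a 1024-byte slot -- a 128-byte CPB with 5 inline APRDs plus
      * (1024 - 128) / 16 = 56 external APRDs, i.e. 61 SG entries per command
      * and 32 * 1024 bytes of coherent memory per port.
      */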
 225
 226
 227struct nv_adma_port_priv {
 228	struct nv_adma_cpb	*cpb;
 229	dma_addr_t		cpb_dma;
 230	struct nv_adma_prd	*aprd;
 231	dma_addr_t		aprd_dma;
 232	void __iomem		*ctl_block;
 233	void __iomem		*gen_block;
 234	void __iomem		*notifier_clear_block;
 235	u64			adma_dma_mask;
 236	u8			flags;
 237	int			last_issue_ncq;
 238};
 239
 240struct nv_host_priv {
 241	unsigned long		type;
 242};
 243
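     /*
      * Software-NCQ deferral FIFO: tag[] is a circular buffer indexed by
      * head/tail, and defer_bits mirrors which tags are currently queued.
      */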
 244struct defer_queue {
 245	u32		defer_bits;
 246	unsigned int	head;
 247	unsigned int	tail;
 248	unsigned int	tag[ATA_MAX_QUEUE];
 249};
 250
 251enum ncq_saw_flag_list {
 252	ncq_saw_d2h	= (1U << 0),
 253	ncq_saw_dmas	= (1U << 1),
 254	ncq_saw_sdb	= (1U << 2),
 255	ncq_saw_backout	= (1U << 3),
 256};
 257
 258struct nv_swncq_port_priv {
 259	struct ata_bmdma_prd *prd;	 /* our SG list */
 260	dma_addr_t	prd_dma; /* and its DMA mapping */
 261	void __iomem	*sactive_block;
 262	void __iomem	*irq_block;
 263	void __iomem	*tag_block;
 264	u32		qc_active;
 265
 266	unsigned int	last_issue_tag;
 267
 268	/* fifo circular queue to store deferral command */
 269	struct defer_queue defer_queue;
 270
 271	/* for NCQ interrupt analysis */
 272	u32		dhfis_bits;
 273	u32		dmafis_bits;
 274	u32		sdbfis_bits;
 275
 276	unsigned int	ncq_flags;
 277};
 278
 279
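     /* Interrupt-pending bit for a port in the ADMA general control register:
        bit 19 for port 0, bit 31 (19 + 12) for port 1. */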
 280#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
 281
 282static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 283#ifdef CONFIG_PM_SLEEP
 284static int nv_pci_device_resume(struct pci_dev *pdev);
 285#endif
 286static void nv_ck804_host_stop(struct ata_host *host);
 287static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
 288static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
 289static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
 290static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 291static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 292
 293static int nv_hardreset(struct ata_link *link, unsigned int *class,
 294			unsigned long deadline);
 295static void nv_nf2_freeze(struct ata_port *ap);
 296static void nv_nf2_thaw(struct ata_port *ap);
 297static void nv_ck804_freeze(struct ata_port *ap);
 298static void nv_ck804_thaw(struct ata_port *ap);
 299static int nv_adma_slave_config(struct scsi_device *sdev);
 300static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 301static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
 302static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 303static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 304static void nv_adma_irq_clear(struct ata_port *ap);
 305static int nv_adma_port_start(struct ata_port *ap);
 306static void nv_adma_port_stop(struct ata_port *ap);
 307#ifdef CONFIG_PM
 308static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
 309static int nv_adma_port_resume(struct ata_port *ap);
 310#endif
 311static void nv_adma_freeze(struct ata_port *ap);
 312static void nv_adma_thaw(struct ata_port *ap);
 313static void nv_adma_error_handler(struct ata_port *ap);
 314static void nv_adma_host_stop(struct ata_host *host);
 315static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
 316static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 317
 318static void nv_mcp55_thaw(struct ata_port *ap);
 319static void nv_mcp55_freeze(struct ata_port *ap);
 320static void nv_swncq_error_handler(struct ata_port *ap);
 321static int nv_swncq_slave_config(struct scsi_device *sdev);
 322static int nv_swncq_port_start(struct ata_port *ap);
 323static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 324static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 325static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 326static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
 327static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
 328#ifdef CONFIG_PM
 329static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
 330static int nv_swncq_port_resume(struct ata_port *ap);
 331#endif
 332
 333enum nv_host_type
 334{
 335	GENERIC,
 336	NFORCE2,
 337	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
 338	CK804,
 339	ADMA,
 340	MCP5x,
 341	SWNCQ,
 342};
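     /* These values double as indices into nv_port_info[] below. */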
 343
 344static const struct pci_device_id nv_pci_tbl[] = {
 345	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
 346	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
 347	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
 348	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
 349	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
 350	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
 351	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
 352	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
 353	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
 354	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
 355	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
 356	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 357	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 358	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
 359
 360	{ } /* terminate list */
 361};
 362
 363static struct pci_driver nv_pci_driver = {
 364	.name			= DRV_NAME,
 365	.id_table		= nv_pci_tbl,
 366	.probe			= nv_init_one,
 367#ifdef CONFIG_PM_SLEEP
 368	.suspend		= ata_pci_device_suspend,
 369	.resume			= nv_pci_device_resume,
 370#endif
 371	.remove			= ata_pci_remove_one,
 372};
 373
 374static const struct scsi_host_template nv_sht = {
 375	ATA_BMDMA_SHT(DRV_NAME),
 376};
 377
 378static const struct scsi_host_template nv_adma_sht = {
 379	__ATA_BASE_SHT(DRV_NAME),
 380	.can_queue		= NV_ADMA_MAX_CPBS,
 381	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
 382	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
 383	.slave_configure	= nv_adma_slave_config,
 384	.sdev_groups		= ata_ncq_sdev_groups,
 385	.change_queue_depth     = ata_scsi_change_queue_depth,
 386	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 387};
 388
 389static const struct scsi_host_template nv_swncq_sht = {
 390	__ATA_BASE_SHT(DRV_NAME),
 391	.can_queue		= ATA_MAX_QUEUE - 1,
 392	.sg_tablesize		= LIBATA_MAX_PRD,
 393	.dma_boundary		= ATA_DMA_BOUNDARY,
 394	.slave_configure	= nv_swncq_slave_config,
 395	.sdev_groups		= ata_ncq_sdev_groups,
 396	.change_queue_depth     = ata_scsi_change_queue_depth,
 397	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 398};
 399
 400/*
 401 * NV SATA controllers have various different problems with hardreset
 402 * protocol depending on the specific controller and device.
 403 *
 404 * GENERIC:
 405 *
 406 *  bko11195 reports that link doesn't come online after hardreset on
 407 *  generic nv's and there have been several other similar reports on
 408 *  linux-ide.
 409 *
 410 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 411 *  softreset.
 412 *
 413 * NF2/3:
 414 *
 415 *  bko3352 reports nf2/3 controllers can't determine device signature
 416 *  reliably after hardreset.  The following thread reports detection
 417 *  failure on cold boot with the standard debouncing timing.
 418 *
 419 *  http://thread.gmane.org/gmane.linux.ide/34098
 420 *
 421 *  bko12176 reports that hardreset fails to bring up the link during
 422 *  boot on nf2.
 423 *
 424 * CK804:
 425 *
 426 *  For initial probing after boot and hot plugging, hardreset mostly
  427 *  works fine on CK804 but curiously, reprobing an occupied port
 428 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
  429 *  FIS in a somewhat nondeterministic way.
 430 *
 431 * SWNCQ:
 432 *
 433 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 434 *  hardreset should be used and hardreset can't report proper
  435 *  signature, which suggests that mcp5x is closer to nf2 as far as
 436 *  reset quirkiness is concerned.
 437 *
 438 *  bko12703 reports that boot probing fails for intel SSD with
 439 *  hardreset.  Link fails to come online.  Softreset works fine.
 440 *
 441 * The failures are varied but the following patterns seem true for
 442 * all flavors.
 443 *
 444 * - Softreset during boot always works.
 445 *
 446 * - Hardreset during boot sometimes fails to bring up the link on
  447 *   certain combinations and device signature acquisition is
 448 *   unreliable.
 449 *
 450 * - Hardreset is often necessary after hotplug.
 451 *
 452 * So, preferring softreset for boot probing and error handling (as
 453 * hardreset might bring down the link) but using hardreset for
 454 * post-boot probing should work around the above issues in most
 455 * cases.  Define nv_hardreset() which only kicks in for post-boot
 456 * probing and use it for all variants.
 457 */
 458static struct ata_port_operations nv_generic_ops = {
 459	.inherits		= &ata_bmdma_port_ops,
 460	.lost_interrupt		= ATA_OP_NULL,
 461	.scr_read		= nv_scr_read,
 462	.scr_write		= nv_scr_write,
 463	.hardreset		= nv_hardreset,
 464};
 465
 466static struct ata_port_operations nv_nf2_ops = {
 467	.inherits		= &nv_generic_ops,
 468	.freeze			= nv_nf2_freeze,
 469	.thaw			= nv_nf2_thaw,
 470};
 471
 472static struct ata_port_operations nv_ck804_ops = {
 473	.inherits		= &nv_generic_ops,
 474	.freeze			= nv_ck804_freeze,
 475	.thaw			= nv_ck804_thaw,
 476	.host_stop		= nv_ck804_host_stop,
 477};
 478
 479static struct ata_port_operations nv_adma_ops = {
 480	.inherits		= &nv_ck804_ops,
 481
 482	.check_atapi_dma	= nv_adma_check_atapi_dma,
 483	.sff_tf_read		= nv_adma_tf_read,
 484	.qc_defer		= ata_std_qc_defer,
 485	.qc_prep		= nv_adma_qc_prep,
 486	.qc_issue		= nv_adma_qc_issue,
 487	.sff_irq_clear		= nv_adma_irq_clear,
 488
 489	.freeze			= nv_adma_freeze,
 490	.thaw			= nv_adma_thaw,
 491	.error_handler		= nv_adma_error_handler,
 492	.post_internal_cmd	= nv_adma_post_internal_cmd,
 493
 494	.port_start		= nv_adma_port_start,
 495	.port_stop		= nv_adma_port_stop,
 496#ifdef CONFIG_PM
 497	.port_suspend		= nv_adma_port_suspend,
 498	.port_resume		= nv_adma_port_resume,
 499#endif
 500	.host_stop		= nv_adma_host_stop,
 501};
 502
 503static struct ata_port_operations nv_swncq_ops = {
 504	.inherits		= &nv_generic_ops,
 505
 506	.qc_defer		= ata_std_qc_defer,
 507	.qc_prep		= nv_swncq_qc_prep,
 508	.qc_issue		= nv_swncq_qc_issue,
 509
 510	.freeze			= nv_mcp55_freeze,
 511	.thaw			= nv_mcp55_thaw,
 512	.error_handler		= nv_swncq_error_handler,
 513
 514#ifdef CONFIG_PM
 515	.port_suspend		= nv_swncq_port_suspend,
 516	.port_resume		= nv_swncq_port_resume,
 517#endif
 518	.port_start		= nv_swncq_port_start,
 519};
 520
 521struct nv_pi_priv {
 522	irq_handler_t			irq_handler;
 523	const struct scsi_host_template	*sht;
 524};
 525
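     /* private_data helper: point at an anonymous compound literal bundling a
        flavor's interrupt handler with its SCSI host template. */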
 526#define NV_PI_PRIV(_irq_handler, _sht) \
 527	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
 528
 529static const struct ata_port_info nv_port_info[] = {
 530	/* generic */
 531	{
 532		.flags		= ATA_FLAG_SATA,
 533		.pio_mask	= NV_PIO_MASK,
 534		.mwdma_mask	= NV_MWDMA_MASK,
 535		.udma_mask	= NV_UDMA_MASK,
 536		.port_ops	= &nv_generic_ops,
 537		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 538	},
 539	/* nforce2/3 */
 540	{
 541		.flags		= ATA_FLAG_SATA,
 542		.pio_mask	= NV_PIO_MASK,
 543		.mwdma_mask	= NV_MWDMA_MASK,
 544		.udma_mask	= NV_UDMA_MASK,
 545		.port_ops	= &nv_nf2_ops,
 546		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
 547	},
 548	/* ck804 */
 549	{
 550		.flags		= ATA_FLAG_SATA,
 551		.pio_mask	= NV_PIO_MASK,
 552		.mwdma_mask	= NV_MWDMA_MASK,
 553		.udma_mask	= NV_UDMA_MASK,
 554		.port_ops	= &nv_ck804_ops,
 555		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
 556	},
 557	/* ADMA */
 558	{
 559		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
 560		.pio_mask	= NV_PIO_MASK,
 561		.mwdma_mask	= NV_MWDMA_MASK,
 562		.udma_mask	= NV_UDMA_MASK,
 563		.port_ops	= &nv_adma_ops,
 564		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
 565	},
 566	/* MCP5x */
 567	{
 568		.flags		= ATA_FLAG_SATA,
 569		.pio_mask	= NV_PIO_MASK,
 570		.mwdma_mask	= NV_MWDMA_MASK,
 571		.udma_mask	= NV_UDMA_MASK,
 572		.port_ops	= &nv_generic_ops,
 573		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 574	},
 575	/* SWNCQ */
 576	{
 577		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
 578		.pio_mask	= NV_PIO_MASK,
 579		.mwdma_mask	= NV_MWDMA_MASK,
 580		.udma_mask	= NV_UDMA_MASK,
 581		.port_ops	= &nv_swncq_ops,
 582		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
 583	},
 584};
 585
 586MODULE_AUTHOR("NVIDIA");
 587MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
 588MODULE_LICENSE("GPL");
 589MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 590MODULE_VERSION(DRV_VERSION);
 591
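     /* Driver-wide feature switches; defaults: ADMA off, SWNCQ on, MSI off. */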
 592static bool adma_enabled;
 593static bool swncq_enabled = true;
 594static bool msi_enabled;
 595
 596static void nv_adma_register_mode(struct ata_port *ap)
 597{
 598	struct nv_adma_port_priv *pp = ap->private_data;
 599	void __iomem *mmio = pp->ctl_block;
 600	u16 tmp, status;
 601	int count = 0;
 602
 603	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
 604		return;
 605
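     	/* Poll for idle: 20 iterations of ndelay(50) gives roughly a 1 us budget. */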
 606	status = readw(mmio + NV_ADMA_STAT);
 607	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
 608		ndelay(50);
 609		status = readw(mmio + NV_ADMA_STAT);
 610		count++;
 611	}
 612	if (count == 20)
 613		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
 614			      status);
 615
 616	tmp = readw(mmio + NV_ADMA_CTL);
 617	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 618
 619	count = 0;
 620	status = readw(mmio + NV_ADMA_STAT);
 621	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
 622		ndelay(50);
 623		status = readw(mmio + NV_ADMA_STAT);
 624		count++;
 625	}
 626	if (count == 20)
 627		ata_port_warn(ap,
 628			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
 629			      status);
 630
 631	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
 632}
 633
 634static void nv_adma_mode(struct ata_port *ap)
 635{
 636	struct nv_adma_port_priv *pp = ap->private_data;
 637	void __iomem *mmio = pp->ctl_block;
 638	u16 tmp, status;
 639	int count = 0;
 640
 641	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
 642		return;
 643
 644	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 645
 646	tmp = readw(mmio + NV_ADMA_CTL);
 647	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 648
 649	status = readw(mmio + NV_ADMA_STAT);
 650	while (((status & NV_ADMA_STAT_LEGACY) ||
 651	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
 652		ndelay(50);
 653		status = readw(mmio + NV_ADMA_STAT);
 654		count++;
 655	}
 656	if (count == 20)
 657		ata_port_warn(ap,
 658			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
 659			status);
 660
 661	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
 662}
 663
 664static int nv_adma_slave_config(struct scsi_device *sdev)
 665{
 666	struct ata_port *ap = ata_shost_to_port(sdev->host);
 667	struct nv_adma_port_priv *pp = ap->private_data;
 668	struct nv_adma_port_priv *port0, *port1;
 669	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 670	unsigned long segment_boundary, flags;
 671	unsigned short sg_tablesize;
 672	int rc;
 673	int adma_enable;
 674	u32 current_reg, new_reg, config_mask;
 675
 676	rc = ata_scsi_slave_config(sdev);
 677
 678	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
 679		/* Not a proper libata device, ignore */
 680		return rc;
 681
 682	spin_lock_irqsave(ap->lock, flags);
 683
 684	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 685		/*
 686		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
 687		 * Therefore ATAPI commands are sent through the legacy interface.
 688		 * However, the legacy interface only supports 32-bit DMA.
 689		 * Restrict DMA parameters as required by the legacy interface
 690		 * when an ATAPI device is connected.
 691		 */
 692		segment_boundary = ATA_DMA_BOUNDARY;
 693		/* Subtract 1 since an extra entry may be needed for padding, see
 694		   libata-scsi.c */
 695		sg_tablesize = LIBATA_MAX_PRD - 1;
 696
 697		/* Since the legacy DMA engine is in use, we need to disable ADMA
 698		   on the port. */
 699		adma_enable = 0;
 700		nv_adma_register_mode(ap);
 701	} else {
 702		segment_boundary = NV_ADMA_DMA_BOUNDARY;
 703		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
 704		adma_enable = 1;
 705	}
 706
 707	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
 708
 709	if (ap->port_no == 1)
 710		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
 711			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
 712	else
 713		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
 714			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
 715
 716	if (adma_enable) {
 717		new_reg = current_reg | config_mask;
 718		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
 719	} else {
 720		new_reg = current_reg & ~config_mask;
 721		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
 722	}
 723
 724	if (current_reg != new_reg)
 725		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 726
 727	port0 = ap->host->ports[0]->private_data;
 728	port1 = ap->host->ports[1]->private_data;
 729	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 730	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
 731		/*
 732		 * We have to set the DMA mask to 32-bit if either port is in
 733		 * ATAPI mode, since they are on the same PCI device which is
 734		 * used for DMA mapping.  If either SCSI device is not allocated
 735		 * yet, it's OK since that port will discover its correct
 736		 * setting when it does get allocated.
 737		 */
 738		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 739	} else {
 740		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
 741	}
 742
 743	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
 744	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 745	ata_port_info(ap,
 746		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 747		      (unsigned long long)*ap->host->dev->dma_mask,
 748		      segment_boundary, sg_tablesize);
 749
 750	spin_unlock_irqrestore(ap->lock, flags);
 751
 752	return rc;
 753}
 754
 755static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
 756{
 757	struct nv_adma_port_priv *pp = qc->ap->private_data;
 758	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 759}
 760
 761static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 762{
 763	/* Other than when internal or pass-through commands are executed,
 764	   the only time this function will be called in ADMA mode will be
 765	   if a command fails. In the failure case we don't care about going
 766	   into register mode with ADMA commands pending, as the commands will
 767	   all shortly be aborted anyway. We assume that NCQ commands are not
 768	   issued via passthrough, which is the only way that switching into
 769	   ADMA mode could abort outstanding commands. */
 770	nv_adma_register_mode(ap);
 771
 772	ata_sff_tf_read(ap, tf);
 773}
 774
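     /*
      * Pack a taskfile into CPB words: each __le16 entry is
      * (shadow register index << 8) | value, with the CMDEND/WNB/IGN control
      * flags from enum nv_adma_regbits OR'd into the upper bits as needed.
      */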
 775static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
 776{
 777	unsigned int idx = 0;
 778
 779	if (tf->flags & ATA_TFLAG_ISADDR) {
 780		if (tf->flags & ATA_TFLAG_LBA48) {
 781			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
 782			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
 783			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
 784			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
 785			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
 786			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
 787		} else
 788			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
 789
 790		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
 791		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
 792		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
 793		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
 794	}
 795
 796	if (tf->flags & ATA_TFLAG_DEVICE)
 797		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
 798
 799	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
 800
 801	while (idx < 12)
 802		cpb[idx++] = cpu_to_le16(IGN);
 803
 804	return idx;
 805}
 806
 807static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 808{
 809	struct nv_adma_port_priv *pp = ap->private_data;
 810	u8 flags = pp->cpb[cpb_num].resp_flags;
 811
 812	ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);
 813
 814	if (unlikely((force_err ||
 815		     flags & (NV_CPB_RESP_ATA_ERR |
 816			      NV_CPB_RESP_CMD_ERR |
 817			      NV_CPB_RESP_CPB_ERR)))) {
 818		struct ata_eh_info *ehi = &ap->link.eh_info;
 819		int freeze = 0;
 820
 821		ata_ehi_clear_desc(ehi);
 822		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
 823		if (flags & NV_CPB_RESP_ATA_ERR) {
 824			ata_ehi_push_desc(ehi, "ATA error");
 825			ehi->err_mask |= AC_ERR_DEV;
 826		} else if (flags & NV_CPB_RESP_CMD_ERR) {
 827			ata_ehi_push_desc(ehi, "CMD error");
 828			ehi->err_mask |= AC_ERR_DEV;
 829		} else if (flags & NV_CPB_RESP_CPB_ERR) {
 830			ata_ehi_push_desc(ehi, "CPB error");
 831			ehi->err_mask |= AC_ERR_SYSTEM;
 832			freeze = 1;
 833		} else {
 834			/* notifier error, but no error in CPB flags? */
 835			ata_ehi_push_desc(ehi, "unknown");
 836			ehi->err_mask |= AC_ERR_OTHER;
 837			freeze = 1;
 838		}
 839		/* Kill all commands. EH will determine what actually failed. */
 840		if (freeze)
 841			ata_port_freeze(ap);
 842		else
 843			ata_port_abort(ap);
 844		return -1;
 845	}
 846
 847	if (likely(flags & NV_CPB_RESP_DONE))
 848		return 1;
 849	return 0;
 850}
 851
 852static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
 853{
 854	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 855
 856	/* freeze if hotplugged */
 857	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
 858		ata_port_freeze(ap);
 859		return 1;
 860	}
 861
 862	/* bail out if not our interrupt */
 863	if (!(irq_stat & NV_INT_DEV))
 864		return 0;
 865
 866	/* DEV interrupt w/ no active qc? */
 867	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 868		ata_sff_check_status(ap);
 869		return 1;
 870	}
 871
 872	/* handle interrupt */
 873	return ata_bmdma_port_intr(ap, qc);
 874}
 875
 876static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 877{
 878	struct ata_host *host = dev_instance;
 879	int i, handled = 0;
 880	u32 notifier_clears[2];
 881
 882	spin_lock(&host->lock);
 883
 884	for (i = 0; i < host->n_ports; i++) {
 885		struct ata_port *ap = host->ports[i];
 886		struct nv_adma_port_priv *pp = ap->private_data;
 887		void __iomem *mmio = pp->ctl_block;
 888		u16 status;
 889		u32 gen_ctl;
 890		u32 notifier, notifier_error;
 891
 892		notifier_clears[i] = 0;
 893
 894		/* if ADMA is disabled, use standard ata interrupt handler */
 895		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
 896			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 897				>> (NV_INT_PORT_SHIFT * i);
 898			handled += nv_host_intr(ap, irq_stat);
 899			continue;
 900		}
 901
 902		/* if in ATA register mode, check for standard interrupts */
 903		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
 904			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 905				>> (NV_INT_PORT_SHIFT * i);
 906			if (ata_tag_valid(ap->link.active_tag))
 907				/** NV_INT_DEV indication seems unreliable
 908				    at times at least in ADMA mode. Force it
 909				    on always when a command is active, to
 910				    prevent losing interrupts. */
 911				irq_stat |= NV_INT_DEV;
 912			handled += nv_host_intr(ap, irq_stat);
 913		}
 914
 915		notifier = readl(mmio + NV_ADMA_NOTIFIER);
 916		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
 917		notifier_clears[i] = notifier | notifier_error;
 918
 919		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
 920
 921		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
 922		    !notifier_error)
 923			/* Nothing to do */
 924			continue;
 925
 926		status = readw(mmio + NV_ADMA_STAT);
 927
 928		/*
 929		 * Clear status. Ensure the controller sees the
 930		 * clearing before we start looking at any of the CPB
 931		 * statuses, so that any CPB completions after this
 932		 * point in the handler will raise another interrupt.
 933		 */
 934		writew(status, mmio + NV_ADMA_STAT);
 935		readw(mmio + NV_ADMA_STAT); /* flush posted write */
 936		rmb();
 937
 938		handled++; /* irq handled if we got here */
 939
 940		/* freeze if hotplugged or controller error */
 941		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
 942				       NV_ADMA_STAT_HOTUNPLUG |
 943				       NV_ADMA_STAT_TIMEOUT |
 944				       NV_ADMA_STAT_SERROR))) {
 945			struct ata_eh_info *ehi = &ap->link.eh_info;
 946
 947			ata_ehi_clear_desc(ehi);
 948			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
 949			if (status & NV_ADMA_STAT_TIMEOUT) {
 950				ehi->err_mask |= AC_ERR_SYSTEM;
 951				ata_ehi_push_desc(ehi, "timeout");
 952			} else if (status & NV_ADMA_STAT_HOTPLUG) {
 953				ata_ehi_hotplugged(ehi);
 954				ata_ehi_push_desc(ehi, "hotplug");
 955			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
 956				ata_ehi_hotplugged(ehi);
 957				ata_ehi_push_desc(ehi, "hot unplug");
 958			} else if (status & NV_ADMA_STAT_SERROR) {
 959				/* let EH analyze SError and figure out cause */
 960				ata_ehi_push_desc(ehi, "SError");
 961			} else
 962				ata_ehi_push_desc(ehi, "unknown");
 963			ata_port_freeze(ap);
 964			continue;
 965		}
 966
 967		if (status & (NV_ADMA_STAT_DONE |
 968			      NV_ADMA_STAT_CPBERR |
 969			      NV_ADMA_STAT_CMD_COMPLETE)) {
 970			u32 check_commands = notifier_clears[i];
 971			u32 done_mask = 0;
 972			int pos, rc;
 973
 974			if (status & NV_ADMA_STAT_CPBERR) {
 975				/* check all active commands */
 976				if (ata_tag_valid(ap->link.active_tag))
 977					check_commands = 1 <<
 978						ap->link.active_tag;
 979				else
 980					check_commands = ap->link.sactive;
 981			}
 982
 983			/* check CPBs for completed commands */
 984			while ((pos = ffs(check_commands))) {
 985				pos--;
 986				rc = nv_adma_check_cpb(ap, pos,
 987						notifier_error & (1 << pos));
 988				if (rc > 0)
 989					done_mask |= 1 << pos;
 990				else if (unlikely(rc < 0))
 991					check_commands = 0;
 992				check_commands &= ~(1 << pos);
 993			}
 994			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 995		}
 996	}
 997
 998	if (notifier_clears[0] || notifier_clears[1]) {
 999		/* Note: Both notifier clear registers must be written
1000		   if either is set, even if one is zero, according to NVIDIA. */
1001		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1002		writel(notifier_clears[0], pp->notifier_clear_block);
1003		pp = host->ports[1]->private_data;
1004		writel(notifier_clears[1], pp->notifier_clear_block);
1005	}
1006
1007	spin_unlock(&host->lock);
1008
1009	return IRQ_RETVAL(handled);
1010}
1011
1012static void nv_adma_freeze(struct ata_port *ap)
1013{
1014	struct nv_adma_port_priv *pp = ap->private_data;
1015	void __iomem *mmio = pp->ctl_block;
1016	u16 tmp;
1017
1018	nv_ck804_freeze(ap);
1019
1020	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1021		return;
1022
1023	/* clear any outstanding CK804 notifications */
1024	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1025		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1026
1027	/* Disable interrupt */
1028	tmp = readw(mmio + NV_ADMA_CTL);
1029	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1030		mmio + NV_ADMA_CTL);
1031	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1032}
1033
1034static void nv_adma_thaw(struct ata_port *ap)
1035{
1036	struct nv_adma_port_priv *pp = ap->private_data;
1037	void __iomem *mmio = pp->ctl_block;
1038	u16 tmp;
1039
1040	nv_ck804_thaw(ap);
1041
1042	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1043		return;
1044
1045	/* Enable interrupt */
1046	tmp = readw(mmio + NV_ADMA_CTL);
1047	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1048		mmio + NV_ADMA_CTL);
1049	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1050}
1051
1052static void nv_adma_irq_clear(struct ata_port *ap)
1053{
1054	struct nv_adma_port_priv *pp = ap->private_data;
1055	void __iomem *mmio = pp->ctl_block;
1056	u32 notifier_clears[2];
1057
1058	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1059		ata_bmdma_irq_clear(ap);
1060		return;
1061	}
1062
1063	/* clear any outstanding CK804 notifications */
1064	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1065		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1066
1067	/* clear ADMA status */
1068	writew(0xffff, mmio + NV_ADMA_STAT);
1069
1070	/* clear notifiers - note both ports need to be written with
1071	   something even though we are only clearing on one */
1072	if (ap->port_no == 0) {
1073		notifier_clears[0] = 0xFFFFFFFF;
1074		notifier_clears[1] = 0;
1075	} else {
1076		notifier_clears[0] = 0;
1077		notifier_clears[1] = 0xFFFFFFFF;
1078	}
1079	pp = ap->host->ports[0]->private_data;
1080	writel(notifier_clears[0], pp->notifier_clear_block);
1081	pp = ap->host->ports[1]->private_data;
1082	writel(notifier_clears[1], pp->notifier_clear_block);
1083}
1084
1085static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1086{
1087	struct nv_adma_port_priv *pp = qc->ap->private_data;
1088
1089	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1090		ata_bmdma_post_internal_cmd(qc);
1091}
1092
1093static int nv_adma_port_start(struct ata_port *ap)
1094{
1095	struct device *dev = ap->host->dev;
1096	struct nv_adma_port_priv *pp;
1097	int rc;
1098	void *mem;
1099	dma_addr_t mem_dma;
1100	void __iomem *mmio;
1101	struct pci_dev *pdev = to_pci_dev(dev);
1102	u16 tmp;
1103
1104	/*
1105	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1106	 * pad buffers.
1107	 */
1108	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1109	if (rc)
1110		return rc;
1111
1112	/* we might fallback to bmdma, allocate bmdma resources */
1113	rc = ata_bmdma_port_start(ap);
1114	if (rc)
1115		return rc;
1116
1117	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1118	if (!pp)
1119		return -ENOMEM;
1120
1121	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1122	       ap->port_no * NV_ADMA_PORT_SIZE;
1123	pp->ctl_block = mmio;
1124	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1125	pp->notifier_clear_block = pp->gen_block +
1126	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1127
1128	/*
1129	 * Now that the legacy PRD and padding buffer are allocated we can
1130	 * raise the DMA mask to allocate the CPB/APRD table.
1131	 */
1132	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1133
1134	pp->adma_dma_mask = *dev->dma_mask;
1135
1136	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1137				  &mem_dma, GFP_KERNEL);
1138	if (!mem)
1139		return -ENOMEM;
1140
1141	/*
1142	 * First item in chunk of DMA memory:
1143	 * 128-byte command parameter block (CPB)
1144	 * one for each command tag
1145	 */
1146	pp->cpb     = mem;
1147	pp->cpb_dma = mem_dma;
1148
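     	/* Program the CPB base address; splitting the high dword as
     	   (x >> 16) >> 16 stays well defined even when dma_addr_t is 32 bits. */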
1149	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1150	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1151
1152	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1153	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1154
1155	/*
1156	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1157	 */
1158	pp->aprd = mem;
1159	pp->aprd_dma = mem_dma;
1160
1161	ap->private_data = pp;
1162
1163	/* clear any outstanding interrupt conditions */
1164	writew(0xffff, mmio + NV_ADMA_STAT);
1165
1166	/* initialize port variables */
1167	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1168
1169	/* clear CPB fetch count */
1170	writew(0, mmio + NV_ADMA_CPB_COUNT);
1171
1172	/* clear GO for register mode, enable interrupt */
1173	tmp = readw(mmio + NV_ADMA_CTL);
1174	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1175		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1176
1177	tmp = readw(mmio + NV_ADMA_CTL);
1178	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1179	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1180	udelay(1);
1181	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1182	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1183
1184	return 0;
1185}
1186
1187static void nv_adma_port_stop(struct ata_port *ap)
1188{
1189	struct nv_adma_port_priv *pp = ap->private_data;
1190	void __iomem *mmio = pp->ctl_block;
1191
1192	writew(0, mmio + NV_ADMA_CTL);
1193}
1194
1195#ifdef CONFIG_PM
1196static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1197{
1198	struct nv_adma_port_priv *pp = ap->private_data;
1199	void __iomem *mmio = pp->ctl_block;
1200
1201	/* Go to register mode - clears GO */
1202	nv_adma_register_mode(ap);
1203
1204	/* clear CPB fetch count */
1205	writew(0, mmio + NV_ADMA_CPB_COUNT);
1206
1207	/* disable interrupt, shut down port */
1208	writew(0, mmio + NV_ADMA_CTL);
1209
1210	return 0;
1211}
1212
1213static int nv_adma_port_resume(struct ata_port *ap)
1214{
1215	struct nv_adma_port_priv *pp = ap->private_data;
1216	void __iomem *mmio = pp->ctl_block;
1217	u16 tmp;
1218
1219	/* set CPB block location */
1220	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1221	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1222
1223	/* clear any outstanding interrupt conditions */
1224	writew(0xffff, mmio + NV_ADMA_STAT);
1225
1226	/* initialize port variables */
1227	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1228
1229	/* clear CPB fetch count */
1230	writew(0, mmio + NV_ADMA_CPB_COUNT);
1231
1232	/* clear GO for register mode, enable interrupt */
1233	tmp = readw(mmio + NV_ADMA_CTL);
1234	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1235		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1236
1237	tmp = readw(mmio + NV_ADMA_CTL);
1238	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1239	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1240	udelay(1);
1241	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1242	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1243
1244	return 0;
1245}
1246#endif
1247
1248static void nv_adma_setup_port(struct ata_port *ap)
1249{
1250	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1251	struct ata_ioports *ioport = &ap->ioaddr;
1252
1253	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1254
1255	ioport->cmd_addr	= mmio;
1256	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1257	ioport->error_addr	=
1258	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1259	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1260	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1261	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1262	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1263	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1264	ioport->status_addr	=
1265	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1266	ioport->altstatus_addr	=
1267	ioport->ctl_addr	= mmio + 0x20;
1268}
1269
1270static int nv_adma_host_init(struct ata_host *host)
1271{
1272	struct pci_dev *pdev = to_pci_dev(host->dev);
1273	unsigned int i;
1274	u32 tmp32;
1275
1276	/* enable ADMA on the ports */
1277	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1278	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1279		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1280		 NV_MCP_SATA_CFG_20_PORT1_EN |
1281		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1282
1283	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1284
1285	for (i = 0; i < host->n_ports; i++)
1286		nv_adma_setup_port(host->ports[i]);
1287
1288	return 0;
1289}
1290
1291static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292			      struct scatterlist *sg,
1293			      int idx,
1294			      struct nv_adma_prd *aprd)
1295{
1296	u8 flags = 0;
1297	if (qc->tf.flags & ATA_TFLAG_WRITE)
1298		flags |= NV_APRD_WRITE;
1299	if (idx == qc->n_elem - 1)
1300		flags |= NV_APRD_END;
1301	else if (idx != 4)
1302		flags |= NV_APRD_CONT;
1303
1304	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1305	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1306	aprd->flags = flags;
1307	aprd->packet_len = 0;
1308}
1309
1310static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1311{
1312	struct nv_adma_port_priv *pp = qc->ap->private_data;
1313	struct nv_adma_prd *aprd;
1314	struct scatterlist *sg;
1315	unsigned int si;
1316
1317	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1318		aprd = (si < 5) ? &cpb->aprd[si] :
1319			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1320		nv_adma_fill_aprd(qc, sg, si, aprd);
1321	}
1322	if (si > 5)
1323		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1324	else
1325		cpb->next_aprd = cpu_to_le64(0);
1326}
1327
1328static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1329{
1330	struct nv_adma_port_priv *pp = qc->ap->private_data;
1331
1332	/* ADMA engine can only be used for non-ATAPI DMA commands,
1333	   or interrupt-driven no-data commands. */
1334	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1335	   (qc->tf.flags & ATA_TFLAG_POLLING))
1336		return 1;
1337
1338	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1339	   (qc->tf.protocol == ATA_PROT_NODATA))
1340		return 0;
1341
1342	return 1;
1343}
1344
1345static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1346{
1347	struct nv_adma_port_priv *pp = qc->ap->private_data;
1348	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1349	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1350		       NV_CPB_CTL_IEN;
1351
1352	if (nv_adma_use_reg_mode(qc)) {
1353		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1354			(qc->flags & ATA_QCFLAG_DMAMAP));
1355		nv_adma_register_mode(qc->ap);
1356		ata_bmdma_qc_prep(qc);
1357		return AC_ERR_OK;
1358	}
1359
1360	cpb->resp_flags = NV_CPB_RESP_DONE;
1361	wmb();
1362	cpb->ctl_flags = 0;
1363	wmb();
1364
1365	cpb->len		= 3;
1366	cpb->tag		= qc->hw_tag;
1367	cpb->next_cpb_idx	= 0;
1368
1369	/* turn on NCQ flags for NCQ commands */
1370	if (qc->tf.protocol == ATA_PROT_NCQ)
1371		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1372
1373	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1374
1375	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1376		nv_adma_fill_sg(qc, cpb);
1377		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1378	} else
1379		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1380
1381	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1382	   until we are finished filling in all of the contents */
1383	wmb();
1384	cpb->ctl_flags = ctl_flags;
1385	wmb();
1386	cpb->resp_flags = 0;
1387
1388	return AC_ERR_OK;
1389}
1390
1391static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1392{
1393	struct nv_adma_port_priv *pp = qc->ap->private_data;
1394	void __iomem *mmio = pp->ctl_block;
1395	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1396
1397	/* We can't handle result taskfile with NCQ commands, since
1398	   retrieving the taskfile switches us out of ADMA mode and would abort
1399	   existing commands. */
1400	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1401		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1402		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1403		return AC_ERR_SYSTEM;
1404	}
1405
1406	if (nv_adma_use_reg_mode(qc)) {
1407		/* use ATA register mode */
1408		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1409			(qc->flags & ATA_QCFLAG_DMAMAP));
1410		nv_adma_register_mode(qc->ap);
1411		return ata_bmdma_qc_issue(qc);
1412	} else
1413		nv_adma_mode(qc->ap);
1414
1415	/* write append register, command tag in lower 8 bits
1416	   and (number of cpbs to append -1) in top 8 bits */
1417	wmb();
1418
1419	if (curr_ncq != pp->last_issue_ncq) {
1420		/* Seems to need some delay before switching between NCQ and
1421		   non-NCQ commands, else we get command timeouts and such. */
1422		udelay(20);
1423		pp->last_issue_ncq = curr_ncq;
1424	}
1425
1426	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1427
1428	return 0;
1429}
1430
1431static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1432{
1433	struct ata_host *host = dev_instance;
1434	unsigned int i;
1435	unsigned int handled = 0;
1436	unsigned long flags;
1437
1438	spin_lock_irqsave(&host->lock, flags);
1439
1440	for (i = 0; i < host->n_ports; i++) {
1441		struct ata_port *ap = host->ports[i];
1442		struct ata_queued_cmd *qc;
1443
1444		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1445		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1446			handled += ata_bmdma_port_intr(ap, qc);
1447		} else {
1448			/*
1449			 * No request pending?  Clear interrupt status
1450			 * anyway, in case there's one pending.
1451			 */
1452			ap->ops->sff_check_status(ap);
1453		}
1454	}
1455
1456	spin_unlock_irqrestore(&host->lock, flags);
1457
1458	return IRQ_RETVAL(handled);
1459}
1460
1461static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1462{
1463	int i, handled = 0;
1464
1465	for (i = 0; i < host->n_ports; i++) {
1466		handled += nv_host_intr(host->ports[i], irq_stat);
1467		irq_stat >>= NV_INT_PORT_SHIFT;
1468	}
1469
1470	return IRQ_RETVAL(handled);
1471}
1472
1473static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1474{
1475	struct ata_host *host = dev_instance;
1476	u8 irq_stat;
1477	irqreturn_t ret;
1478
1479	spin_lock(&host->lock);
1480	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1481	ret = nv_do_interrupt(host, irq_stat);
1482	spin_unlock(&host->lock);
1483
1484	return ret;
1485}
1486
1487static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1488{
1489	struct ata_host *host = dev_instance;
1490	u8 irq_stat;
1491	irqreturn_t ret;
1492
1493	spin_lock(&host->lock);
1494	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1495	ret = nv_do_interrupt(host, irq_stat);
1496	spin_unlock(&host->lock);
1497
1498	return ret;
1499}
1500
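     /* SStatus, SError and SControl occupy consecutive 32-bit slots starting at
        scr_addr, hence the sc_reg * 4 offset below. */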
1501static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1502{
1503	if (sc_reg > SCR_CONTROL)
1504		return -EINVAL;
1505
1506	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1507	return 0;
1508}
1509
1510static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1511{
1512	if (sc_reg > SCR_CONTROL)
1513		return -EINVAL;
1514
1515	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1516	return 0;
1517}
1518
1519static int nv_hardreset(struct ata_link *link, unsigned int *class,
1520			unsigned long deadline)
1521{
1522	struct ata_eh_context *ehc = &link->eh_context;
1523
1524	/* Do hardreset iff it's post-boot probing, please read the
1525	 * comment above port ops for details.
1526	 */
1527	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1528	    !ata_dev_enabled(link->device))
1529		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1530				    NULL, NULL);
1531	else {
1532		const unsigned int *timing = sata_ehc_deb_timing(ehc);
1533		int rc;
1534
1535		if (!(ehc->i.flags & ATA_EHI_QUIET))
1536			ata_link_info(link,
1537				      "nv: skipping hardreset on occupied port\n");
1538
1539		/* make sure the link is online */
1540		rc = sata_link_resume(link, timing, deadline);
1541		/* whine about phy resume failure but proceed */
1542		if (rc && rc != -EOPNOTSUPP)
1543			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1544				      rc);
1545	}
1546
1547	/* device signature acquisition is unreliable */
1548	return -EAGAIN;
1549}
1550
1551static void nv_nf2_freeze(struct ata_port *ap)
1552{
1553	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1554	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1555	u8 mask;
1556
1557	mask = ioread8(scr_addr + NV_INT_ENABLE);
1558	mask &= ~(NV_INT_ALL << shift);
1559	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1560}
1561
1562static void nv_nf2_thaw(struct ata_port *ap)
1563{
1564	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1565	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1566	u8 mask;
1567
1568	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1569
1570	mask = ioread8(scr_addr + NV_INT_ENABLE);
1571	mask |= (NV_INT_MASK << shift);
1572	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1573}
1574
1575static void nv_ck804_freeze(struct ata_port *ap)
1576{
1577	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1578	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1579	u8 mask;
1580
1581	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1582	mask &= ~(NV_INT_ALL << shift);
1583	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1584}
1585
1586static void nv_ck804_thaw(struct ata_port *ap)
1587{
1588	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1589	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1590	u8 mask;
1591
1592	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1593
1594	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1595	mask |= (NV_INT_MASK << shift);
1596	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1597}
1598
1599static void nv_mcp55_freeze(struct ata_port *ap)
1600{
1601	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1602	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1603	u32 mask;
1604
1605	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1606
1607	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1608	mask &= ~(NV_INT_ALL_MCP55 << shift);
1609	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1610}
1611
1612static void nv_mcp55_thaw(struct ata_port *ap)
1613{
1614	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1615	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1616	u32 mask;
1617
1618	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1619
1620	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1621	mask |= (NV_INT_MASK_MCP55 << shift);
1622	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1623}
1624
1625static void nv_adma_error_handler(struct ata_port *ap)
1626{
1627	struct nv_adma_port_priv *pp = ap->private_data;
1628	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1629		void __iomem *mmio = pp->ctl_block;
1630		int i;
1631		u16 tmp;
1632
1633		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1634			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1635			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1636			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1637			u32 status = readw(mmio + NV_ADMA_STAT);
1638			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1639			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1640
1641			ata_port_err(ap,
1642				"EH in ADMA mode, notifier 0x%X "
1643				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1644				"next cpb count 0x%X next cpb idx 0x%x\n",
1645				notifier, notifier_error, gen_ctl, status,
1646				cpb_count, next_cpb_idx);
1647
1648			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1649				struct nv_adma_cpb *cpb = &pp->cpb[i];
1650				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1651				    ap->link.sactive & (1 << i))
1652					ata_port_err(ap,
1653						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1654						i, cpb->ctl_flags, cpb->resp_flags);
1655			}
1656		}
1657
1658		/* Push us back into port register mode for error handling. */
1659		nv_adma_register_mode(ap);
1660
1661		/* Mark all of the CPBs as invalid to prevent them from
1662		   being executed */
1663		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1664			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1665
1666		/* clear CPB fetch count */
1667		writew(0, mmio + NV_ADMA_CPB_COUNT);
1668
1669		/* Reset channel */
1670		tmp = readw(mmio + NV_ADMA_CTL);
1671		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1672		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1673		udelay(1);
1674		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1675		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1676	}
1677
1678	ata_bmdma_error_handler(ap);
1679}
1680
1681static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1682{
1683	struct nv_swncq_port_priv *pp = ap->private_data;
1684	struct defer_queue *dq = &pp->defer_queue;
1685
1686	/* queue is full */
1687	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1688	dq->defer_bits |= (1 << qc->hw_tag);
1689	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1690}
1691
1692static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1693{
1694	struct nv_swncq_port_priv *pp = ap->private_data;
1695	struct defer_queue *dq = &pp->defer_queue;
1696	unsigned int tag;
1697
1698	if (dq->head == dq->tail)	/* null queue */
1699		return NULL;
1700
1701	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1702	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1703	WARN_ON(!(dq->defer_bits & (1 << tag)));
1704	dq->defer_bits &= ~(1 << tag);
1705
1706	return ata_qc_from_tag(ap, tag);
1707}
1708
1709static void nv_swncq_fis_reinit(struct ata_port *ap)
1710{
1711	struct nv_swncq_port_priv *pp = ap->private_data;
1712
1713	pp->dhfis_bits = 0;
1714	pp->dmafis_bits = 0;
1715	pp->sdbfis_bits = 0;
1716	pp->ncq_flags = 0;
1717}
1718
1719static void nv_swncq_pp_reinit(struct ata_port *ap)
1720{
1721	struct nv_swncq_port_priv *pp = ap->private_data;
1722	struct defer_queue *dq = &pp->defer_queue;
1723
1724	dq->head = 0;
1725	dq->tail = 0;
1726	dq->defer_bits = 0;
1727	pp->qc_active = 0;
1728	pp->last_issue_tag = ATA_TAG_POISON;
1729	nv_swncq_fis_reinit(ap);
1730}
1731
1732static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1733{
1734	struct nv_swncq_port_priv *pp = ap->private_data;
1735
1736	writew(fis, pp->irq_block);
1737}
1738
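/*
 * ata_bmdma_stop() takes a queued command but only uses its port
 * pointer; build a dummy qc on the stack so the BMDMA engine can be
 * stopped without a real command.
 */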
1739static void __ata_bmdma_stop(struct ata_port *ap)
1740{
1741	struct ata_queued_cmd qc;
1742
1743	qc.ap = ap;
1744	ata_bmdma_stop(&qc);
1745}
1746
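/*
 * Called from the error handler while NCQ commands are outstanding:
 * dump the SWNCQ bookkeeping state for debugging, then quiesce the
 * port (reset the per-port state, stop BMDMA and ack all SWNCQ
 * interrupt bits).
 */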
1747static void nv_swncq_ncq_stop(struct ata_port *ap)
1748{
1749	struct nv_swncq_port_priv *pp = ap->private_data;
1750	unsigned int i;
1751	u32 sactive;
1752	u32 done_mask;
1753
1754	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1755		     ap->qc_active, ap->link.sactive);
1756	ata_port_err(ap,
1757		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1758		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1759		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1760		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1761
1762	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1763		     ap->ops->sff_check_status(ap),
1764		     ioread8(ap->ioaddr.error_addr));
1765
1766	sactive = readl(pp->sactive_block);
1767	done_mask = pp->qc_active ^ sactive;
1768
1769	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1770	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1771		u8 err = 0;
1772		if (pp->qc_active & (1 << i))
1773			err = 0;
1774		else if (done_mask & (1 << i))
1775			err = 1;
1776		else
1777			continue;
1778
1779		ata_port_err(ap,
1780			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1781			     (pp->dhfis_bits >> i) & 0x1,
1782			     (pp->dmafis_bits >> i) & 0x1,
1783			     (pp->sdbfis_bits >> i) & 0x1,
1784			     (sactive >> i) & 0x1,
1785			     (err ? "error! tag doesn't exist" : " "));
1786	}
1787
1788	nv_swncq_pp_reinit(ap);
1789	ap->ops->sff_irq_clear(ap);
1790	__ata_bmdma_stop(ap);
1791	nv_swncq_irq_clear(ap, 0xffff);
1792}
1793
1794static void nv_swncq_error_handler(struct ata_port *ap)
1795{
1796	struct ata_eh_context *ehc = &ap->link.eh_context;
1797
1798	if (ap->link.sactive) {
1799		nv_swncq_ncq_stop(ap);
1800		ehc->i.action |= ATA_EH_RESET;
1801	}
1802
1803	ata_bmdma_error_handler(ap);
1804}
1805
1806#ifdef CONFIG_PM
1807static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1808{
1809	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1810	u32 tmp;
1811
1812	/* clear irq */
1813	writel(~0, mmio + NV_INT_STATUS_MCP55);
1814
1815	/* disable irq */
1816	writel(0, mmio + NV_INT_ENABLE_MCP55);
1817
1818	/* disable swncq */
1819	tmp = readl(mmio + NV_CTL_MCP55);
1820	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1821	writel(tmp, mmio + NV_CTL_MCP55);
1822
1823	return 0;
1824}
1825
1826static int nv_swncq_port_resume(struct ata_port *ap)
1827{
1828	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1829	u32 tmp;
1830
1831	/* clear irq */
1832	writel(~0, mmio + NV_INT_STATUS_MCP55);
1833
1834	/* enable irq */
1835	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1836
1837	/* enable swncq */
1838	tmp = readl(mmio + NV_CTL_MCP55);
1839	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1840
1841	return 0;
1842}
1843#endif
1844
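/*
 * Host-wide SWNCQ setup for MCP5x: clear bit 7 of PCI config register
 * 0x7f (the "ECO 398" bit), enable SWNCQ on both ports, unmask the
 * interrupt sources used by the driver and clear any stale status.
 */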
1845static void nv_swncq_host_init(struct ata_host *host)
1846{
1847	u32 tmp;
1848	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1849	struct pci_dev *pdev = to_pci_dev(host->dev);
1850	u8 regval;
1851
1852	/* disable  ECO 398 */
1853	pci_read_config_byte(pdev, 0x7f, &regval);
1854	regval &= ~(1 << 7);
1855	pci_write_config_byte(pdev, 0x7f, regval);
1856
1857	/* enable swncq */
1858	tmp = readl(mmio + NV_CTL_MCP55);
1859	dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
1860	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1861
1862	/* enable irq intr */
1863	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1864	dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
1865	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1866
1867	/*  clear port irq */
1868	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1869}
1870
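/*
 * Quirk handling at SCSI slave configure time: on MCP51, and on MCP55
 * revisions up to A2, SWNCQ is disabled for Maxtor drives by forcing
 * the queue depth to 1.
 */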
1871static int nv_swncq_slave_config(struct scsi_device *sdev)
1872{
1873	struct ata_port *ap = ata_shost_to_port(sdev->host);
1874	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1875	struct ata_device *dev;
1876	int rc;
1877	u8 rev;
1878	u8 check_maxtor = 0;
1879	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1880
1881	rc = ata_scsi_slave_config(sdev);
1882	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1883		/* Not a proper libata device, ignore */
1884		return rc;
1885
1886	dev = &ap->link.device[sdev->id];
1887	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1888		return rc;
1889
1890	/* if MCP51 and Maxtor, then disable ncq */
1891	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1892		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1893		check_maxtor = 1;
1894
1895	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1896	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1897		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1898		pci_read_config_byte(pdev, 0x8, &rev);
1899		if (rev <= 0xa2)
1900			check_maxtor = 1;
1901	}
1902
1903	if (!check_maxtor)
1904		return rc;
1905
1906	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1907
1908	if (strncmp(model_num, "Maxtor", 6) == 0) {
1909		ata_scsi_change_queue_depth(sdev, 1);
1910		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1911			       sdev->queue_depth);
1912	}
1913
1914	return rc;
1915}
1916
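/*
 * Per-port SWNCQ setup: keep the BMDMA resources (non-NCQ commands
 * still go through BMDMA), allocate one PRD table per possible tag,
 * and cache the SActive, interrupt status and tag register addresses
 * for this port.
 */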
1917static int nv_swncq_port_start(struct ata_port *ap)
1918{
1919	struct device *dev = ap->host->dev;
1920	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1921	struct nv_swncq_port_priv *pp;
1922	int rc;
1923
1924	/* we might fallback to bmdma, allocate bmdma resources */
1925	rc = ata_bmdma_port_start(ap);
1926	if (rc)
1927		return rc;
1928
1929	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1930	if (!pp)
1931		return -ENOMEM;
1932
1933	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1934				      &pp->prd_dma, GFP_KERNEL);
1935	if (!pp->prd)
1936		return -ENOMEM;
1937
1938	ap->private_data = pp;
1939	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1940	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1941	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1942
1943	return 0;
1944}
1945
1946static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1947{
1948	if (qc->tf.protocol != ATA_PROT_NCQ) {
1949		ata_bmdma_qc_prep(qc);
1950		return AC_ERR_OK;
1951	}
1952
1953	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1954		return AC_ERR_OK;
1955
1956	nv_swncq_fill_sg(qc);
1957
1958	return AC_ERR_OK;
1959}
1960
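/*
 * Build the PRD table for one NCQ command inside the shared DMA
 * buffer, one table per tag, splitting segments so that no PRD entry
 * crosses a 64K boundary.
 */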
1961static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1962{
1963	struct ata_port *ap = qc->ap;
1964	struct scatterlist *sg;
1965	struct nv_swncq_port_priv *pp = ap->private_data;
1966	struct ata_bmdma_prd *prd;
1967	unsigned int si, idx;
1968
1969	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1970
1971	idx = 0;
1972	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1973		u32 addr, offset;
1974		u32 sg_len, len;
1975
1976		addr = (u32)sg_dma_address(sg);
1977		sg_len = sg_dma_len(sg);
1978
1979		while (sg_len) {
1980			offset = addr & 0xffff;
1981			len = sg_len;
1982			if ((offset + sg_len) > 0x10000)
1983				len = 0x10000 - offset;
1984
1985			prd[idx].addr = cpu_to_le32(addr);
1986			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1987
1988			idx++;
1989			sg_len -= len;
1990			addr += len;
1991		}
1992	}
1993
1994	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1995}
1996
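/*
 * Issue a single NCQ command: set its bit in SActive, update the
 * driver's tag bookkeeping, then load the taskfile and send the
 * command through the SFF interface.
 */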
1997static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1998					  struct ata_queued_cmd *qc)
1999{
2000	struct nv_swncq_port_priv *pp = ap->private_data;
2001
2002	if (qc == NULL)
2003		return 0;
2004
2005	writel((1 << qc->hw_tag), pp->sactive_block);
2006	pp->last_issue_tag = qc->hw_tag;
2007	pp->dhfis_bits &= ~(1 << qc->hw_tag);
2008	pp->dmafis_bits &= ~(1 << qc->hw_tag);
2009	pp->qc_active |= (0x1 << qc->hw_tag);
2010
2011	trace_ata_tf_load(ap, &qc->tf);
2012	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2013	trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
2014	ap->ops->sff_exec_command(ap, &qc->tf);
2015
2016	return 0;
2017}
2018
2019static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2020{
2021	struct ata_port *ap = qc->ap;
2022	struct nv_swncq_port_priv *pp = ap->private_data;
2023
2024	if (qc->tf.protocol != ATA_PROT_NCQ)
2025		return ata_bmdma_qc_issue(qc);
2026
2027	if (!pp->qc_active)
2028		nv_swncq_issue_atacmd(ap, qc);
2029	else
2030		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2031
2032	return 0;
2033}
2034
2035static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2036{
2037	u32 serror;
2038	struct ata_eh_info *ehi = &ap->link.eh_info;
2039
2040	ata_ehi_clear_desc(ehi);
2041
2042	/* AHCI needs SError cleared; otherwise, it might lock up */
2043	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2044	sata_scr_write(&ap->link, SCR_ERROR, serror);
2045
2046	/* analyze @fis */
2047	if (fis & NV_SWNCQ_IRQ_ADDED)
2048		ata_ehi_push_desc(ehi, "hot plug");
2049	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2050		ata_ehi_push_desc(ehi, "hot unplug");
2051
2052	ata_ehi_hotplugged(ehi);
2053
2054	/* okay, let's hand over to EH */
2055	ehi->serror |= serror;
2056
2057	ata_port_freeze(ap);
2058}
2059
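/*
 * Handle a Set Device Bits FIS: complete the commands whose SActive
 * bits the device has cleared, reissue the last command if the device
 * backed out or a D2H Register FIS went missing, and otherwise kick
 * off the next deferred command, if any.
 */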
2060static int nv_swncq_sdbfis(struct ata_port *ap)
2061{
2062	struct ata_queued_cmd *qc;
2063	struct nv_swncq_port_priv *pp = ap->private_data;
2064	struct ata_eh_info *ehi = &ap->link.eh_info;
2065	u32 sactive;
2066	u32 done_mask;
2067	u8 host_stat;
2068	u8 lack_dhfis = 0;
2069
2070	host_stat = ap->ops->bmdma_status(ap);
2071	trace_ata_bmdma_status(ap, host_stat);
2072	if (unlikely(host_stat & ATA_DMA_ERR)) {
2073		/* error when transferring data to/from memory */
2074		ata_ehi_clear_desc(ehi);
2075		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2076		ehi->err_mask |= AC_ERR_HOST_BUS;
2077		ehi->action |= ATA_EH_RESET;
2078		return -EINVAL;
2079	}
2080
2081	ap->ops->sff_irq_clear(ap);
2082	__ata_bmdma_stop(ap);
2083
2084	sactive = readl(pp->sactive_block);
2085	done_mask = pp->qc_active ^ sactive;
2086
2087	pp->qc_active &= ~done_mask;
2088	pp->dhfis_bits &= ~done_mask;
2089	pp->dmafis_bits &= ~done_mask;
2090	pp->sdbfis_bits |= done_mask;
2091	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2092
2093	if (!ap->qc_active) {
2094		ata_port_dbg(ap, "over\n");
2095		nv_swncq_pp_reinit(ap);
2096		return 0;
2097	}
2098
2099	if (pp->qc_active & pp->dhfis_bits)
2100		return 0;
2101
2102	if ((pp->ncq_flags & ncq_saw_backout) ||
2103	    (pp->qc_active ^ pp->dhfis_bits))
2104		/* if the controller can't get a Device to Host Register FIS,
2105		 * the driver needs to reissue the command.
2106		 */
2107		lack_dhfis = 1;
2108
2109	ata_port_dbg(ap, "QC: qc_active 0x%llx,"
2110		     "SWNCQ:qc_active 0x%X defer_bits %X "
2111		     "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2112		     ap->qc_active, pp->qc_active,
2113		     pp->defer_queue.defer_bits, pp->dhfis_bits,
2114		     pp->dmafis_bits, pp->last_issue_tag);
2115
2116	nv_swncq_fis_reinit(ap);
2117
2118	if (lack_dhfis) {
2119		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2120		nv_swncq_issue_atacmd(ap, qc);
2121		return 0;
2122	}
2123
2124	if (pp->defer_queue.defer_bits) {
2125		/* send deferral queue command */
2126		qc = nv_swncq_qc_from_dq(ap);
2127		WARN_ON(qc == NULL);
2128		nv_swncq_issue_atacmd(ap, qc);
2129	}
2130
2131	return 0;
2132}
2133
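/*
 * Read the tag the device selected (reported through the MCP55 NCQ
 * register) so the DMA engine can be set up for the right command.
 */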
2134static inline u32 nv_swncq_tag(struct ata_port *ap)
2135{
2136	struct nv_swncq_port_priv *pp = ap->private_data;
2137	u32 tag;
2138
2139	tag = readb(pp->tag_block) >> 2;
2140	return (tag & 0x1f);
2141}
2142
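/*
 * A DMA Setup FIS arrived: point the BMDMA engine at the PRD table of
 * the selected tag, program the transfer direction and start the
 * transfer.
 */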
2143static void nv_swncq_dmafis(struct ata_port *ap)
2144{
2145	struct ata_queued_cmd *qc;
2146	unsigned int rw;
2147	u8 dmactl;
2148	u32 tag;
2149	struct nv_swncq_port_priv *pp = ap->private_data;
2150
2151	__ata_bmdma_stop(ap);
2152	tag = nv_swncq_tag(ap);
2153
2154	ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
2155	qc = ata_qc_from_tag(ap, tag);
2156
2157	if (unlikely(!qc))
2158		return;
2159
2160	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2161
2162	/* load PRD table addr. */
2163	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2164		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2165
2166	/* specify data direction, triple-check start bit is clear */
2167	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2168	dmactl &= ~ATA_DMA_WR;
2169	if (!rw)
2170		dmactl |= ATA_DMA_WR;
2171
2172	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2173}
2174
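/*
 * Per-port SWNCQ interrupt handling: ack the reported FIS bits, hand
 * hotplug and device errors to EH, and track D2H Register, Set Device
 * Bits and DMA Setup notifications to complete finished commands and
 * issue deferred ones.
 */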
2175static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2176{
2177	struct nv_swncq_port_priv *pp = ap->private_data;
2178	struct ata_queued_cmd *qc;
2179	struct ata_eh_info *ehi = &ap->link.eh_info;
2180	u32 serror;
2181	u8 ata_stat;
2182
2183	ata_stat = ap->ops->sff_check_status(ap);
2184	nv_swncq_irq_clear(ap, fis);
2185	if (!fis)
2186		return;
2187
2188	if (ata_port_is_frozen(ap))
2189		return;
2190
2191	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2192		nv_swncq_hotplug(ap, fis);
2193		return;
2194	}
2195
2196	if (!pp->qc_active)
2197		return;
2198
2199	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2200		return;
2201	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2202
2203	if (ata_stat & ATA_ERR) {
2204		ata_ehi_clear_desc(ehi);
2205		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2206		ehi->err_mask |= AC_ERR_DEV;
2207		ehi->serror |= serror;
2208		ehi->action |= ATA_EH_RESET;
2209		ata_port_freeze(ap);
2210		return;
2211	}
2212
2213	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2214		/* On a backout interrupt, the driver must issue
2215		 * the command again some time later.
2216		 */
2217		pp->ncq_flags |= ncq_saw_backout;
2218	}
2219
2220	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2221		pp->ncq_flags |= ncq_saw_sdb;
2222		ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
2223			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2224			pp->qc_active, pp->dhfis_bits,
2225			pp->dmafis_bits, readl(pp->sactive_block));
2226		if (nv_swncq_sdbfis(ap) < 0)
2227			goto irq_error;
2228	}
2229
2230	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2231		/* The interrupt indicates the new command
2232		 * was transmitted correctly to the drive.
2233		 */
2234		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2235		pp->ncq_flags |= ncq_saw_d2h;
2236		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2237			ata_ehi_push_desc(ehi, "illegal fis transaction");
2238			ehi->err_mask |= AC_ERR_HSM;
2239			ehi->action |= ATA_EH_RESET;
2240			goto irq_error;
2241		}
2242
2243		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2244		    !(pp->ncq_flags & ncq_saw_dmas)) {
2245			ata_stat = ap->ops->sff_check_status(ap);
2246			if (ata_stat & ATA_BUSY)
2247				goto irq_exit;
2248
2249			if (pp->defer_queue.defer_bits) {
2250				ata_port_dbg(ap, "send next command\n");
2251				qc = nv_swncq_qc_from_dq(ap);
2252				nv_swncq_issue_atacmd(ap, qc);
2253			}
2254		}
2255	}
2256
2257	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2258		/* program the DMA controller with the appropriate PRD buffers
2259		 * and start the DMA transfer for the requested command.
2260		 */
2261		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2262		pp->ncq_flags |= ncq_saw_dmas;
2263		nv_swncq_dmafis(ap);
2264	}
2265
2266irq_exit:
2267	return;
2268irq_error:
2269	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2270	ata_port_freeze(ap);
2271	return;
2272}
2273
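/*
 * Top-level MCP55 interrupt handler: the status register packs 16 bits
 * per port, so each port's slice is routed either to the SWNCQ handler
 * (when NCQ commands are active) or to the generic SFF handler.
 */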
2274static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2275{
2276	struct ata_host *host = dev_instance;
2277	unsigned int i;
2278	unsigned int handled = 0;
2279	unsigned long flags;
2280	u32 irq_stat;
2281
2282	spin_lock_irqsave(&host->lock, flags);
2283
2284	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2285
2286	for (i = 0; i < host->n_ports; i++) {
2287		struct ata_port *ap = host->ports[i];
2288
2289		if (ap->link.sactive) {
2290			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2291			handled = 1;
2292		} else {
2293			if (irq_stat)	/* preserve hotplug bits */
2294				nv_swncq_irq_clear(ap, 0xfff0);
2295
2296			handled += nv_host_intr(ap, (u8)irq_stat);
2297		}
2298		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2299	}
2300
2301	spin_unlock_irqrestore(&host->lock, flags);
2302
2303	return IRQ_RETVAL(handled);
2304}
2305
2306static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2307{
2308	const struct ata_port_info *ppi[] = { NULL, NULL };
2309	struct nv_pi_priv *ipriv;
2310	struct ata_host *host;
2311	struct nv_host_priv *hpriv;
2312	int rc;
2313	u32 bar;
2314	void __iomem *base;
2315	unsigned long type = ent->driver_data;
2316
2317        // Make sure this is a SATA controller by counting the number of bars
2318        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2319        // it's an IDE controller and we ignore it.
2320	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
2321		if (pci_resource_start(pdev, bar) == 0)
2322			return -ENODEV;
2323
2324	ata_print_version_once(&pdev->dev, DRV_VERSION);
2325
2326	rc = pcim_enable_device(pdev);
2327	if (rc)
2328		return rc;
2329
2330	/* determine type and allocate host */
2331	if (type == CK804 && adma_enabled) {
2332		dev_notice(&pdev->dev, "Using ADMA mode\n");
2333		type = ADMA;
2334	} else if (type == MCP5x && swncq_enabled) {
2335		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2336		type = SWNCQ;
2337	}
2338
2339	ppi[0] = &nv_port_info[type];
2340	ipriv = ppi[0]->private_data;
2341	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2342	if (rc)
2343		return rc;
2344
2345	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2346	if (!hpriv)
2347		return -ENOMEM;
2348	hpriv->type = type;
2349	host->private_data = hpriv;
2350
2351	/* request and iomap NV_MMIO_BAR */
2352	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2353	if (rc)
2354		return rc;
2355
2356	/* configure SCR access */
2357	base = host->iomap[NV_MMIO_BAR];
2358	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2359	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2360
2361	/* enable SATA space for CK804 */
2362	if (type >= CK804) {
2363		u8 regval;
2364
2365		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2366		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2367		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2368	}
2369
2370	/* init ADMA */
2371	if (type == ADMA) {
2372		rc = nv_adma_host_init(host);
2373		if (rc)
2374			return rc;
2375	} else if (type == SWNCQ)
2376		nv_swncq_host_init(host);
2377
2378	if (msi_enabled) {
2379		dev_notice(&pdev->dev, "Using MSI\n");
2380		pci_enable_msi(pdev);
2381	}
2382
2383	pci_set_master(pdev);
2384	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2385}
2386
2387#ifdef CONFIG_PM_SLEEP
2388static int nv_pci_device_resume(struct pci_dev *pdev)
2389{
2390	struct ata_host *host = pci_get_drvdata(pdev);
2391	struct nv_host_priv *hpriv = host->private_data;
2392	int rc;
2393
2394	rc = ata_pci_device_do_resume(pdev);
2395	if (rc)
2396		return rc;
2397
2398	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2399		if (hpriv->type >= CK804) {
2400			u8 regval;
2401
2402			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2403			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2404			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2405		}
2406		if (hpriv->type == ADMA) {
2407			u32 tmp32;
2408			struct nv_adma_port_priv *pp;
2409			/* enable/disable ADMA on the ports appropriately */
2410			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2411
2412			pp = host->ports[0]->private_data;
2413			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2414				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2415					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2416			else
2417				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2418					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2419			pp = host->ports[1]->private_data;
2420			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2421				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2422					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2423			else
2424				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2425					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2426
2427			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2428		}
2429	}
2430
2431	ata_host_resume(host);
2432
2433	return 0;
2434}
2435#endif
2436
2437static void nv_ck804_host_stop(struct ata_host *host)
2438{
2439	struct pci_dev *pdev = to_pci_dev(host->dev);
2440	u8 regval;
2441
2442	/* disable SATA space for CK804 */
2443	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2444	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2445	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2446}
2447
2448static void nv_adma_host_stop(struct ata_host *host)
2449{
2450	struct pci_dev *pdev = to_pci_dev(host->dev);
2451	u32 tmp32;
2452
2453	/* disable ADMA on the ports */
2454	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2455	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2456		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2457		   NV_MCP_SATA_CFG_20_PORT1_EN |
2458		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2459
2460	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2461
2462	nv_ck804_host_stop(host);
2463}
2464
2465module_pci_driver(nv_pci_driver);
2466
2467module_param_named(adma, adma_enabled, bool, 0444);
2468MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2469module_param_named(swncq, swncq_enabled, bool, 0444);
2470MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2471module_param_named(msi, msi_enabled, bool, 0444);
2472MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
v4.6
 
   1/*
   2 *  sata_nv.c - NVIDIA nForce SATA
   3 *
   4 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
   5 *  Copyright 2004 Andrew Chew
   6 *
   7 *
   8 *  This program is free software; you can redistribute it and/or modify
   9 *  it under the terms of the GNU General Public License as published by
  10 *  the Free Software Foundation; either version 2, or (at your option)
  11 *  any later version.
  12 *
  13 *  This program is distributed in the hope that it will be useful,
  14 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 *  GNU General Public License for more details.
  17 *
  18 *  You should have received a copy of the GNU General Public License
  19 *  along with this program; see the file COPYING.  If not, write to
  20 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  21 *
  22 *
  23 *  libata documentation is available via 'make {ps|pdf}docs',
  24 *  as Documentation/DocBook/libata.*
  25 *
  26 *  No hardware documentation available outside of NVIDIA.
  27 *  This driver programs the NVIDIA SATA controller in a similar
  28 *  fashion as with other PCI IDE BMDMA controllers, with a few
  29 *  NV-specific details such as register offsets, SATA phy location,
  30 *  hotplug info, etc.
  31 *
  32 *  CK804/MCP04 controllers support an alternate programming interface
  33 *  similar to the ADMA specification (with some modifications).
  34 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
  35 *  sent through the legacy interface.
  36 *
  37 */
  38
  39#include <linux/kernel.h>
  40#include <linux/module.h>
  41#include <linux/gfp.h>
  42#include <linux/pci.h>
  43#include <linux/blkdev.h>
  44#include <linux/delay.h>
  45#include <linux/interrupt.h>
  46#include <linux/device.h>
  47#include <scsi/scsi_host.h>
  48#include <scsi/scsi_device.h>
  49#include <linux/libata.h>
 
  50
  51#define DRV_NAME			"sata_nv"
  52#define DRV_VERSION			"3.5"
  53
  54#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
  55
  56enum {
  57	NV_MMIO_BAR			= 5,
  58
  59	NV_PORTS			= 2,
  60	NV_PIO_MASK			= ATA_PIO4,
  61	NV_MWDMA_MASK			= ATA_MWDMA2,
  62	NV_UDMA_MASK			= ATA_UDMA6,
  63	NV_PORT0_SCR_REG_OFFSET		= 0x00,
  64	NV_PORT1_SCR_REG_OFFSET		= 0x40,
  65
  66	/* INT_STATUS/ENABLE */
  67	NV_INT_STATUS			= 0x10,
  68	NV_INT_ENABLE			= 0x11,
  69	NV_INT_STATUS_CK804		= 0x440,
  70	NV_INT_ENABLE_CK804		= 0x441,
  71
  72	/* INT_STATUS/ENABLE bits */
  73	NV_INT_DEV			= 0x01,
  74	NV_INT_PM			= 0x02,
  75	NV_INT_ADDED			= 0x04,
  76	NV_INT_REMOVED			= 0x08,
  77
  78	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
  79
  80	NV_INT_ALL			= 0x0f,
  81	NV_INT_MASK			= NV_INT_DEV |
  82					  NV_INT_ADDED | NV_INT_REMOVED,
  83
  84	/* INT_CONFIG */
  85	NV_INT_CONFIG			= 0x12,
  86	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
  87
  88	// For PCI config register 20
  89	NV_MCP_SATA_CFG_20		= 0x50,
  90	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
  91	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
  92	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
  93	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
  94	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
  95
  96	NV_ADMA_MAX_CPBS		= 32,
  97	NV_ADMA_CPB_SZ			= 128,
  98	NV_ADMA_APRD_SZ			= 16,
  99	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
 100					   NV_ADMA_APRD_SZ,
 101	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
 102	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
 103	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
 104					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
 105
 106	/* BAR5 offset to ADMA general registers */
 107	NV_ADMA_GEN			= 0x400,
 108	NV_ADMA_GEN_CTL			= 0x00,
 109	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
 110
 111	/* BAR5 offset to ADMA ports */
 112	NV_ADMA_PORT			= 0x480,
 113
 114	/* size of ADMA port register space  */
 115	NV_ADMA_PORT_SIZE		= 0x100,
 116
 117	/* ADMA port registers */
 118	NV_ADMA_CTL			= 0x40,
 119	NV_ADMA_CPB_COUNT		= 0x42,
 120	NV_ADMA_NEXT_CPB_IDX		= 0x43,
 121	NV_ADMA_STAT			= 0x44,
 122	NV_ADMA_CPB_BASE_LOW		= 0x48,
 123	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
 124	NV_ADMA_APPEND			= 0x50,
 125	NV_ADMA_NOTIFIER		= 0x68,
 126	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
 127
 128	/* NV_ADMA_CTL register bits */
 129	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
 130	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
 131	NV_ADMA_CTL_GO			= (1 << 7),
 132	NV_ADMA_CTL_AIEN		= (1 << 8),
 133	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
 134	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
 135
 136	/* CPB response flag bits */
 137	NV_CPB_RESP_DONE		= (1 << 0),
 138	NV_CPB_RESP_ATA_ERR		= (1 << 3),
 139	NV_CPB_RESP_CMD_ERR		= (1 << 4),
 140	NV_CPB_RESP_CPB_ERR		= (1 << 7),
 141
 142	/* CPB control flag bits */
 143	NV_CPB_CTL_CPB_VALID		= (1 << 0),
 144	NV_CPB_CTL_QUEUE		= (1 << 1),
 145	NV_CPB_CTL_APRD_VALID		= (1 << 2),
 146	NV_CPB_CTL_IEN			= (1 << 3),
 147	NV_CPB_CTL_FPDMA		= (1 << 4),
 148
 149	/* APRD flags */
 150	NV_APRD_WRITE			= (1 << 1),
 151	NV_APRD_END			= (1 << 2),
 152	NV_APRD_CONT			= (1 << 3),
 153
 154	/* NV_ADMA_STAT flags */
 155	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
 156	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
 157	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
 158	NV_ADMA_STAT_CPBERR		= (1 << 4),
 159	NV_ADMA_STAT_SERROR		= (1 << 5),
 160	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
 161	NV_ADMA_STAT_IDLE		= (1 << 8),
 162	NV_ADMA_STAT_LEGACY		= (1 << 9),
 163	NV_ADMA_STAT_STOPPED		= (1 << 10),
 164	NV_ADMA_STAT_DONE		= (1 << 12),
 165	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
 166					  NV_ADMA_STAT_TIMEOUT,
 167
 168	/* port flags */
 169	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
 170	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
 171
 172	/* MCP55 reg offset */
 173	NV_CTL_MCP55			= 0x400,
 174	NV_INT_STATUS_MCP55		= 0x440,
 175	NV_INT_ENABLE_MCP55		= 0x444,
 176	NV_NCQ_REG_MCP55		= 0x448,
 177
 178	/* MCP55 */
 179	NV_INT_ALL_MCP55		= 0xffff,
 180	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
 181	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
 182
 183	/* SWNCQ ENABLE BITS*/
 184	NV_CTL_PRI_SWNCQ		= 0x02,
 185	NV_CTL_SEC_SWNCQ		= 0x04,
 186
 187	/* SW NCQ status bits*/
 188	NV_SWNCQ_IRQ_DEV		= (1 << 0),
 189	NV_SWNCQ_IRQ_PM			= (1 << 1),
 190	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
 191	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
 192
 193	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
 194	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
 195	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
 196	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
 197
 198	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
 199					  NV_SWNCQ_IRQ_REMOVED,
 200
 201};
 202
 203/* ADMA Physical Region Descriptor - one SG segment */
 204struct nv_adma_prd {
 205	__le64			addr;
 206	__le32			len;
 207	u8			flags;
 208	u8			packet_len;
 209	__le16			reserved;
 210};
 211
 212enum nv_adma_regbits {
 213	CMDEND	= (1 << 15),		/* end of command list */
 214	WNB	= (1 << 14),		/* wait-not-BSY */
 215	IGN	= (1 << 13),		/* ignore this entry */
 216	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
 217	DA2	= (1 << (2 + 8)),
 218	DA1	= (1 << (1 + 8)),
 219	DA0	= (1 << (0 + 8)),
 220};
 221
 222/* ADMA Command Parameter Block
 223   The first 5 SG segments are stored inside the Command Parameter Block itself.
 224   If there are more than 5 segments the remainder are stored in a separate
 225   memory area indicated by next_aprd. */
 226struct nv_adma_cpb {
 227	u8			resp_flags;    /* 0 */
 228	u8			reserved1;     /* 1 */
 229	u8			ctl_flags;     /* 2 */
 230	/* len is length of taskfile in 64 bit words */
 231	u8			len;		/* 3  */
 232	u8			tag;           /* 4 */
 233	u8			next_cpb_idx;  /* 5 */
 234	__le16			reserved2;     /* 6-7 */
 235	__le16			tf[12];        /* 8-31 */
 236	struct nv_adma_prd	aprd[5];       /* 32-111 */
 237	__le64			next_aprd;     /* 112-119 */
 238	__le64			reserved3;     /* 120-127 */
 239};
 240
 241
 242struct nv_adma_port_priv {
 243	struct nv_adma_cpb	*cpb;
 244	dma_addr_t		cpb_dma;
 245	struct nv_adma_prd	*aprd;
 246	dma_addr_t		aprd_dma;
 247	void __iomem		*ctl_block;
 248	void __iomem		*gen_block;
 249	void __iomem		*notifier_clear_block;
 250	u64			adma_dma_mask;
 251	u8			flags;
 252	int			last_issue_ncq;
 253};
 254
 255struct nv_host_priv {
 256	unsigned long		type;
 257};
 258
 259struct defer_queue {
 260	u32		defer_bits;
 261	unsigned int	head;
 262	unsigned int	tail;
 263	unsigned int	tag[ATA_MAX_QUEUE];
 264};
 265
 266enum ncq_saw_flag_list {
 267	ncq_saw_d2h	= (1U << 0),
 268	ncq_saw_dmas	= (1U << 1),
 269	ncq_saw_sdb	= (1U << 2),
 270	ncq_saw_backout	= (1U << 3),
 271};
 272
 273struct nv_swncq_port_priv {
 274	struct ata_bmdma_prd *prd;	 /* our SG list */
 275	dma_addr_t	prd_dma; /* and its DMA mapping */
 276	void __iomem	*sactive_block;
 277	void __iomem	*irq_block;
 278	void __iomem	*tag_block;
 279	u32		qc_active;
 280
 281	unsigned int	last_issue_tag;
 282
 283	/* fifo circular queue to store deferral command */
 284	struct defer_queue defer_queue;
 285
 286	/* for NCQ interrupt analysis */
 287	u32		dhfis_bits;
 288	u32		dmafis_bits;
 289	u32		sdbfis_bits;
 290
 291	unsigned int	ncq_flags;
 292};
 293
 294
 295#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
 296
 297static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 298#ifdef CONFIG_PM_SLEEP
 299static int nv_pci_device_resume(struct pci_dev *pdev);
 300#endif
 301static void nv_ck804_host_stop(struct ata_host *host);
 302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
 303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
 304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
 305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 307
 308static int nv_hardreset(struct ata_link *link, unsigned int *class,
 309			unsigned long deadline);
 310static void nv_nf2_freeze(struct ata_port *ap);
 311static void nv_nf2_thaw(struct ata_port *ap);
 312static void nv_ck804_freeze(struct ata_port *ap);
 313static void nv_ck804_thaw(struct ata_port *ap);
 314static int nv_adma_slave_config(struct scsi_device *sdev);
 315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 316static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
 317static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 318static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 319static void nv_adma_irq_clear(struct ata_port *ap);
 320static int nv_adma_port_start(struct ata_port *ap);
 321static void nv_adma_port_stop(struct ata_port *ap);
 322#ifdef CONFIG_PM
 323static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
 324static int nv_adma_port_resume(struct ata_port *ap);
 325#endif
 326static void nv_adma_freeze(struct ata_port *ap);
 327static void nv_adma_thaw(struct ata_port *ap);
 328static void nv_adma_error_handler(struct ata_port *ap);
 329static void nv_adma_host_stop(struct ata_host *host);
 330static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
 331static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 332
 333static void nv_mcp55_thaw(struct ata_port *ap);
 334static void nv_mcp55_freeze(struct ata_port *ap);
 335static void nv_swncq_error_handler(struct ata_port *ap);
 336static int nv_swncq_slave_config(struct scsi_device *sdev);
 337static int nv_swncq_port_start(struct ata_port *ap);
 338static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 339static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 340static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 341static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
 342static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
 343#ifdef CONFIG_PM
 344static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
 345static int nv_swncq_port_resume(struct ata_port *ap);
 346#endif
 347
 348enum nv_host_type
 349{
 350	GENERIC,
 351	NFORCE2,
 352	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
 353	CK804,
 354	ADMA,
 355	MCP5x,
 356	SWNCQ,
 357};
 358
 359static const struct pci_device_id nv_pci_tbl[] = {
 360	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
 361	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
 362	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
 363	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
 364	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
 365	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
 366	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
 367	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
 368	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
 369	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
 370	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
 371	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 372	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 373	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
 374
 375	{ } /* terminate list */
 376};
 377
 378static struct pci_driver nv_pci_driver = {
 379	.name			= DRV_NAME,
 380	.id_table		= nv_pci_tbl,
 381	.probe			= nv_init_one,
 382#ifdef CONFIG_PM_SLEEP
 383	.suspend		= ata_pci_device_suspend,
 384	.resume			= nv_pci_device_resume,
 385#endif
 386	.remove			= ata_pci_remove_one,
 387};
 388
 389static struct scsi_host_template nv_sht = {
 390	ATA_BMDMA_SHT(DRV_NAME),
 391};
 392
 393static struct scsi_host_template nv_adma_sht = {
 394	ATA_NCQ_SHT(DRV_NAME),
 395	.can_queue		= NV_ADMA_MAX_CPBS,
 396	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
 397	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
 398	.slave_configure	= nv_adma_slave_config,
 
 
 
 399};
 400
 401static struct scsi_host_template nv_swncq_sht = {
 402	ATA_NCQ_SHT(DRV_NAME),
 403	.can_queue		= ATA_MAX_QUEUE,
 404	.sg_tablesize		= LIBATA_MAX_PRD,
 405	.dma_boundary		= ATA_DMA_BOUNDARY,
 406	.slave_configure	= nv_swncq_slave_config,
 
 
 
 407};
 408
 409/*
 410 * NV SATA controllers have various different problems with hardreset
 411 * protocol depending on the specific controller and device.
 412 *
 413 * GENERIC:
 414 *
 415 *  bko11195 reports that link doesn't come online after hardreset on
 416 *  generic nv's and there have been several other similar reports on
 417 *  linux-ide.
 418 *
 419 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 420 *  softreset.
 421 *
 422 * NF2/3:
 423 *
 424 *  bko3352 reports nf2/3 controllers can't determine device signature
 425 *  reliably after hardreset.  The following thread reports detection
 426 *  failure on cold boot with the standard debouncing timing.
 427 *
 428 *  http://thread.gmane.org/gmane.linux.ide/34098
 429 *
 430 *  bko12176 reports that hardreset fails to bring up the link during
 431 *  boot on nf2.
 432 *
 433 * CK804:
 434 *
 435 *  For initial probing after boot and hot plugging, hardreset mostly
 436 *  works fine on CK804 but curiously, reprobing on the initial port
 437 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 438 *  FIS in somewhat undeterministic way.
 439 *
 440 * SWNCQ:
 441 *
 442 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 443 *  hardreset should be used and hardreset can't report proper
 444 *  signature, which suggests that mcp5x is closer to nf2 as long as
 445 *  reset quirkiness is concerned.
 446 *
 447 *  bko12703 reports that boot probing fails for intel SSD with
 448 *  hardreset.  Link fails to come online.  Softreset works fine.
 449 *
 450 * The failures are varied but the following patterns seem true for
 451 * all flavors.
 452 *
 453 * - Softreset during boot always works.
 454 *
 455 * - Hardreset during boot sometimes fails to bring up the link on
 456 *   certain comibnations and device signature acquisition is
 457 *   unreliable.
 458 *
 459 * - Hardreset is often necessary after hotplug.
 460 *
 461 * So, preferring softreset for boot probing and error handling (as
 462 * hardreset might bring down the link) but using hardreset for
 463 * post-boot probing should work around the above issues in most
 464 * cases.  Define nv_hardreset() which only kicks in for post-boot
 465 * probing and use it for all variants.
 466 */
 467static struct ata_port_operations nv_generic_ops = {
 468	.inherits		= &ata_bmdma_port_ops,
 469	.lost_interrupt		= ATA_OP_NULL,
 470	.scr_read		= nv_scr_read,
 471	.scr_write		= nv_scr_write,
 472	.hardreset		= nv_hardreset,
 473};
 474
 475static struct ata_port_operations nv_nf2_ops = {
 476	.inherits		= &nv_generic_ops,
 477	.freeze			= nv_nf2_freeze,
 478	.thaw			= nv_nf2_thaw,
 479};
 480
 481static struct ata_port_operations nv_ck804_ops = {
 482	.inherits		= &nv_generic_ops,
 483	.freeze			= nv_ck804_freeze,
 484	.thaw			= nv_ck804_thaw,
 485	.host_stop		= nv_ck804_host_stop,
 486};
 487
 488static struct ata_port_operations nv_adma_ops = {
 489	.inherits		= &nv_ck804_ops,
 490
 491	.check_atapi_dma	= nv_adma_check_atapi_dma,
 492	.sff_tf_read		= nv_adma_tf_read,
 493	.qc_defer		= ata_std_qc_defer,
 494	.qc_prep		= nv_adma_qc_prep,
 495	.qc_issue		= nv_adma_qc_issue,
 496	.sff_irq_clear		= nv_adma_irq_clear,
 497
 498	.freeze			= nv_adma_freeze,
 499	.thaw			= nv_adma_thaw,
 500	.error_handler		= nv_adma_error_handler,
 501	.post_internal_cmd	= nv_adma_post_internal_cmd,
 502
 503	.port_start		= nv_adma_port_start,
 504	.port_stop		= nv_adma_port_stop,
 505#ifdef CONFIG_PM
 506	.port_suspend		= nv_adma_port_suspend,
 507	.port_resume		= nv_adma_port_resume,
 508#endif
 509	.host_stop		= nv_adma_host_stop,
 510};
 511
 512static struct ata_port_operations nv_swncq_ops = {
 513	.inherits		= &nv_generic_ops,
 514
 515	.qc_defer		= ata_std_qc_defer,
 516	.qc_prep		= nv_swncq_qc_prep,
 517	.qc_issue		= nv_swncq_qc_issue,
 518
 519	.freeze			= nv_mcp55_freeze,
 520	.thaw			= nv_mcp55_thaw,
 521	.error_handler		= nv_swncq_error_handler,
 522
 523#ifdef CONFIG_PM
 524	.port_suspend		= nv_swncq_port_suspend,
 525	.port_resume		= nv_swncq_port_resume,
 526#endif
 527	.port_start		= nv_swncq_port_start,
 528};
 529
 530struct nv_pi_priv {
 531	irq_handler_t			irq_handler;
 532	struct scsi_host_template	*sht;
 533};
 534
 535#define NV_PI_PRIV(_irq_handler, _sht) \
 536	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
 537
 538static const struct ata_port_info nv_port_info[] = {
 539	/* generic */
 540	{
 541		.flags		= ATA_FLAG_SATA,
 542		.pio_mask	= NV_PIO_MASK,
 543		.mwdma_mask	= NV_MWDMA_MASK,
 544		.udma_mask	= NV_UDMA_MASK,
 545		.port_ops	= &nv_generic_ops,
 546		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 547	},
 548	/* nforce2/3 */
 549	{
 550		.flags		= ATA_FLAG_SATA,
 551		.pio_mask	= NV_PIO_MASK,
 552		.mwdma_mask	= NV_MWDMA_MASK,
 553		.udma_mask	= NV_UDMA_MASK,
 554		.port_ops	= &nv_nf2_ops,
 555		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
 556	},
 557	/* ck804 */
 558	{
 559		.flags		= ATA_FLAG_SATA,
 560		.pio_mask	= NV_PIO_MASK,
 561		.mwdma_mask	= NV_MWDMA_MASK,
 562		.udma_mask	= NV_UDMA_MASK,
 563		.port_ops	= &nv_ck804_ops,
 564		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
 565	},
 566	/* ADMA */
 567	{
 568		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
 569		.pio_mask	= NV_PIO_MASK,
 570		.mwdma_mask	= NV_MWDMA_MASK,
 571		.udma_mask	= NV_UDMA_MASK,
 572		.port_ops	= &nv_adma_ops,
 573		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
 574	},
 575	/* MCP5x */
 576	{
 577		.flags		= ATA_FLAG_SATA,
 578		.pio_mask	= NV_PIO_MASK,
 579		.mwdma_mask	= NV_MWDMA_MASK,
 580		.udma_mask	= NV_UDMA_MASK,
 581		.port_ops	= &nv_generic_ops,
 582		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 583	},
 584	/* SWNCQ */
 585	{
 586		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
 587		.pio_mask	= NV_PIO_MASK,
 588		.mwdma_mask	= NV_MWDMA_MASK,
 589		.udma_mask	= NV_UDMA_MASK,
 590		.port_ops	= &nv_swncq_ops,
 591		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
 592	},
 593};
 594
 595MODULE_AUTHOR("NVIDIA");
 596MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
 597MODULE_LICENSE("GPL");
 598MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 599MODULE_VERSION(DRV_VERSION);
 600
 601static bool adma_enabled;
 602static bool swncq_enabled = true;
 603static bool msi_enabled;
 604
 605static void nv_adma_register_mode(struct ata_port *ap)
 606{
 607	struct nv_adma_port_priv *pp = ap->private_data;
 608	void __iomem *mmio = pp->ctl_block;
 609	u16 tmp, status;
 610	int count = 0;
 611
 612	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
 613		return;
 614
 615	status = readw(mmio + NV_ADMA_STAT);
 616	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
 617		ndelay(50);
 618		status = readw(mmio + NV_ADMA_STAT);
 619		count++;
 620	}
 621	if (count == 20)
 622		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
 623			      status);
 624
 625	tmp = readw(mmio + NV_ADMA_CTL);
 626	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 627
 628	count = 0;
 629	status = readw(mmio + NV_ADMA_STAT);
 630	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
 631		ndelay(50);
 632		status = readw(mmio + NV_ADMA_STAT);
 633		count++;
 634	}
 635	if (count == 20)
 636		ata_port_warn(ap,
 637			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
 638			      status);
 639
 640	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
 641}
 642
 643static void nv_adma_mode(struct ata_port *ap)
 644{
 645	struct nv_adma_port_priv *pp = ap->private_data;
 646	void __iomem *mmio = pp->ctl_block;
 647	u16 tmp, status;
 648	int count = 0;
 649
 650	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
 651		return;
 652
 653	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 654
 655	tmp = readw(mmio + NV_ADMA_CTL);
 656	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 657
 658	status = readw(mmio + NV_ADMA_STAT);
 659	while (((status & NV_ADMA_STAT_LEGACY) ||
 660	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
 661		ndelay(50);
 662		status = readw(mmio + NV_ADMA_STAT);
 663		count++;
 664	}
 665	if (count == 20)
 666		ata_port_warn(ap,
 667			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
 668			status);
 669
 670	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
 671}
 672
 673static int nv_adma_slave_config(struct scsi_device *sdev)
 674{
 675	struct ata_port *ap = ata_shost_to_port(sdev->host);
 676	struct nv_adma_port_priv *pp = ap->private_data;
 677	struct nv_adma_port_priv *port0, *port1;
 678	struct scsi_device *sdev0, *sdev1;
 679	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 680	unsigned long segment_boundary, flags;
 681	unsigned short sg_tablesize;
 682	int rc;
 683	int adma_enable;
 684	u32 current_reg, new_reg, config_mask;
 685
 686	rc = ata_scsi_slave_config(sdev);
 687
 688	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
 689		/* Not a proper libata device, ignore */
 690		return rc;
 691
 692	spin_lock_irqsave(ap->lock, flags);
 693
 694	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 695		/*
 696		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
 697		 * Therefore ATAPI commands are sent through the legacy interface.
 698		 * However, the legacy interface only supports 32-bit DMA.
 699		 * Restrict DMA parameters as required by the legacy interface
 700		 * when an ATAPI device is connected.
 701		 */
 702		segment_boundary = ATA_DMA_BOUNDARY;
 703		/* Subtract 1 since an extra entry may be needed for padding, see
 704		   libata-scsi.c */
 705		sg_tablesize = LIBATA_MAX_PRD - 1;
 706
 707		/* Since the legacy DMA engine is in use, we need to disable ADMA
 708		   on the port. */
 709		adma_enable = 0;
 710		nv_adma_register_mode(ap);
 711	} else {
 712		segment_boundary = NV_ADMA_DMA_BOUNDARY;
 713		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
 714		adma_enable = 1;
 715	}
 716
 717	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
 718
 719	if (ap->port_no == 1)
 720		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
 721			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
 722	else
 723		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
 724			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
 725
 726	if (adma_enable) {
 727		new_reg = current_reg | config_mask;
 728		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
 729	} else {
 730		new_reg = current_reg & ~config_mask;
 731		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
 732	}
 733
 734	if (current_reg != new_reg)
 735		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 736
 737	port0 = ap->host->ports[0]->private_data;
 738	port1 = ap->host->ports[1]->private_data;
 739	sdev0 = ap->host->ports[0]->link.device[0].sdev;
 740	sdev1 = ap->host->ports[1]->link.device[0].sdev;
 741	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 742	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
 743		/** We have to set the DMA mask to 32-bit if either port is in
 744		    ATAPI mode, since they are on the same PCI device which is
 745		    used for DMA mapping. If we set the mask we also need to set
 746		    the bounce limit on both ports to ensure that the block
 747		    layer doesn't feed addresses that cause DMA mapping to
 748		    choke. If either SCSI device is not allocated yet, it's OK
 749		    since that port will discover its correct setting when it
 750		    does get allocated.
 751		    Note: Setting 32-bit mask should not fail. */
 752		if (sdev0)
 753			blk_queue_bounce_limit(sdev0->request_queue,
 754					       ATA_DMA_MASK);
 755		if (sdev1)
 756			blk_queue_bounce_limit(sdev1->request_queue,
 757					       ATA_DMA_MASK);
 758
 759		dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 760	} else {
 761		/** This shouldn't fail as it was set to this value before */
 762		dma_set_mask(&pdev->dev, pp->adma_dma_mask);
 763		if (sdev0)
 764			blk_queue_bounce_limit(sdev0->request_queue,
 765					       pp->adma_dma_mask);
 766		if (sdev1)
 767			blk_queue_bounce_limit(sdev1->request_queue,
 768					       pp->adma_dma_mask);
 769	}
 770
 771	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
 772	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 773	ata_port_info(ap,
 774		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 775		      (unsigned long long)*ap->host->dev->dma_mask,
 776		      segment_boundary, sg_tablesize);
 777
 778	spin_unlock_irqrestore(ap->lock, flags);
 779
 780	return rc;
 781}
 782
 783static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
 784{
 785	struct nv_adma_port_priv *pp = qc->ap->private_data;
 786	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 787}
 788
 789static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 790{
 791	/* Other than when internal or pass-through commands are executed,
 792	   the only time this function will be called in ADMA mode will be
 793	   if a command fails. In the failure case we don't care about going
 794	   into register mode with ADMA commands pending, as the commands will
 795	   all shortly be aborted anyway. We assume that NCQ commands are not
 796	   issued via passthrough, which is the only way that switching into
 797	   ADMA mode could abort outstanding commands. */
 798	nv_adma_register_mode(ap);
 799
 800	ata_sff_tf_read(ap, tf);
 801}
 802
 803static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
 804{
 805	unsigned int idx = 0;
 806
 807	if (tf->flags & ATA_TFLAG_ISADDR) {
 808		if (tf->flags & ATA_TFLAG_LBA48) {
 809			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
 810			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
 811			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
 812			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
 813			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
 814			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
 815		} else
 816			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
 817
 818		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
 819		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
 820		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
 821		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
 822	}
 823
 824	if (tf->flags & ATA_TFLAG_DEVICE)
 825		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
 826
 827	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
 828
 829	while (idx < 12)
 830		cpb[idx++] = cpu_to_le16(IGN);
 831
 832	return idx;
 833}
 834
 835static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 836{
 837	struct nv_adma_port_priv *pp = ap->private_data;
 838	u8 flags = pp->cpb[cpb_num].resp_flags;
 839
 840	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
 841
 842	if (unlikely((force_err ||
 843		     flags & (NV_CPB_RESP_ATA_ERR |
 844			      NV_CPB_RESP_CMD_ERR |
 845			      NV_CPB_RESP_CPB_ERR)))) {
 846		struct ata_eh_info *ehi = &ap->link.eh_info;
 847		int freeze = 0;
 848
 849		ata_ehi_clear_desc(ehi);
 850		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
 851		if (flags & NV_CPB_RESP_ATA_ERR) {
 852			ata_ehi_push_desc(ehi, "ATA error");
 853			ehi->err_mask |= AC_ERR_DEV;
 854		} else if (flags & NV_CPB_RESP_CMD_ERR) {
 855			ata_ehi_push_desc(ehi, "CMD error");
 856			ehi->err_mask |= AC_ERR_DEV;
 857		} else if (flags & NV_CPB_RESP_CPB_ERR) {
 858			ata_ehi_push_desc(ehi, "CPB error");
 859			ehi->err_mask |= AC_ERR_SYSTEM;
 860			freeze = 1;
 861		} else {
 862			/* notifier error, but no error in CPB flags? */
 863			ata_ehi_push_desc(ehi, "unknown");
 864			ehi->err_mask |= AC_ERR_OTHER;
 865			freeze = 1;
 866		}
 867		/* Kill all commands. EH will determine what actually failed. */
 868		if (freeze)
 869			ata_port_freeze(ap);
 870		else
 871			ata_port_abort(ap);
 872		return -1;
 873	}
 874
 875	if (likely(flags & NV_CPB_RESP_DONE))
 876		return 1;
 877	return 0;
 878}
 879
 880static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
 881{
 882	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 883
 884	/* freeze if hotplugged */
 885	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
 886		ata_port_freeze(ap);
 887		return 1;
 888	}
 889
 890	/* bail out if not our interrupt */
 891	if (!(irq_stat & NV_INT_DEV))
 892		return 0;
 893
 894	/* DEV interrupt w/ no active qc? */
 895	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 896		ata_sff_check_status(ap);
 897		return 1;
 898	}
 899
 900	/* handle interrupt */
 901	return ata_bmdma_port_intr(ap, qc);
 902}
 903
 904static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 905{
 906	struct ata_host *host = dev_instance;
 907	int i, handled = 0;
 908	u32 notifier_clears[2];
 909
 910	spin_lock(&host->lock);
 911
 912	for (i = 0; i < host->n_ports; i++) {
 913		struct ata_port *ap = host->ports[i];
 914		struct nv_adma_port_priv *pp = ap->private_data;
 915		void __iomem *mmio = pp->ctl_block;
 916		u16 status;
 917		u32 gen_ctl;
 918		u32 notifier, notifier_error;
 919
 920		notifier_clears[i] = 0;
 921
 922		/* if ADMA is disabled, use standard ata interrupt handler */
 923		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
 924			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 925				>> (NV_INT_PORT_SHIFT * i);
 926			handled += nv_host_intr(ap, irq_stat);
 927			continue;
 928		}
 929
 930		/* if in ATA register mode, check for standard interrupts */
 931		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
 932			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 933				>> (NV_INT_PORT_SHIFT * i);
 934			if (ata_tag_valid(ap->link.active_tag))
 935				/** NV_INT_DEV indication seems unreliable
 936				    at times at least in ADMA mode. Force it
 937				    on always when a command is active, to
 938				    prevent losing interrupts. */
 939				irq_stat |= NV_INT_DEV;
 940			handled += nv_host_intr(ap, irq_stat);
 941		}
 942
 943		notifier = readl(mmio + NV_ADMA_NOTIFIER);
 944		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
 945		notifier_clears[i] = notifier | notifier_error;
 946
 947		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
 948
 949		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
 950		    !notifier_error)
 951			/* Nothing to do */
 952			continue;
 953
 954		status = readw(mmio + NV_ADMA_STAT);
 955
 956		/*
 957		 * Clear status. Ensure the controller sees the
 958		 * clearing before we start looking at any of the CPB
 959		 * statuses, so that any CPB completions after this
 960		 * point in the handler will raise another interrupt.
 961		 */
 962		writew(status, mmio + NV_ADMA_STAT);
 963		readw(mmio + NV_ADMA_STAT); /* flush posted write */
 964		rmb();
 965
 966		handled++; /* irq handled if we got here */
 967
 968		/* freeze if hotplugged or controller error */
 969		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
 970				       NV_ADMA_STAT_HOTUNPLUG |
 971				       NV_ADMA_STAT_TIMEOUT |
 972				       NV_ADMA_STAT_SERROR))) {
 973			struct ata_eh_info *ehi = &ap->link.eh_info;
 974
 975			ata_ehi_clear_desc(ehi);
 976			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
 977			if (status & NV_ADMA_STAT_TIMEOUT) {
 978				ehi->err_mask |= AC_ERR_SYSTEM;
 979				ata_ehi_push_desc(ehi, "timeout");
 980			} else if (status & NV_ADMA_STAT_HOTPLUG) {
 981				ata_ehi_hotplugged(ehi);
 982				ata_ehi_push_desc(ehi, "hotplug");
 983			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
 984				ata_ehi_hotplugged(ehi);
 985				ata_ehi_push_desc(ehi, "hot unplug");
 986			} else if (status & NV_ADMA_STAT_SERROR) {
 987				/* let EH analyze SError and figure out cause */
 988				ata_ehi_push_desc(ehi, "SError");
 989			} else
 990				ata_ehi_push_desc(ehi, "unknown");
 991			ata_port_freeze(ap);
 992			continue;
 993		}
 994
 995		if (status & (NV_ADMA_STAT_DONE |
 996			      NV_ADMA_STAT_CPBERR |
 997			      NV_ADMA_STAT_CMD_COMPLETE)) {
 998			u32 check_commands = notifier_clears[i];
 999			u32 done_mask = 0;
1000			int pos, rc;
1001
1002			if (status & NV_ADMA_STAT_CPBERR) {
1003				/* check all active commands */
1004				if (ata_tag_valid(ap->link.active_tag))
1005					check_commands = 1 <<
1006						ap->link.active_tag;
1007				else
1008					check_commands = ap->link.sactive;
1009			}
1010
1011			/* check CPBs for completed commands */
1012			while ((pos = ffs(check_commands))) {
1013				pos--;
1014				rc = nv_adma_check_cpb(ap, pos,
1015						notifier_error & (1 << pos));
1016				if (rc > 0)
1017					done_mask |= 1 << pos;
1018				else if (unlikely(rc < 0))
1019					check_commands = 0;
1020				check_commands &= ~(1 << pos);
1021			}
1022			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1023		}
1024	}
1025
1026	if (notifier_clears[0] || notifier_clears[1]) {
1027		/* Note: Both notifier clear registers must be written
1028		   if either is set, even if one is zero, according to NVIDIA. */
1029		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1030		writel(notifier_clears[0], pp->notifier_clear_block);
1031		pp = host->ports[1]->private_data;
1032		writel(notifier_clears[1], pp->notifier_clear_block);
1033	}
1034
1035	spin_unlock(&host->lock);
1036
1037	return IRQ_RETVAL(handled);
1038}
1039
1040static void nv_adma_freeze(struct ata_port *ap)
1041{
1042	struct nv_adma_port_priv *pp = ap->private_data;
1043	void __iomem *mmio = pp->ctl_block;
1044	u16 tmp;
1045
1046	nv_ck804_freeze(ap);
1047
1048	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1049		return;
1050
1051	/* clear any outstanding CK804 notifications */
1052	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1053		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1054
1055	/* Disable interrupt */
1056	tmp = readw(mmio + NV_ADMA_CTL);
1057	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1058		mmio + NV_ADMA_CTL);
1059	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1060}
1061
1062static void nv_adma_thaw(struct ata_port *ap)
1063{
1064	struct nv_adma_port_priv *pp = ap->private_data;
1065	void __iomem *mmio = pp->ctl_block;
1066	u16 tmp;
1067
1068	nv_ck804_thaw(ap);
1069
1070	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1071		return;
1072
1073	/* Enable interrupt */
1074	tmp = readw(mmio + NV_ADMA_CTL);
1075	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1076		mmio + NV_ADMA_CTL);
1077	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1078}
1079
1080static void nv_adma_irq_clear(struct ata_port *ap)
1081{
1082	struct nv_adma_port_priv *pp = ap->private_data;
1083	void __iomem *mmio = pp->ctl_block;
1084	u32 notifier_clears[2];
1085
1086	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1087		ata_bmdma_irq_clear(ap);
1088		return;
1089	}
1090
1091	/* clear any outstanding CK804 notifications */
1092	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1093		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1094
1095	/* clear ADMA status */
1096	writew(0xffff, mmio + NV_ADMA_STAT);
1097
1098	/* clear notifiers - note both ports need to be written with
1099	   something even though we are only clearing on one */
1100	if (ap->port_no == 0) {
1101		notifier_clears[0] = 0xFFFFFFFF;
1102		notifier_clears[1] = 0;
1103	} else {
1104		notifier_clears[0] = 0;
1105		notifier_clears[1] = 0xFFFFFFFF;
1106	}
1107	pp = ap->host->ports[0]->private_data;
1108	writel(notifier_clears[0], pp->notifier_clear_block);
1109	pp = ap->host->ports[1]->private_data;
1110	writel(notifier_clears[1], pp->notifier_clear_block);
1111}
1112
1113static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1114{
1115	struct nv_adma_port_priv *pp = qc->ap->private_data;
1116
1117	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1118		ata_bmdma_post_internal_cmd(qc);
1119}
1120
1121static int nv_adma_port_start(struct ata_port *ap)
1122{
1123	struct device *dev = ap->host->dev;
1124	struct nv_adma_port_priv *pp;
1125	int rc;
1126	void *mem;
1127	dma_addr_t mem_dma;
1128	void __iomem *mmio;
1129	struct pci_dev *pdev = to_pci_dev(dev);
1130	u16 tmp;
1131
1132	VPRINTK("ENTER\n");
1133
1134	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1135	   pad buffers */
1136	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1137	if (rc)
1138		return rc;
1139	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1140	if (rc)
1141		return rc;
1142
1143	/* we might fall back to bmdma, so allocate bmdma resources */
1144	rc = ata_bmdma_port_start(ap);
1145	if (rc)
1146		return rc;
1147
1148	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1149	if (!pp)
1150		return -ENOMEM;
1151
1152	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1153	       ap->port_no * NV_ADMA_PORT_SIZE;
1154	pp->ctl_block = mmio;
1155	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1156	pp->notifier_clear_block = pp->gen_block +
1157	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1158
1159	/* Now that the legacy PRD and padding buffer are allocated we can
1160	   safely raise the DMA mask to allocate the CPB/APRD table.
1161	   These calls are allowed to fail, since we store the mask that
1162	   ends up in effect and use it as the bounce limit in slave_config
1163	   later if needed. */
1164	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1165	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1166	pp->adma_dma_mask = *dev->dma_mask;
1167
1168	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1169				  &mem_dma, GFP_KERNEL);
1170	if (!mem)
1171		return -ENOMEM;
1172	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1173
1174	/*
1175	 * First item in chunk of DMA memory:
1176	 * 128-byte command parameter block (CPB)
1177	 * one for each command tag
1178	 */
1179	pp->cpb     = mem;
1180	pp->cpb_dma = mem_dma;
1181
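    	/* Program the CPB base address; (mem_dma >> 16) >> 16 extracts the
    	 * high 32 bits without a shift-count warning when dma_addr_t is
    	 * only 32 bits wide. */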
1182	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1183	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1184
1185	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1186	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1187
1188	/*
1189	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1190	 */
1191	pp->aprd = mem;
1192	pp->aprd_dma = mem_dma;
1193
1194	ap->private_data = pp;
1195
1196	/* clear any outstanding interrupt conditions */
1197	writew(0xffff, mmio + NV_ADMA_STAT);
1198
1199	/* initialize port variables */
1200	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1201
1202	/* clear CPB fetch count */
1203	writew(0, mmio + NV_ADMA_CPB_COUNT);
1204
1205	/* clear GO for register mode, enable interrupt */
1206	tmp = readw(mmio + NV_ADMA_CTL);
1207	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1208		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1209
1210	tmp = readw(mmio + NV_ADMA_CTL);
1211	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1212	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1213	udelay(1);
1214	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1215	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1216
1217	return 0;
1218}
1219
1220static void nv_adma_port_stop(struct ata_port *ap)
1221{
1222	struct nv_adma_port_priv *pp = ap->private_data;
1223	void __iomem *mmio = pp->ctl_block;
1224
1225	VPRINTK("ENTER\n");
1226	writew(0, mmio + NV_ADMA_CTL);
1227}
1228
1229#ifdef CONFIG_PM
1230static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1231{
1232	struct nv_adma_port_priv *pp = ap->private_data;
1233	void __iomem *mmio = pp->ctl_block;
1234
1235	/* Go to register mode - clears GO */
1236	nv_adma_register_mode(ap);
1237
1238	/* clear CPB fetch count */
1239	writew(0, mmio + NV_ADMA_CPB_COUNT);
1240
1241	/* disable interrupt, shut down port */
1242	writew(0, mmio + NV_ADMA_CTL);
1243
1244	return 0;
1245}
1246
1247static int nv_adma_port_resume(struct ata_port *ap)
1248{
1249	struct nv_adma_port_priv *pp = ap->private_data;
1250	void __iomem *mmio = pp->ctl_block;
1251	u16 tmp;
1252
1253	/* set CPB block location */
1254	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1255	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1256
1257	/* clear any outstanding interrupt conditions */
1258	writew(0xffff, mmio + NV_ADMA_STAT);
1259
1260	/* initialize port variables */
1261	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1262
1263	/* clear CPB fetch count */
1264	writew(0, mmio + NV_ADMA_CPB_COUNT);
1265
1266	/* clear GO for register mode, enable interrupt */
1267	tmp = readw(mmio + NV_ADMA_CTL);
1268	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1269		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1270
1271	tmp = readw(mmio + NV_ADMA_CTL);
1272	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1273	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1274	udelay(1);
1275	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1276	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1277
1278	return 0;
1279}
1280#endif
1281
1282static void nv_adma_setup_port(struct ata_port *ap)
1283{
1284	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1285	struct ata_ioports *ioport = &ap->ioaddr;
1286
1287	VPRINTK("ENTER\n");
1288
1289	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1290
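    	/* ADMA register mode exposes the legacy taskfile registers as
    	 * 32-bit-wide MMIO slots, hence the (ATA_REG_xxx * 4) offsets. */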
1291	ioport->cmd_addr	= mmio;
1292	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1293	ioport->error_addr	=
1294	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1295	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1296	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1297	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1298	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1299	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1300	ioport->status_addr	=
1301	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1302	ioport->altstatus_addr	=
1303	ioport->ctl_addr	= mmio + 0x20;
1304}
1305
1306static int nv_adma_host_init(struct ata_host *host)
1307{
1308	struct pci_dev *pdev = to_pci_dev(host->dev);
1309	unsigned int i;
1310	u32 tmp32;
1311
1312	VPRINTK("ENTER\n");
1313
1314	/* enable ADMA on the ports */
1315	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1316	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1317		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1318		 NV_MCP_SATA_CFG_20_PORT1_EN |
1319		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1320
1321	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1322
1323	for (i = 0; i < host->n_ports; i++)
1324		nv_adma_setup_port(host->ports[i]);
1325
1326	return 0;
1327}
1328
1329static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1330			      struct scatterlist *sg,
1331			      int idx,
1332			      struct nv_adma_prd *aprd)
1333{
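    	/* APRD flags: WRITE gives the transfer direction, END marks the
    	 * final entry and CONT chains to the next one.  Slot 4 is the last
    	 * APRD held inside the CPB itself, so it is not marked CONT; any
    	 * further entries are reached through cpb->next_aprd instead
    	 * (behaviour inferred from this driver, no public hardware docs). */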
1334	u8 flags = 0;
1335	if (qc->tf.flags & ATA_TFLAG_WRITE)
1336		flags |= NV_APRD_WRITE;
1337	if (idx == qc->n_elem - 1)
1338		flags |= NV_APRD_END;
1339	else if (idx != 4)
1340		flags |= NV_APRD_CONT;
1341
1342	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1343	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1344	aprd->flags = flags;
1345	aprd->packet_len = 0;
1346}
1347
1348static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1349{
1350	struct nv_adma_port_priv *pp = qc->ap->private_data;
1351	struct nv_adma_prd *aprd;
1352	struct scatterlist *sg;
1353	unsigned int si;
1354
1355	VPRINTK("ENTER\n");
1356
1357	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1358		aprd = (si < 5) ? &cpb->aprd[si] :
1359			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1360		nv_adma_fill_aprd(qc, sg, si, aprd);
1361	}
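    	/* after the loop si == qc->n_elem; more than five entries means the
    	 * external per-tag APRD table is in use and must be linked in */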
1362	if (si > 5)
1363		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1364	else
1365		cpb->next_aprd = cpu_to_le64(0);
1366}
1367
1368static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1369{
1370	struct nv_adma_port_priv *pp = qc->ap->private_data;
1371
1372	/* ADMA engine can only be used for non-ATAPI DMA commands,
1373	   or interrupt-driven no-data commands. */
1374	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1375	   (qc->tf.flags & ATA_TFLAG_POLLING))
1376		return 1;
1377
1378	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1379	   (qc->tf.protocol == ATA_PROT_NODATA))
1380		return 0;
1381
1382	return 1;
1383}
1384
1385static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1386{
1387	struct nv_adma_port_priv *pp = qc->ap->private_data;
1388	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1389	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1390		       NV_CPB_CTL_IEN;
1391
1392	if (nv_adma_use_reg_mode(qc)) {
1393		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1394			(qc->flags & ATA_QCFLAG_DMAMAP));
1395		nv_adma_register_mode(qc->ap);
1396		ata_bmdma_qc_prep(qc);
1397		return;
1398	}
1399
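    	/* Invalidate the CPB before rewriting it: flag it DONE and clear
    	 * ctl_flags so the controller won't fetch a half-built CPB; the
    	 * write barriers order these stores against the fill-in below. */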
1400	cpb->resp_flags = NV_CPB_RESP_DONE;
1401	wmb();
1402	cpb->ctl_flags = 0;
1403	wmb();
1404
1405	cpb->len		= 3;
1406	cpb->tag		= qc->tag;
1407	cpb->next_cpb_idx	= 0;
1408
1409	/* turn on NCQ flags for NCQ commands */
1410	if (qc->tf.protocol == ATA_PROT_NCQ)
1411		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1412
1413	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1414
1415	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1416
1417	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1418		nv_adma_fill_sg(qc, cpb);
1419		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1420	} else
1421		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1422
1423	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1424	   until we are finished filling in all of the contents */
1425	wmb();
1426	cpb->ctl_flags = ctl_flags;
1427	wmb();
1428	cpb->resp_flags = 0;
1429}
1430
1431static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1432{
1433	struct nv_adma_port_priv *pp = qc->ap->private_data;
1434	void __iomem *mmio = pp->ctl_block;
1435	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1436
1437	VPRINTK("ENTER\n");
1438
1439	/* We can't handle result taskfile with NCQ commands, since
1440	   retrieving the taskfile switches us out of ADMA mode and would abort
1441	   existing commands. */
1442	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1443		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1444		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1445		return AC_ERR_SYSTEM;
1446	}
1447
1448	if (nv_adma_use_reg_mode(qc)) {
1449		/* use ATA register mode */
1450		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1451		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1452			(qc->flags & ATA_QCFLAG_DMAMAP));
1453		nv_adma_register_mode(qc->ap);
1454		return ata_bmdma_qc_issue(qc);
1455	} else
1456		nv_adma_mode(qc->ap);
1457
1458	/* write append register, command tag in lower 8 bits
1459	   and (number of cpbs to append -1) in top 8 bits */
1460	wmb();
1461
1462	if (curr_ncq != pp->last_issue_ncq) {
1463		/* Seems to need some delay before switching between NCQ and
1464		   non-NCQ commands, else we get command timeouts and such. */
1465		udelay(20);
1466		pp->last_issue_ncq = curr_ncq;
1467	}
1468
1469	writew(qc->tag, mmio + NV_ADMA_APPEND);
1470
1471	DPRINTK("Issued tag %u\n", qc->tag);
1472
1473	return 0;
1474}
1475
1476static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1477{
1478	struct ata_host *host = dev_instance;
1479	unsigned int i;
1480	unsigned int handled = 0;
1481	unsigned long flags;
1482
1483	spin_lock_irqsave(&host->lock, flags);
1484
1485	for (i = 0; i < host->n_ports; i++) {
1486		struct ata_port *ap = host->ports[i];
1487		struct ata_queued_cmd *qc;
1488
1489		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1490		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1491			handled += ata_bmdma_port_intr(ap, qc);
1492		} else {
1493			/*
1494			 * No request pending?  Clear the interrupt status
1495			 * anyway, in case an interrupt is pending.
1496			 */
1497			ap->ops->sff_check_status(ap);
1498		}
1499	}
1500
1501	spin_unlock_irqrestore(&host->lock, flags);
1502
1503	return IRQ_RETVAL(handled);
1504}
1505
1506static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1507{
1508	int i, handled = 0;
1509
1510	for (i = 0; i < host->n_ports; i++) {
1511		handled += nv_host_intr(host->ports[i], irq_stat);
1512		irq_stat >>= NV_INT_PORT_SHIFT;
1513	}
1514
1515	return IRQ_RETVAL(handled);
1516}
1517
1518static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1519{
1520	struct ata_host *host = dev_instance;
1521	u8 irq_stat;
1522	irqreturn_t ret;
1523
1524	spin_lock(&host->lock);
1525	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1526	ret = nv_do_interrupt(host, irq_stat);
1527	spin_unlock(&host->lock);
1528
1529	return ret;
1530}
1531
1532static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1533{
1534	struct ata_host *host = dev_instance;
1535	u8 irq_stat;
1536	irqreturn_t ret;
1537
1538	spin_lock(&host->lock);
1539	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1540	ret = nv_do_interrupt(host, irq_stat);
1541	spin_unlock(&host->lock);
1542
1543	return ret;
1544}
1545
1546static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1547{
1548	if (sc_reg > SCR_CONTROL)
1549		return -EINVAL;
1550
1551	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1552	return 0;
1553}
1554
1555static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1556{
1557	if (sc_reg > SCR_CONTROL)
1558		return -EINVAL;
1559
1560	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1561	return 0;
1562}
1563
1564static int nv_hardreset(struct ata_link *link, unsigned int *class,
1565			unsigned long deadline)
1566{
1567	struct ata_eh_context *ehc = &link->eh_context;
1568
1569	/* Do hardreset iff it's post-boot probing; see the comment
1570	 * above the port ops for details.
1571	 */
1572	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1573	    !ata_dev_enabled(link->device))
1574		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1575				    NULL, NULL);
1576	else {
1577		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1578		int rc;
1579
1580		if (!(ehc->i.flags & ATA_EHI_QUIET))
1581			ata_link_info(link,
1582				      "nv: skipping hardreset on occupied port\n");
1583
1584		/* make sure the link is online */
1585		rc = sata_link_resume(link, timing, deadline);
1586		/* whine about phy resume failure but proceed */
1587		if (rc && rc != -EOPNOTSUPP)
1588			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1589				      rc);
1590	}
1591
1592	/* device signature acquisition is unreliable */
1593	return -EAGAIN;
1594}
1595
1596static void nv_nf2_freeze(struct ata_port *ap)
1597{
1598	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1599	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1600	u8 mask;
1601
1602	mask = ioread8(scr_addr + NV_INT_ENABLE);
1603	mask &= ~(NV_INT_ALL << shift);
1604	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1605}
1606
1607static void nv_nf2_thaw(struct ata_port *ap)
1608{
1609	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1610	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1611	u8 mask;
1612
1613	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1614
1615	mask = ioread8(scr_addr + NV_INT_ENABLE);
1616	mask |= (NV_INT_MASK << shift);
1617	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1618}
1619
1620static void nv_ck804_freeze(struct ata_port *ap)
1621{
1622	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1623	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1624	u8 mask;
1625
1626	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1627	mask &= ~(NV_INT_ALL << shift);
1628	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1629}
1630
1631static void nv_ck804_thaw(struct ata_port *ap)
1632{
1633	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1634	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1635	u8 mask;
1636
1637	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1638
1639	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1640	mask |= (NV_INT_MASK << shift);
1641	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1642}
1643
1644static void nv_mcp55_freeze(struct ata_port *ap)
1645{
1646	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1647	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1648	u32 mask;
1649
1650	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1651
1652	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1653	mask &= ~(NV_INT_ALL_MCP55 << shift);
1654	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1655}
1656
1657static void nv_mcp55_thaw(struct ata_port *ap)
1658{
1659	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1660	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1661	u32 mask;
1662
1663	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1664
1665	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1666	mask |= (NV_INT_MASK_MCP55 << shift);
1667	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1668}
1669
1670static void nv_adma_error_handler(struct ata_port *ap)
1671{
1672	struct nv_adma_port_priv *pp = ap->private_data;
1673	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1674		void __iomem *mmio = pp->ctl_block;
1675		int i;
1676		u16 tmp;
1677
1678		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1679			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1680			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1681			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1682			u32 status = readw(mmio + NV_ADMA_STAT);
1683			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1684			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1685
1686			ata_port_err(ap,
1687				"EH in ADMA mode, notifier 0x%X "
1688				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1689				"next cpb count 0x%X next cpb idx 0x%x\n",
1690				notifier, notifier_error, gen_ctl, status,
1691				cpb_count, next_cpb_idx);
1692
1693			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1694				struct nv_adma_cpb *cpb = &pp->cpb[i];
1695				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1696				    ap->link.sactive & (1 << i))
1697					ata_port_err(ap,
1698						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1699						i, cpb->ctl_flags, cpb->resp_flags);
1700			}
1701		}
1702
1703		/* Push us back into port register mode for error handling. */
1704		nv_adma_register_mode(ap);
1705
1706		/* Mark all of the CPBs as invalid to prevent them from
1707		   being executed */
1708		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1709			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1710
1711		/* clear CPB fetch count */
1712		writew(0, mmio + NV_ADMA_CPB_COUNT);
1713
1714		/* Reset channel */
1715		tmp = readw(mmio + NV_ADMA_CTL);
1716		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1717		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1718		udelay(1);
1719		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1720		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1721	}
1722
1723	ata_bmdma_error_handler(ap);
1724}
1725
1726static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1727{
1728	struct nv_swncq_port_priv *pp = ap->private_data;
1729	struct defer_queue *dq = &pp->defer_queue;
1730
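    	/* head and tail are free-running indices; masking with
    	 * (ATA_MAX_QUEUE - 1) maps them onto the tag ring, so tail - head
    	 * is the current queue depth */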
1731	/* the defer queue must never already be full here */
1732	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1733	dq->defer_bits |= (1 << qc->tag);
1734	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1735}
1736
1737static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1738{
1739	struct nv_swncq_port_priv *pp = ap->private_data;
1740	struct defer_queue *dq = &pp->defer_queue;
1741	unsigned int tag;
1742
1743	if (dq->head == dq->tail)	/* null queue */
1744		return NULL;
1745
1746	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1747	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1748	WARN_ON(!(dq->defer_bits & (1 << tag)));
1749	dq->defer_bits &= ~(1 << tag);
1750
1751	return ata_qc_from_tag(ap, tag);
1752}
1753
1754static void nv_swncq_fis_reinit(struct ata_port *ap)
1755{
1756	struct nv_swncq_port_priv *pp = ap->private_data;
1757
1758	pp->dhfis_bits = 0;
1759	pp->dmafis_bits = 0;
1760	pp->sdbfis_bits = 0;
1761	pp->ncq_flags = 0;
1762}
1763
1764static void nv_swncq_pp_reinit(struct ata_port *ap)
1765{
1766	struct nv_swncq_port_priv *pp = ap->private_data;
1767	struct defer_queue *dq = &pp->defer_queue;
1768
1769	dq->head = 0;
1770	dq->tail = 0;
1771	dq->defer_bits = 0;
1772	pp->qc_active = 0;
1773	pp->last_issue_tag = ATA_TAG_POISON;
1774	nv_swncq_fis_reinit(ap);
1775}
1776
1777static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1778{
1779	struct nv_swncq_port_priv *pp = ap->private_data;
1780
1781	writew(fis, pp->irq_block);
1782}
1783
1784static void __ata_bmdma_stop(struct ata_port *ap)
1785{
1786	struct ata_queued_cmd qc;
1787
1788	qc.ap = ap;
1789	ata_bmdma_stop(&qc);
1790}
1791
1792static void nv_swncq_ncq_stop(struct ata_port *ap)
1793{
1794	struct nv_swncq_port_priv *pp = ap->private_data;
1795	unsigned int i;
1796	u32 sactive;
1797	u32 done_mask;
1798
1799	ata_port_err(ap, "EH in SWNCQ mode, QC:qc_active 0x%X sactive 0x%X\n",
1800		     ap->qc_active, ap->link.sactive);
1801	ata_port_err(ap,
1802		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1803		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1804		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1805		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1806
1807	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1808		     ap->ops->sff_check_status(ap),
1809		     ioread8(ap->ioaddr.error_addr));
1810
1811	sactive = readl(pp->sactive_block);
1812	done_mask = pp->qc_active ^ sactive;
1813
1814	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1815	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1816		u8 err = 0;
1817		if (pp->qc_active & (1 << i))
1818			err = 0;
1819		else if (done_mask & (1 << i))
1820			err = 1;
1821		else
1822			continue;
1823
1824		ata_port_err(ap,
1825			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1826			     (pp->dhfis_bits >> i) & 0x1,
1827			     (pp->dmafis_bits >> i) & 0x1,
1828			     (pp->sdbfis_bits >> i) & 0x1,
1829			     (sactive >> i) & 0x1,
1830			     (err ? "error! tag doesn't exist" : " "));
1831	}
1832
1833	nv_swncq_pp_reinit(ap);
1834	ap->ops->sff_irq_clear(ap);
1835	__ata_bmdma_stop(ap);
1836	nv_swncq_irq_clear(ap, 0xffff);
1837}
1838
1839static void nv_swncq_error_handler(struct ata_port *ap)
1840{
1841	struct ata_eh_context *ehc = &ap->link.eh_context;
1842
1843	if (ap->link.sactive) {
1844		nv_swncq_ncq_stop(ap);
1845		ehc->i.action |= ATA_EH_RESET;
1846	}
1847
1848	ata_bmdma_error_handler(ap);
1849}
1850
1851#ifdef CONFIG_PM
1852static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1853{
1854	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1855	u32 tmp;
1856
1857	/* clear irq */
1858	writel(~0, mmio + NV_INT_STATUS_MCP55);
1859
1860	/* disable irq */
1861	writel(0, mmio + NV_INT_ENABLE_MCP55);
1862
1863	/* disable swncq */
1864	tmp = readl(mmio + NV_CTL_MCP55);
1865	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1866	writel(tmp, mmio + NV_CTL_MCP55);
1867
1868	return 0;
1869}
1870
1871static int nv_swncq_port_resume(struct ata_port *ap)
1872{
1873	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1874	u32 tmp;
1875
1876	/* clear irq */
1877	writel(~0, mmio + NV_INT_STATUS_MCP55);
1878
1879	/* enable irq */
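    	/* 0x00fd00fd is the same per-port interrupt-enable mask that
    	 * nv_swncq_host_init() programs (magic value, no public docs) */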
1880	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1881
1882	/* enable swncq */
1883	tmp = readl(mmio + NV_CTL_MCP55);
1884	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1885
1886	return 0;
1887}
1888#endif
1889
1890static void nv_swncq_host_init(struct ata_host *host)
1891{
1892	u32 tmp;
1893	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1894	struct pci_dev *pdev = to_pci_dev(host->dev);
1895	u8 regval;
1896
1897	/* disable ECO 398 */
1898	pci_read_config_byte(pdev, 0x7f, &regval);
1899	regval &= ~(1 << 7);
1900	pci_write_config_byte(pdev, 0x7f, regval);
1901
1902	/* enable swncq */
1903	tmp = readl(mmio + NV_CTL_MCP55);
1904	VPRINTK("HOST_CTL:0x%X\n", tmp);
1905	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1906
1907	/* enable irq intr */
1908	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1909	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1910	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1911
1912	/*  clear port irq */
1913	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1914}
1915
1916static int nv_swncq_slave_config(struct scsi_device *sdev)
1917{
1918	struct ata_port *ap = ata_shost_to_port(sdev->host);
1919	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1920	struct ata_device *dev;
1921	int rc;
1922	u8 rev;
1923	u8 check_maxtor = 0;
1924	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1925
1926	rc = ata_scsi_slave_config(sdev);
1927	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1928		/* Not a proper libata device, ignore */
1929		return rc;
1930
1931	dev = &ap->link.device[sdev->id];
1932	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1933		return rc;
1934
1935	/* if MCP51 and Maxtor, then disable ncq */
1936	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1937		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1938		check_maxtor = 1;
1939
1940	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1941	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1942		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
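    		/* config space offset 0x8 is the PCI revision ID */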
1943		pci_read_config_byte(pdev, 0x8, &rev);
1944		if (rev <= 0xa2)
1945			check_maxtor = 1;
1946	}
1947
1948	if (!check_maxtor)
1949		return rc;
1950
1951	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1952
1953	if (strncmp(model_num, "Maxtor", 6) == 0) {
1954		ata_scsi_change_queue_depth(sdev, 1);
1955		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1956			       sdev->queue_depth);
1957	}
1958
1959	return rc;
1960}
1961
1962static int nv_swncq_port_start(struct ata_port *ap)
1963{
1964	struct device *dev = ap->host->dev;
1965	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1966	struct nv_swncq_port_priv *pp;
1967	int rc;
1968
1969	/* we might fall back to bmdma, so allocate bmdma resources */
1970	rc = ata_bmdma_port_start(ap);
1971	if (rc)
1972		return rc;
1973
1974	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1975	if (!pp)
1976		return -ENOMEM;
1977
1978	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1979				      &pp->prd_dma, GFP_KERNEL);
1980	if (!pp->prd)
1981		return -ENOMEM;
1982	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1983
1984	ap->private_data = pp;
1985	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1986	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1987	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1988
1989	return 0;
1990}
1991
1992static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1993{
1994	if (qc->tf.protocol != ATA_PROT_NCQ) {
1995		ata_bmdma_qc_prep(qc);
1996		return;
1997	}
1998
1999	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2000		return;
2001
2002	nv_swncq_fill_sg(qc);
2003}
2004
2005static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2006{
2007	struct ata_port *ap = qc->ap;
2008	struct scatterlist *sg;
2009	struct nv_swncq_port_priv *pp = ap->private_data;
2010	struct ata_bmdma_prd *prd;
2011	unsigned int si, idx;
2012
2013	prd = pp->prd + ATA_MAX_PRD * qc->tag;
2014
2015	idx = 0;
2016	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2017		u32 addr, offset;
2018		u32 sg_len, len;
2019
2020		addr = (u32)sg_dma_address(sg);
2021		sg_len = sg_dma_len(sg);
2022
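    		/* A BMDMA PRD entry must not cross a 64K boundary, so split
    		 * each segment at 64K boundaries; e.g. a 12K segment starting
    		 * at xxxx_f000 becomes a 4K entry followed by an 8K entry.
    		 * A full 64K length is encoded as 0 by the (len & 0xffff)
    		 * below. */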
2023		while (sg_len) {
2024			offset = addr & 0xffff;
2025			len = sg_len;
2026			if ((offset + sg_len) > 0x10000)
2027				len = 0x10000 - offset;
2028
2029			prd[idx].addr = cpu_to_le32(addr);
2030			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2031
2032			idx++;
2033			sg_len -= len;
2034			addr += len;
2035		}
2036	}
2037
2038	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2039}
2040
2041static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2042					  struct ata_queued_cmd *qc)
2043{
2044	struct nv_swncq_port_priv *pp = ap->private_data;
2045
2046	if (qc == NULL)
2047		return 0;
2048
2049	DPRINTK("Enter\n");
2050
2051	writel((1 << qc->tag), pp->sactive_block);
2052	pp->last_issue_tag = qc->tag;
2053	pp->dhfis_bits &= ~(1 << qc->tag);
2054	pp->dmafis_bits &= ~(1 << qc->tag);
2055	pp->qc_active |= (0x1 << qc->tag);
2056
2057	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2058	ap->ops->sff_exec_command(ap, &qc->tf);
2059
2060	DPRINTK("Issued tag %u\n", qc->tag);
2061
2062	return 0;
2063}
2064
2065static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2066{
2067	struct ata_port *ap = qc->ap;
2068	struct nv_swncq_port_priv *pp = ap->private_data;
2069
2070	if (qc->tf.protocol != ATA_PROT_NCQ)
2071		return ata_bmdma_qc_issue(qc);
2072
2073	DPRINTK("Enter\n");
2074
2075	if (!pp->qc_active)
2076		nv_swncq_issue_atacmd(ap, qc);
2077	else
2078		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2079
2080	return 0;
2081}
2082
2083static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2084{
2085	u32 serror;
2086	struct ata_eh_info *ehi = &ap->link.eh_info;
2087
2088	ata_ehi_clear_desc(ehi);
2089
2090	/* AHCI needs SError cleared; otherwise, it might lock up */
2091	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2092	sata_scr_write(&ap->link, SCR_ERROR, serror);
2093
2094	/* analyze @fis */
2095	if (fis & NV_SWNCQ_IRQ_ADDED)
2096		ata_ehi_push_desc(ehi, "hot plug");
2097	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2098		ata_ehi_push_desc(ehi, "hot unplug");
2099
2100	ata_ehi_hotplugged(ehi);
2101
2102	/* okay, let's hand over to EH */
2103	ehi->serror |= serror;
2104
2105	ata_port_freeze(ap);
2106}
2107
2108static int nv_swncq_sdbfis(struct ata_port *ap)
2109{
2110	struct ata_queued_cmd *qc;
2111	struct nv_swncq_port_priv *pp = ap->private_data;
2112	struct ata_eh_info *ehi = &ap->link.eh_info;
2113	u32 sactive;
2114	u32 done_mask;
2115	u8 host_stat;
2116	u8 lack_dhfis = 0;
2117
2118	host_stat = ap->ops->bmdma_status(ap);
2119	if (unlikely(host_stat & ATA_DMA_ERR)) {
2120		/* error when transferring data to/from memory */
2121		ata_ehi_clear_desc(ehi);
2122		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2123		ehi->err_mask |= AC_ERR_HOST_BUS;
2124		ehi->action |= ATA_EH_RESET;
2125		return -EINVAL;
2126	}
2127
2128	ap->ops->sff_irq_clear(ap);
2129	__ata_bmdma_stop(ap);
2130
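    	/* tags set in pp->qc_active but no longer present in SActive have
    	 * been completed by the drive */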
2131	sactive = readl(pp->sactive_block);
2132	done_mask = pp->qc_active ^ sactive;
2133
2134	pp->qc_active &= ~done_mask;
2135	pp->dhfis_bits &= ~done_mask;
2136	pp->dmafis_bits &= ~done_mask;
2137	pp->sdbfis_bits |= done_mask;
2138	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2139
2140	if (!ap->qc_active) {
2141		DPRINTK("over\n");
2142		nv_swncq_pp_reinit(ap);
2143		return 0;
2144	}
2145
2146	if (pp->qc_active & pp->dhfis_bits)
2147		return 0;
2148
2149	if ((pp->ncq_flags & ncq_saw_backout) ||
2150	    (pp->qc_active ^ pp->dhfis_bits))
2151		/* if the controller couldn't get a Device-to-Host Register FIS,
2152		 * the driver needs to reissue the command.
2153		 */
2154		lack_dhfis = 1;
2155
2156	DPRINTK("id 0x%x QC: qc_active 0x%x,"
2157		"SWNCQ:qc_active 0x%X defer_bits %X "
2158		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2159		ap->print_id, ap->qc_active, pp->qc_active,
2160		pp->defer_queue.defer_bits, pp->dhfis_bits,
2161		pp->dmafis_bits, pp->last_issue_tag);
2162
2163	nv_swncq_fis_reinit(ap);
2164
2165	if (lack_dhfis) {
2166		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2167		nv_swncq_issue_atacmd(ap, qc);
2168		return 0;
2169	}
2170
2171	if (pp->defer_queue.defer_bits) {
2172		/* send deferral queue command */
2173		qc = nv_swncq_qc_from_dq(ap);
2174		WARN_ON(qc == NULL);
2175		nv_swncq_issue_atacmd(ap, qc);
2176	}
2177
2178	return 0;
2179}
2180
2181static inline u32 nv_swncq_tag(struct ata_port *ap)
2182{
2183	struct nv_swncq_port_priv *pp = ap->private_data;
2184	u32 tag;
2185
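    	/* the tag the current DMA Setup FIS refers to is reported in bits
    	 * 6:2 of the tag register (inferred from this driver, no public
    	 * hardware docs) */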
2186	tag = readb(pp->tag_block) >> 2;
2187	return (tag & 0x1f);
2188}
2189
2190static void nv_swncq_dmafis(struct ata_port *ap)
2191{
2192	struct ata_queued_cmd *qc;
2193	unsigned int rw;
2194	u8 dmactl;
2195	u32 tag;
2196	struct nv_swncq_port_priv *pp = ap->private_data;
2197
2198	__ata_bmdma_stop(ap);
2199	tag = nv_swncq_tag(ap);
2200
2201	DPRINTK("dma setup tag 0x%x\n", tag);
2202	qc = ata_qc_from_tag(ap, tag);
2203
2204	if (unlikely(!qc))
2205		return;
2206
2207	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2208
2209	/* load PRD table addr. */
2210	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2211		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2212
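    	/* note: ATA_DMA_WR in the BMDMA command register means "write to
    	 * system memory", i.e. a device-to-host transfer, so it is set for
    	 * reads and cleared for writes */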
2213	/* specify data direction, triple-check start bit is clear */
2214	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2215	dmactl &= ~ATA_DMA_WR;
2216	if (!rw)
2217		dmactl |= ATA_DMA_WR;
2218
2219	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2220}
2221
2222static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2223{
2224	struct nv_swncq_port_priv *pp = ap->private_data;
2225	struct ata_queued_cmd *qc;
2226	struct ata_eh_info *ehi = &ap->link.eh_info;
2227	u32 serror;
2228	u8 ata_stat;
2229
2230	ata_stat = ap->ops->sff_check_status(ap);
2231	nv_swncq_irq_clear(ap, fis);
2232	if (!fis)
2233		return;
2234
2235	if (ap->pflags & ATA_PFLAG_FROZEN)
2236		return;
2237
2238	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2239		nv_swncq_hotplug(ap, fis);
2240		return;
2241	}
2242
2243	if (!pp->qc_active)
2244		return;
2245
2246	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2247		return;
2248	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2249
2250	if (ata_stat & ATA_ERR) {
2251		ata_ehi_clear_desc(ehi);
2252		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2253		ehi->err_mask |= AC_ERR_DEV;
2254		ehi->serror |= serror;
2255		ehi->action |= ATA_EH_RESET;
2256		ata_port_freeze(ap);
2257		return;
2258	}
2259
2260	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2261		/* If a backout IRQ was seen, the driver must issue
2262		 * the command again some time later.
2263		 */
2264		pp->ncq_flags |= ncq_saw_backout;
2265	}
2266
2267	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2268		pp->ncq_flags |= ncq_saw_sdb;
2269		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2270			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2271			ap->print_id, pp->qc_active, pp->dhfis_bits,
2272			pp->dmafis_bits, readl(pp->sactive_block));
2273		if (nv_swncq_sdbfis(ap) < 0)
2274			goto irq_error;
2275	}
2276
2277	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2278		/* The interrupt indicates the new command
2279		 * was transmitted correctly to the drive.
2280		 */
2281		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2282		pp->ncq_flags |= ncq_saw_d2h;
2283		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2284			ata_ehi_push_desc(ehi, "illegal fis transaction");
2285			ehi->err_mask |= AC_ERR_HSM;
2286			ehi->action |= ATA_EH_RESET;
2287			goto irq_error;
2288		}
2289
2290		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2291		    !(pp->ncq_flags & ncq_saw_dmas)) {
2292			ata_stat = ap->ops->sff_check_status(ap);
2293			if (ata_stat & ATA_BUSY)
2294				goto irq_exit;
2295
2296			if (pp->defer_queue.defer_bits) {
2297				DPRINTK("send next command\n");
2298				qc = nv_swncq_qc_from_dq(ap);
2299				nv_swncq_issue_atacmd(ap, qc);
2300			}
2301		}
2302	}
2303
2304	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2305		/* program the dma controller with the appropriate PRD buffers
2306		 * and start the DMA transfer for the requested command.
2307		 */
2308		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2309		pp->ncq_flags |= ncq_saw_dmas;
2310		nv_swncq_dmafis(ap);
2311	}
2312
2313irq_exit:
2314	return;
2315irq_error:
2316	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2317	ata_port_freeze(ap);
2318	return;
2319}
2320
2321static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2322{
2323	struct ata_host *host = dev_instance;
2324	unsigned int i;
2325	unsigned int handled = 0;
2326	unsigned long flags;
2327	u32 irq_stat;
2328
2329	spin_lock_irqsave(&host->lock, flags);
2330
2331	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2332
2333	for (i = 0; i < host->n_ports; i++) {
2334		struct ata_port *ap = host->ports[i];
2335
2336		if (ap->link.sactive) {
2337			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2338			handled = 1;
2339		} else {
2340			if (irq_stat)	/* clear all but the hotplug bits */
2341				nv_swncq_irq_clear(ap, 0xfff0);
2342
2343			handled += nv_host_intr(ap, (u8)irq_stat);
2344		}
2345		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2346	}
2347
2348	spin_unlock_irqrestore(&host->lock, flags);
2349
2350	return IRQ_RETVAL(handled);
2351}
2352
2353static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2354{
2355	const struct ata_port_info *ppi[] = { NULL, NULL };
2356	struct nv_pi_priv *ipriv;
2357	struct ata_host *host;
2358	struct nv_host_priv *hpriv;
2359	int rc;
2360	u32 bar;
2361	void __iomem *base;
2362	unsigned long type = ent->driver_data;
2363
2364	/* Make sure this is a SATA controller by counting the number of bars
2365	 * (NVIDIA SATA controllers will always have six bars).  Otherwise,
2366	 * it's an IDE controller and we ignore it. */
2367	for (bar = 0; bar < 6; bar++)
2368		if (pci_resource_start(pdev, bar) == 0)
2369			return -ENODEV;
2370
2371	ata_print_version_once(&pdev->dev, DRV_VERSION);
2372
2373	rc = pcim_enable_device(pdev);
2374	if (rc)
2375		return rc;
2376
2377	/* determine type and allocate host */
2378	if (type == CK804 && adma_enabled) {
2379		dev_notice(&pdev->dev, "Using ADMA mode\n");
2380		type = ADMA;
2381	} else if (type == MCP5x && swncq_enabled) {
2382		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2383		type = SWNCQ;
2384	}
2385
2386	ppi[0] = &nv_port_info[type];
2387	ipriv = ppi[0]->private_data;
2388	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2389	if (rc)
2390		return rc;
2391
2392	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2393	if (!hpriv)
2394		return -ENOMEM;
2395	hpriv->type = type;
2396	host->private_data = hpriv;
2397
2398	/* request and iomap NV_MMIO_BAR */
2399	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2400	if (rc)
2401		return rc;
2402
2403	/* configure SCR access */
2404	base = host->iomap[NV_MMIO_BAR];
2405	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2406	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2407
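    	/* "type >= CK804" relies on the chip-type enum ordering: CK804 and
    	 * the later types (ADMA, MCP5x, SWNCQ) all need the SATA register
    	 * space enabled */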
2408	/* enable SATA space for CK804 */
2409	if (type >= CK804) {
2410		u8 regval;
2411
2412		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2413		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2414		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2415	}
2416
2417	/* init ADMA */
2418	if (type == ADMA) {
2419		rc = nv_adma_host_init(host);
2420		if (rc)
2421			return rc;
2422	} else if (type == SWNCQ)
2423		nv_swncq_host_init(host);
2424
2425	if (msi_enabled) {
2426		dev_notice(&pdev->dev, "Using MSI\n");
2427		pci_enable_msi(pdev);
2428	}
2429
2430	pci_set_master(pdev);
2431	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2432}
2433
2434#ifdef CONFIG_PM_SLEEP
2435static int nv_pci_device_resume(struct pci_dev *pdev)
2436{
2437	struct ata_host *host = pci_get_drvdata(pdev);
2438	struct nv_host_priv *hpriv = host->private_data;
2439	int rc;
2440
2441	rc = ata_pci_device_do_resume(pdev);
2442	if (rc)
2443		return rc;
2444
2445	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2446		if (hpriv->type >= CK804) {
2447			u8 regval;
2448
2449			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2450			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2451			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2452		}
2453		if (hpriv->type == ADMA) {
2454			u32 tmp32;
2455			struct nv_adma_port_priv *pp;
2456			/* enable/disable ADMA on the ports appropriately */
2457			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2458
2459			pp = host->ports[0]->private_data;
2460			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2461				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2462					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2463			else
2464				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2465					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2466			pp = host->ports[1]->private_data;
2467			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2468				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2469					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2470			else
2471				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2472					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2473
2474			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2475		}
2476	}
2477
2478	ata_host_resume(host);
2479
2480	return 0;
2481}
2482#endif
2483
2484static void nv_ck804_host_stop(struct ata_host *host)
2485{
2486	struct pci_dev *pdev = to_pci_dev(host->dev);
2487	u8 regval;
2488
2489	/* disable SATA space for CK804 */
2490	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2491	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2492	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2493}
2494
2495static void nv_adma_host_stop(struct ata_host *host)
2496{
2497	struct pci_dev *pdev = to_pci_dev(host->dev);
2498	u32 tmp32;
2499
2500	/* disable ADMA on the ports */
2501	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2502	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2503		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2504		   NV_MCP_SATA_CFG_20_PORT1_EN |
2505		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2506
2507	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2508
2509	nv_ck804_host_stop(host);
2510}
2511
2512module_pci_driver(nv_pci_driver);
2513
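    /* Example (hypothetical invocation): to enable ADMA mode on a CK804
     * controller, the module could be loaded as:
     *   modprobe sata_nv adma=1
     */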
2514module_param_named(adma, adma_enabled, bool, 0444);
2515MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2516module_param_named(swncq, swncq_enabled, bool, 0444);
2517MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2518module_param_named(msi, msi_enabled, bool, 0444);
2519MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");