   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  sata_nv.c - NVIDIA nForce SATA
   4 *
   5 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
   6 *  Copyright 2004 Andrew Chew
   7 *
   8 *  libata documentation is available via 'make {ps|pdf}docs',
   9 *  as Documentation/driver-api/libata.rst
  10 *
  11 *  No hardware documentation available outside of NVIDIA.
   12 *  This driver programs the NVIDIA SATA controller in a fashion
   13 *  similar to other PCI IDE BMDMA controllers, with a few
  14 *  NV-specific details such as register offsets, SATA phy location,
  15 *  hotplug info, etc.
  16 *
  17 *  CK804/MCP04 controllers support an alternate programming interface
  18 *  similar to the ADMA specification (with some modifications).
  19 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
  20 *  sent through the legacy interface.
  21 */
  22
  23#include <linux/kernel.h>
  24#include <linux/module.h>
  25#include <linux/gfp.h>
  26#include <linux/pci.h>
  27#include <linux/blkdev.h>
  28#include <linux/delay.h>
  29#include <linux/interrupt.h>
  30#include <linux/device.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_device.h>
  33#include <linux/libata.h>
  34#include <trace/events/libata.h>
  35
  36#define DRV_NAME			"sata_nv"
  37#define DRV_VERSION			"3.5"
  38
  39#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
  40
  41enum {
  42	NV_MMIO_BAR			= 5,
  43
  44	NV_PORTS			= 2,
  45	NV_PIO_MASK			= ATA_PIO4,
  46	NV_MWDMA_MASK			= ATA_MWDMA2,
  47	NV_UDMA_MASK			= ATA_UDMA6,
  48	NV_PORT0_SCR_REG_OFFSET		= 0x00,
  49	NV_PORT1_SCR_REG_OFFSET		= 0x40,
  50
  51	/* INT_STATUS/ENABLE */
  52	NV_INT_STATUS			= 0x10,
  53	NV_INT_ENABLE			= 0x11,
  54	NV_INT_STATUS_CK804		= 0x440,
  55	NV_INT_ENABLE_CK804		= 0x441,
  56
  57	/* INT_STATUS/ENABLE bits */
  58	NV_INT_DEV			= 0x01,
  59	NV_INT_PM			= 0x02,
  60	NV_INT_ADDED			= 0x04,
  61	NV_INT_REMOVED			= 0x08,
  62
  63	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
  64
  65	NV_INT_ALL			= 0x0f,
  66	NV_INT_MASK			= NV_INT_DEV |
  67					  NV_INT_ADDED | NV_INT_REMOVED,
  68
  69	/* INT_CONFIG */
  70	NV_INT_CONFIG			= 0x12,
  71	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
  72
  73	// For PCI config register 20
  74	NV_MCP_SATA_CFG_20		= 0x50,
  75	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
  76	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
  77	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
  78	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
  79	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
  80
  81	NV_ADMA_MAX_CPBS		= 32,
  82	NV_ADMA_CPB_SZ			= 128,
  83	NV_ADMA_APRD_SZ			= 16,
  84	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
  85					   NV_ADMA_APRD_SZ,
  86	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
  87	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
  88	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
  89					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
  90
  91	/* BAR5 offset to ADMA general registers */
  92	NV_ADMA_GEN			= 0x400,
  93	NV_ADMA_GEN_CTL			= 0x00,
  94	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
  95
  96	/* BAR5 offset to ADMA ports */
  97	NV_ADMA_PORT			= 0x480,
  98
  99	/* size of ADMA port register space  */
 100	NV_ADMA_PORT_SIZE		= 0x100,
 101
 102	/* ADMA port registers */
 103	NV_ADMA_CTL			= 0x40,
 104	NV_ADMA_CPB_COUNT		= 0x42,
 105	NV_ADMA_NEXT_CPB_IDX		= 0x43,
 106	NV_ADMA_STAT			= 0x44,
 107	NV_ADMA_CPB_BASE_LOW		= 0x48,
 108	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
 109	NV_ADMA_APPEND			= 0x50,
 110	NV_ADMA_NOTIFIER		= 0x68,
 111	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
 112
 113	/* NV_ADMA_CTL register bits */
 114	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
 115	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
 116	NV_ADMA_CTL_GO			= (1 << 7),
 117	NV_ADMA_CTL_AIEN		= (1 << 8),
 118	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
 119	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
 120
 121	/* CPB response flag bits */
 122	NV_CPB_RESP_DONE		= (1 << 0),
 123	NV_CPB_RESP_ATA_ERR		= (1 << 3),
 124	NV_CPB_RESP_CMD_ERR		= (1 << 4),
 125	NV_CPB_RESP_CPB_ERR		= (1 << 7),
 126
 127	/* CPB control flag bits */
 128	NV_CPB_CTL_CPB_VALID		= (1 << 0),
 129	NV_CPB_CTL_QUEUE		= (1 << 1),
 130	NV_CPB_CTL_APRD_VALID		= (1 << 2),
 131	NV_CPB_CTL_IEN			= (1 << 3),
 132	NV_CPB_CTL_FPDMA		= (1 << 4),
 133
 134	/* APRD flags */
 135	NV_APRD_WRITE			= (1 << 1),
 136	NV_APRD_END			= (1 << 2),
 137	NV_APRD_CONT			= (1 << 3),
 138
 139	/* NV_ADMA_STAT flags */
 140	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
 141	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
 142	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
 143	NV_ADMA_STAT_CPBERR		= (1 << 4),
 144	NV_ADMA_STAT_SERROR		= (1 << 5),
 145	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
 146	NV_ADMA_STAT_IDLE		= (1 << 8),
 147	NV_ADMA_STAT_LEGACY		= (1 << 9),
 148	NV_ADMA_STAT_STOPPED		= (1 << 10),
 149	NV_ADMA_STAT_DONE		= (1 << 12),
 150	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
 151					  NV_ADMA_STAT_TIMEOUT,
 152
 153	/* port flags */
 154	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
 155	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
 156
 157	/* MCP55 reg offset */
 158	NV_CTL_MCP55			= 0x400,
 159	NV_INT_STATUS_MCP55		= 0x440,
 160	NV_INT_ENABLE_MCP55		= 0x444,
 161	NV_NCQ_REG_MCP55		= 0x448,
 162
 163	/* MCP55 */
 164	NV_INT_ALL_MCP55		= 0xffff,
 165	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
 166	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
 167
 168	/* SWNCQ ENABLE BITS*/
 169	NV_CTL_PRI_SWNCQ		= 0x02,
 170	NV_CTL_SEC_SWNCQ		= 0x04,
 171
 172	/* SW NCQ status bits*/
 173	NV_SWNCQ_IRQ_DEV		= (1 << 0),
 174	NV_SWNCQ_IRQ_PM			= (1 << 1),
 175	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
 176	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
 177
 178	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
 179	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
 180	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
 181	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
 182
 183	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
 184					  NV_SWNCQ_IRQ_REMOVED,
 185
 186};
 187
 188/* ADMA Physical Region Descriptor - one SG segment */
 189struct nv_adma_prd {
 190	__le64			addr;
 191	__le32			len;
 192	u8			flags;
 193	u8			packet_len;
 194	__le16			reserved;
 195};
 196
 197enum nv_adma_regbits {
 198	CMDEND	= (1 << 15),		/* end of command list */
 199	WNB	= (1 << 14),		/* wait-not-BSY */
 200	IGN	= (1 << 13),		/* ignore this entry */
 201	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
 202	DA2	= (1 << (2 + 8)),
 203	DA1	= (1 << (1 + 8)),
 204	DA0	= (1 << (0 + 8)),
 205};
 206
 207/* ADMA Command Parameter Block
 208   The first 5 SG segments are stored inside the Command Parameter Block itself.
 209   If there are more than 5 segments the remainder are stored in a separate
 210   memory area indicated by next_aprd. */
 211struct nv_adma_cpb {
 212	u8			resp_flags;    /* 0 */
 213	u8			reserved1;     /* 1 */
 214	u8			ctl_flags;     /* 2 */
 215	/* len is length of taskfile in 64 bit words */
 216	u8			len;		/* 3  */
 217	u8			tag;           /* 4 */
 218	u8			next_cpb_idx;  /* 5 */
 219	__le16			reserved2;     /* 6-7 */
 220	__le16			tf[12];        /* 8-31 */
 221	struct nv_adma_prd	aprd[5];       /* 32-111 */
 222	__le64			next_aprd;     /* 112-119 */
 223	__le64			reserved3;     /* 120-127 */
 224};
 225
 226
 227struct nv_adma_port_priv {
 228	struct nv_adma_cpb	*cpb;
 229	dma_addr_t		cpb_dma;
 230	struct nv_adma_prd	*aprd;
 231	dma_addr_t		aprd_dma;
 232	void __iomem		*ctl_block;
 233	void __iomem		*gen_block;
 234	void __iomem		*notifier_clear_block;
 235	u64			adma_dma_mask;
 236	u8			flags;
 237	int			last_issue_ncq;
 238};
 239
 240struct nv_host_priv {
 241	unsigned long		type;
 242};
 243
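     /*
      * Simple FIFO of deferred NCQ command tags used by SWNCQ mode;
      * defer_bits mirrors the queued tags as a bitmask (see
      * nv_swncq_qc_to_dq() and nv_swncq_qc_from_dq() below).
      */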
 244struct defer_queue {
 245	u32		defer_bits;
 246	unsigned int	head;
 247	unsigned int	tail;
 248	unsigned int	tag[ATA_MAX_QUEUE];
 249};
 250
 251enum ncq_saw_flag_list {
 252	ncq_saw_d2h	= (1U << 0),
 253	ncq_saw_dmas	= (1U << 1),
 254	ncq_saw_sdb	= (1U << 2),
 255	ncq_saw_backout	= (1U << 3),
 256};
 257
 258struct nv_swncq_port_priv {
 259	struct ata_bmdma_prd *prd;	 /* our SG list */
 260	dma_addr_t	prd_dma; /* and its DMA mapping */
 261	void __iomem	*sactive_block;
 262	void __iomem	*irq_block;
 263	void __iomem	*tag_block;
 264	u32		qc_active;
 265
 266	unsigned int	last_issue_tag;
 267
  268	/* FIFO circular queue to store deferred commands */
 269	struct defer_queue defer_queue;
 270
 271	/* for NCQ interrupt analysis */
 272	u32		dhfis_bits;
 273	u32		dmafis_bits;
 274	u32		sdbfis_bits;
 275
 276	unsigned int	ncq_flags;
 277};
 278
 279
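     /*
      * Test whether the given port has an ADMA interrupt pending in the ADMA
      * general control/status word: the per-port bits start at bit 19 and
      * are spaced 12 bits apart.
      */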
 280#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
 281
 282static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 283#ifdef CONFIG_PM_SLEEP
 284static int nv_pci_device_resume(struct pci_dev *pdev);
 285#endif
 286static void nv_ck804_host_stop(struct ata_host *host);
 287static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
 288static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
 289static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
 290static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 291static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 292
 293static int nv_hardreset(struct ata_link *link, unsigned int *class,
 294			unsigned long deadline);
 295static void nv_nf2_freeze(struct ata_port *ap);
 296static void nv_nf2_thaw(struct ata_port *ap);
 297static void nv_ck804_freeze(struct ata_port *ap);
 298static void nv_ck804_thaw(struct ata_port *ap);
 299static int nv_adma_slave_config(struct scsi_device *sdev);
 300static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 301static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
 302static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 303static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 304static void nv_adma_irq_clear(struct ata_port *ap);
 305static int nv_adma_port_start(struct ata_port *ap);
 306static void nv_adma_port_stop(struct ata_port *ap);
 307#ifdef CONFIG_PM
 308static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
 309static int nv_adma_port_resume(struct ata_port *ap);
 310#endif
 311static void nv_adma_freeze(struct ata_port *ap);
 312static void nv_adma_thaw(struct ata_port *ap);
 313static void nv_adma_error_handler(struct ata_port *ap);
 314static void nv_adma_host_stop(struct ata_host *host);
 315static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
 316static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 317
 318static void nv_mcp55_thaw(struct ata_port *ap);
 319static void nv_mcp55_freeze(struct ata_port *ap);
 320static void nv_swncq_error_handler(struct ata_port *ap);
 321static int nv_swncq_slave_config(struct scsi_device *sdev);
 322static int nv_swncq_port_start(struct ata_port *ap);
 323static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 324static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 325static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 326static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
 327static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
 328#ifdef CONFIG_PM
 329static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
 330static int nv_swncq_port_resume(struct ata_port *ap);
 331#endif
 332
 333enum nv_host_type
 334{
 335	GENERIC,
 336	NFORCE2,
 337	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
 338	CK804,
 339	ADMA,
 340	MCP5x,
 341	SWNCQ,
 342};
 343
 344static const struct pci_device_id nv_pci_tbl[] = {
 345	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
 346	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
 347	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
 348	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
 349	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
 350	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
 351	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
 352	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
 353	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
 354	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
 355	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
 356	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 357	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 358	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
 359
 360	{ } /* terminate list */
 361};
 362
 363static struct pci_driver nv_pci_driver = {
 364	.name			= DRV_NAME,
 365	.id_table		= nv_pci_tbl,
 366	.probe			= nv_init_one,
 367#ifdef CONFIG_PM_SLEEP
 368	.suspend		= ata_pci_device_suspend,
 369	.resume			= nv_pci_device_resume,
 370#endif
 371	.remove			= ata_pci_remove_one,
 372};
 373
 374static const struct scsi_host_template nv_sht = {
 375	ATA_BMDMA_SHT(DRV_NAME),
 376};
 377
 378static const struct scsi_host_template nv_adma_sht = {
 379	__ATA_BASE_SHT(DRV_NAME),
 380	.can_queue		= NV_ADMA_MAX_CPBS,
 381	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
 382	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
 383	.slave_configure	= nv_adma_slave_config,
 384	.sdev_groups		= ata_ncq_sdev_groups,
 385	.change_queue_depth     = ata_scsi_change_queue_depth,
 386	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 387};
 388
 389static const struct scsi_host_template nv_swncq_sht = {
 390	__ATA_BASE_SHT(DRV_NAME),
 391	.can_queue		= ATA_MAX_QUEUE - 1,
 392	.sg_tablesize		= LIBATA_MAX_PRD,
 393	.dma_boundary		= ATA_DMA_BOUNDARY,
 394	.slave_configure	= nv_swncq_slave_config,
 395	.sdev_groups		= ata_ncq_sdev_groups,
 396	.change_queue_depth     = ata_scsi_change_queue_depth,
 397	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
 398};
 399
 400/*
  401 * NV SATA controllers have various problems with the hardreset
 402 * protocol depending on the specific controller and device.
 403 *
 404 * GENERIC:
 405 *
  406 *  bko11195 reports that the link doesn't come online after hardreset on
 407 *  generic nv's and there have been several other similar reports on
 408 *  linux-ide.
 409 *
 410 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 411 *  softreset.
 412 *
 413 * NF2/3:
 414 *
 415 *  bko3352 reports nf2/3 controllers can't determine device signature
 416 *  reliably after hardreset.  The following thread reports detection
 417 *  failure on cold boot with the standard debouncing timing.
 418 *
 419 *  http://thread.gmane.org/gmane.linux.ide/34098
 420 *
 421 *  bko12176 reports that hardreset fails to bring up the link during
 422 *  boot on nf2.
 423 *
 424 * CK804:
 425 *
 426 *  For initial probing after boot and hot plugging, hardreset mostly
  427 *  works fine on CK804 but, curiously, reprobing the initial port
  428 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
  429 *  FIS in a somewhat nondeterministic way.
 430 *
 431 * SWNCQ:
 432 *
 433 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 434 *  hardreset should be used and hardreset can't report proper
  435 *  signature, which suggests that mcp5x is closer to nf2 as far as
 436 *  reset quirkiness is concerned.
 437 *
  438 *  bko12703 reports that boot probing fails for Intel SSDs with
 439 *  hardreset.  Link fails to come online.  Softreset works fine.
 440 *
 441 * The failures are varied but the following patterns seem true for
 442 * all flavors.
 443 *
 444 * - Softreset during boot always works.
 445 *
 446 * - Hardreset during boot sometimes fails to bring up the link on
  447 *   certain combinations and device signature acquisition is
 448 *   unreliable.
 449 *
 450 * - Hardreset is often necessary after hotplug.
 451 *
 452 * So, preferring softreset for boot probing and error handling (as
 453 * hardreset might bring down the link) but using hardreset for
 454 * post-boot probing should work around the above issues in most
 455 * cases.  Define nv_hardreset() which only kicks in for post-boot
 456 * probing and use it for all variants.
 457 */
 458static struct ata_port_operations nv_generic_ops = {
 459	.inherits		= &ata_bmdma_port_ops,
 460	.lost_interrupt		= ATA_OP_NULL,
 461	.scr_read		= nv_scr_read,
 462	.scr_write		= nv_scr_write,
 463	.hardreset		= nv_hardreset,
 464};
 465
 466static struct ata_port_operations nv_nf2_ops = {
 467	.inherits		= &nv_generic_ops,
 468	.freeze			= nv_nf2_freeze,
 469	.thaw			= nv_nf2_thaw,
 470};
 471
 472static struct ata_port_operations nv_ck804_ops = {
 473	.inherits		= &nv_generic_ops,
 474	.freeze			= nv_ck804_freeze,
 475	.thaw			= nv_ck804_thaw,
 476	.host_stop		= nv_ck804_host_stop,
 477};
 478
 479static struct ata_port_operations nv_adma_ops = {
 480	.inherits		= &nv_ck804_ops,
 481
 482	.check_atapi_dma	= nv_adma_check_atapi_dma,
 483	.sff_tf_read		= nv_adma_tf_read,
 484	.qc_defer		= ata_std_qc_defer,
 485	.qc_prep		= nv_adma_qc_prep,
 486	.qc_issue		= nv_adma_qc_issue,
 487	.sff_irq_clear		= nv_adma_irq_clear,
 488
 489	.freeze			= nv_adma_freeze,
 490	.thaw			= nv_adma_thaw,
 491	.error_handler		= nv_adma_error_handler,
 492	.post_internal_cmd	= nv_adma_post_internal_cmd,
 493
 494	.port_start		= nv_adma_port_start,
 495	.port_stop		= nv_adma_port_stop,
 496#ifdef CONFIG_PM
 497	.port_suspend		= nv_adma_port_suspend,
 498	.port_resume		= nv_adma_port_resume,
 499#endif
 500	.host_stop		= nv_adma_host_stop,
 501};
 502
 503static struct ata_port_operations nv_swncq_ops = {
 504	.inherits		= &nv_generic_ops,
 505
 506	.qc_defer		= ata_std_qc_defer,
 507	.qc_prep		= nv_swncq_qc_prep,
 508	.qc_issue		= nv_swncq_qc_issue,
 509
 510	.freeze			= nv_mcp55_freeze,
 511	.thaw			= nv_mcp55_thaw,
 512	.error_handler		= nv_swncq_error_handler,
 513
 514#ifdef CONFIG_PM
 515	.port_suspend		= nv_swncq_port_suspend,
 516	.port_resume		= nv_swncq_port_resume,
 517#endif
 518	.port_start		= nv_swncq_port_start,
 519};
 520
 521struct nv_pi_priv {
 522	irq_handler_t			irq_handler;
 523	const struct scsi_host_template	*sht;
 524};
 525
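     /*
      * Build a pointer to an anonymous struct nv_pi_priv bundling the
      * interrupt handler and scsi_host_template used by each controller
      * flavor below.
      */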
 526#define NV_PI_PRIV(_irq_handler, _sht) \
 527	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
 528
 529static const struct ata_port_info nv_port_info[] = {
 530	/* generic */
 531	{
 532		.flags		= ATA_FLAG_SATA,
 533		.pio_mask	= NV_PIO_MASK,
 534		.mwdma_mask	= NV_MWDMA_MASK,
 535		.udma_mask	= NV_UDMA_MASK,
 536		.port_ops	= &nv_generic_ops,
 537		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 538	},
 539	/* nforce2/3 */
 540	{
 541		.flags		= ATA_FLAG_SATA,
 542		.pio_mask	= NV_PIO_MASK,
 543		.mwdma_mask	= NV_MWDMA_MASK,
 544		.udma_mask	= NV_UDMA_MASK,
 545		.port_ops	= &nv_nf2_ops,
 546		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
 547	},
 548	/* ck804 */
 549	{
 550		.flags		= ATA_FLAG_SATA,
 551		.pio_mask	= NV_PIO_MASK,
 552		.mwdma_mask	= NV_MWDMA_MASK,
 553		.udma_mask	= NV_UDMA_MASK,
 554		.port_ops	= &nv_ck804_ops,
 555		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
 556	},
 557	/* ADMA */
 558	{
 559		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
 560		.pio_mask	= NV_PIO_MASK,
 561		.mwdma_mask	= NV_MWDMA_MASK,
 562		.udma_mask	= NV_UDMA_MASK,
 563		.port_ops	= &nv_adma_ops,
 564		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
 565	},
 566	/* MCP5x */
 567	{
 568		.flags		= ATA_FLAG_SATA,
 569		.pio_mask	= NV_PIO_MASK,
 570		.mwdma_mask	= NV_MWDMA_MASK,
 571		.udma_mask	= NV_UDMA_MASK,
 572		.port_ops	= &nv_generic_ops,
 573		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 574	},
 575	/* SWNCQ */
 576	{
 577		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
 578		.pio_mask	= NV_PIO_MASK,
 579		.mwdma_mask	= NV_MWDMA_MASK,
 580		.udma_mask	= NV_UDMA_MASK,
 581		.port_ops	= &nv_swncq_ops,
 582		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
 583	},
 584};
 585
 586MODULE_AUTHOR("NVIDIA");
 587MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
 588MODULE_LICENSE("GPL");
 589MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 590MODULE_VERSION(DRV_VERSION);
 591
 592static bool adma_enabled;
 593static bool swncq_enabled = true;
 594static bool msi_enabled;
 595
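     /*
      * Switch a port from ADMA mode back to legacy register mode: wait for
      * the ADMA engine to go idle, clear the GO bit, then wait for the
      * LEGACY status bit before marking the port as being in register mode.
      */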
 596static void nv_adma_register_mode(struct ata_port *ap)
 597{
 598	struct nv_adma_port_priv *pp = ap->private_data;
 599	void __iomem *mmio = pp->ctl_block;
 600	u16 tmp, status;
 601	int count = 0;
 602
 603	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
 604		return;
 605
 606	status = readw(mmio + NV_ADMA_STAT);
 607	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
 608		ndelay(50);
 609		status = readw(mmio + NV_ADMA_STAT);
 610		count++;
 611	}
 612	if (count == 20)
 613		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
 614			      status);
 615
 616	tmp = readw(mmio + NV_ADMA_CTL);
 617	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 618
 619	count = 0;
 620	status = readw(mmio + NV_ADMA_STAT);
 621	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
 622		ndelay(50);
 623		status = readw(mmio + NV_ADMA_STAT);
 624		count++;
 625	}
 626	if (count == 20)
 627		ata_port_warn(ap,
 628			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
 629			      status);
 630
 631	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
 632}
 633
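     /*
      * The opposite transition: set the GO bit, then wait for LEGACY to
      * clear and IDLE to assert before marking the port as being in ADMA
      * mode.
      */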
 634static void nv_adma_mode(struct ata_port *ap)
 635{
 636	struct nv_adma_port_priv *pp = ap->private_data;
 637	void __iomem *mmio = pp->ctl_block;
 638	u16 tmp, status;
 639	int count = 0;
 640
 641	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
 642		return;
 643
 644	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 645
 646	tmp = readw(mmio + NV_ADMA_CTL);
 647	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 648
 649	status = readw(mmio + NV_ADMA_STAT);
 650	while (((status & NV_ADMA_STAT_LEGACY) ||
 651	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
 652		ndelay(50);
 653		status = readw(mmio + NV_ADMA_STAT);
 654		count++;
 655	}
 656	if (count == 20)
 657		ata_port_warn(ap,
 658			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
 659			status);
 660
 661	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
 662}
 663
 664static int nv_adma_slave_config(struct scsi_device *sdev)
 665{
 666	struct ata_port *ap = ata_shost_to_port(sdev->host);
 667	struct nv_adma_port_priv *pp = ap->private_data;
 668	struct nv_adma_port_priv *port0, *port1;
 669	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 670	unsigned long segment_boundary, flags;
 671	unsigned short sg_tablesize;
 672	int rc;
 673	int adma_enable;
 674	u32 current_reg, new_reg, config_mask;
 675
 676	rc = ata_scsi_slave_config(sdev);
 677
 678	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
 679		/* Not a proper libata device, ignore */
 680		return rc;
 681
 682	spin_lock_irqsave(ap->lock, flags);
 683
 684	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 685		/*
 686		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
 687		 * Therefore ATAPI commands are sent through the legacy interface.
 688		 * However, the legacy interface only supports 32-bit DMA.
 689		 * Restrict DMA parameters as required by the legacy interface
 690		 * when an ATAPI device is connected.
 691		 */
 692		segment_boundary = ATA_DMA_BOUNDARY;
 693		/* Subtract 1 since an extra entry may be needed for padding, see
 694		   libata-scsi.c */
 695		sg_tablesize = LIBATA_MAX_PRD - 1;
 696
 697		/* Since the legacy DMA engine is in use, we need to disable ADMA
 698		   on the port. */
 699		adma_enable = 0;
 700		nv_adma_register_mode(ap);
 701	} else {
 702		segment_boundary = NV_ADMA_DMA_BOUNDARY;
 703		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
 704		adma_enable = 1;
 705	}
 706
 707	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
 708
 709	if (ap->port_no == 1)
 710		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
 711			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
 712	else
 713		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
 714			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
 715
 716	if (adma_enable) {
 717		new_reg = current_reg | config_mask;
 718		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
 719	} else {
 720		new_reg = current_reg & ~config_mask;
 721		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
 722	}
 723
 724	if (current_reg != new_reg)
 725		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 726
 727	port0 = ap->host->ports[0]->private_data;
 728	port1 = ap->host->ports[1]->private_data;
 729	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 730	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
 731		/*
 732		 * We have to set the DMA mask to 32-bit if either port is in
 733		 * ATAPI mode, since they are on the same PCI device which is
 734		 * used for DMA mapping.  If either SCSI device is not allocated
 735		 * yet, it's OK since that port will discover its correct
 736		 * setting when it does get allocated.
 737		 */
 738		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 739	} else {
 740		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
 741	}
 742
 743	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
 744	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 745	ata_port_info(ap,
 746		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 747		      (unsigned long long)*ap->host->dev->dma_mask,
 748		      segment_boundary, sg_tablesize);
 749
 750	spin_unlock_irqrestore(ap->lock, flags);
 751
 752	return rc;
 753}
 754
 755static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
 756{
 757	struct nv_adma_port_priv *pp = qc->ap->private_data;
 758	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 759}
 760
 761static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 762{
 763	/* Other than when internal or pass-through commands are executed,
 764	   the only time this function will be called in ADMA mode will be
 765	   if a command fails. In the failure case we don't care about going
 766	   into register mode with ADMA commands pending, as the commands will
 767	   all shortly be aborted anyway. We assume that NCQ commands are not
 768	   issued via passthrough, which is the only way that switching into
 769	   ADMA mode could abort outstanding commands. */
 770	nv_adma_register_mode(ap);
 771
 772	ata_sff_tf_read(ap, tf);
 773}
 774
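     /*
      * Encode a taskfile as the CPB's sequence of 16-bit
      * ((register << 8) | value) entries, marking the final command write
      * with CMDEND and padding any unused slots of the 12 with IGN.
      */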
 775static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
 776{
 777	unsigned int idx = 0;
 778
 779	if (tf->flags & ATA_TFLAG_ISADDR) {
 780		if (tf->flags & ATA_TFLAG_LBA48) {
 781			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
 782			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
 783			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
 784			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
 785			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
 786			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
 787		} else
 788			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
 789
 790		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
 791		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
 792		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
 793		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
 794	}
 795
 796	if (tf->flags & ATA_TFLAG_DEVICE)
 797		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
 798
 799	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
 800
 801	while (idx < 12)
 802		cpb[idx++] = cpu_to_le16(IGN);
 803
 804	return idx;
 805}
 806
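     /*
      * Inspect one CPB's response flags: returns 1 when the CPB completed
      * successfully, 0 when it is still outstanding, and -1 after kicking
      * off error handling for a failed CPB.
      */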
 807static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 808{
 809	struct nv_adma_port_priv *pp = ap->private_data;
 810	u8 flags = pp->cpb[cpb_num].resp_flags;
 811
 812	ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);
 813
 814	if (unlikely((force_err ||
 815		     flags & (NV_CPB_RESP_ATA_ERR |
 816			      NV_CPB_RESP_CMD_ERR |
 817			      NV_CPB_RESP_CPB_ERR)))) {
 818		struct ata_eh_info *ehi = &ap->link.eh_info;
 819		int freeze = 0;
 820
 821		ata_ehi_clear_desc(ehi);
 822		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
 823		if (flags & NV_CPB_RESP_ATA_ERR) {
 824			ata_ehi_push_desc(ehi, "ATA error");
 825			ehi->err_mask |= AC_ERR_DEV;
 826		} else if (flags & NV_CPB_RESP_CMD_ERR) {
 827			ata_ehi_push_desc(ehi, "CMD error");
 828			ehi->err_mask |= AC_ERR_DEV;
 829		} else if (flags & NV_CPB_RESP_CPB_ERR) {
 830			ata_ehi_push_desc(ehi, "CPB error");
 831			ehi->err_mask |= AC_ERR_SYSTEM;
 832			freeze = 1;
 833		} else {
 834			/* notifier error, but no error in CPB flags? */
 835			ata_ehi_push_desc(ehi, "unknown");
 836			ehi->err_mask |= AC_ERR_OTHER;
 837			freeze = 1;
 838		}
 839		/* Kill all commands. EH will determine what actually failed. */
 840		if (freeze)
 841			ata_port_freeze(ap);
 842		else
 843			ata_port_abort(ap);
 844		return -1;
 845	}
 846
 847	if (likely(flags & NV_CPB_RESP_DONE))
 848		return 1;
 849	return 0;
 850}
 851
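     /*
      * Legacy per-port interrupt handling: freeze the port on hotplug
      * events, ignore interrupts without the DEV bit, acknowledge spurious
      * DEV interrupts by reading the status register, and otherwise hand the
      * active qc to the standard BMDMA interrupt handler.
      */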
 852static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
 853{
 854	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 855
 856	/* freeze if hotplugged */
 857	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
 858		ata_port_freeze(ap);
 859		return 1;
 860	}
 861
 862	/* bail out if not our interrupt */
 863	if (!(irq_stat & NV_INT_DEV))
 864		return 0;
 865
 866	/* DEV interrupt w/ no active qc? */
 867	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 868		ata_sff_check_status(ap);
 869		return 1;
 870	}
 871
 872	/* handle interrupt */
 873	return ata_bmdma_port_intr(ap, qc);
 874}
 875
 876static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 877{
 878	struct ata_host *host = dev_instance;
 879	int i, handled = 0;
 880	u32 notifier_clears[2];
 881
 882	spin_lock(&host->lock);
 883
 884	for (i = 0; i < host->n_ports; i++) {
 885		struct ata_port *ap = host->ports[i];
 886		struct nv_adma_port_priv *pp = ap->private_data;
 887		void __iomem *mmio = pp->ctl_block;
 888		u16 status;
 889		u32 gen_ctl;
 890		u32 notifier, notifier_error;
 891
 892		notifier_clears[i] = 0;
 893
 894		/* if ADMA is disabled, use standard ata interrupt handler */
 895		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
 896			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 897				>> (NV_INT_PORT_SHIFT * i);
 898			handled += nv_host_intr(ap, irq_stat);
 899			continue;
 900		}
 901
 902		/* if in ATA register mode, check for standard interrupts */
 903		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
 904			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 905				>> (NV_INT_PORT_SHIFT * i);
 906			if (ata_tag_valid(ap->link.active_tag))
  907				/* NV_INT_DEV indication seems unreliable
 908				    at times at least in ADMA mode. Force it
 909				    on always when a command is active, to
 910				    prevent losing interrupts. */
 911				irq_stat |= NV_INT_DEV;
 912			handled += nv_host_intr(ap, irq_stat);
 913		}
 914
 915		notifier = readl(mmio + NV_ADMA_NOTIFIER);
 916		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
 917		notifier_clears[i] = notifier | notifier_error;
 918
 919		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
 920
 921		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
 922		    !notifier_error)
 923			/* Nothing to do */
 924			continue;
 925
 926		status = readw(mmio + NV_ADMA_STAT);
 927
 928		/*
 929		 * Clear status. Ensure the controller sees the
 930		 * clearing before we start looking at any of the CPB
 931		 * statuses, so that any CPB completions after this
 932		 * point in the handler will raise another interrupt.
 933		 */
 934		writew(status, mmio + NV_ADMA_STAT);
 935		readw(mmio + NV_ADMA_STAT); /* flush posted write */
 936		rmb();
 937
 938		handled++; /* irq handled if we got here */
 939
 940		/* freeze if hotplugged or controller error */
 941		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
 942				       NV_ADMA_STAT_HOTUNPLUG |
 943				       NV_ADMA_STAT_TIMEOUT |
 944				       NV_ADMA_STAT_SERROR))) {
 945			struct ata_eh_info *ehi = &ap->link.eh_info;
 946
 947			ata_ehi_clear_desc(ehi);
 948			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
 949			if (status & NV_ADMA_STAT_TIMEOUT) {
 950				ehi->err_mask |= AC_ERR_SYSTEM;
 951				ata_ehi_push_desc(ehi, "timeout");
 952			} else if (status & NV_ADMA_STAT_HOTPLUG) {
 953				ata_ehi_hotplugged(ehi);
 954				ata_ehi_push_desc(ehi, "hotplug");
 955			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
 956				ata_ehi_hotplugged(ehi);
 957				ata_ehi_push_desc(ehi, "hot unplug");
 958			} else if (status & NV_ADMA_STAT_SERROR) {
 959				/* let EH analyze SError and figure out cause */
 960				ata_ehi_push_desc(ehi, "SError");
 961			} else
 962				ata_ehi_push_desc(ehi, "unknown");
 963			ata_port_freeze(ap);
 964			continue;
 965		}
 966
 967		if (status & (NV_ADMA_STAT_DONE |
 968			      NV_ADMA_STAT_CPBERR |
 969			      NV_ADMA_STAT_CMD_COMPLETE)) {
 970			u32 check_commands = notifier_clears[i];
 971			u32 done_mask = 0;
 972			int pos, rc;
 973
 974			if (status & NV_ADMA_STAT_CPBERR) {
 975				/* check all active commands */
 976				if (ata_tag_valid(ap->link.active_tag))
 977					check_commands = 1 <<
 978						ap->link.active_tag;
 979				else
 980					check_commands = ap->link.sactive;
 981			}
 982
 983			/* check CPBs for completed commands */
 984			while ((pos = ffs(check_commands))) {
 985				pos--;
 986				rc = nv_adma_check_cpb(ap, pos,
 987						notifier_error & (1 << pos));
 988				if (rc > 0)
 989					done_mask |= 1 << pos;
 990				else if (unlikely(rc < 0))
 991					check_commands = 0;
 992				check_commands &= ~(1 << pos);
 993			}
 994			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 995		}
 996	}
 997
 998	if (notifier_clears[0] || notifier_clears[1]) {
 999		/* Note: Both notifier clear registers must be written
1000		   if either is set, even if one is zero, according to NVIDIA. */
1001		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1002		writel(notifier_clears[0], pp->notifier_clear_block);
1003		pp = host->ports[1]->private_data;
1004		writel(notifier_clears[1], pp->notifier_clear_block);
1005	}
1006
1007	spin_unlock(&host->lock);
1008
1009	return IRQ_RETVAL(handled);
1010}
1011
1012static void nv_adma_freeze(struct ata_port *ap)
1013{
1014	struct nv_adma_port_priv *pp = ap->private_data;
1015	void __iomem *mmio = pp->ctl_block;
1016	u16 tmp;
1017
1018	nv_ck804_freeze(ap);
1019
1020	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1021		return;
1022
1023	/* clear any outstanding CK804 notifications */
1024	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1025		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1026
1027	/* Disable interrupt */
1028	tmp = readw(mmio + NV_ADMA_CTL);
1029	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1030		mmio + NV_ADMA_CTL);
1031	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1032}
1033
1034static void nv_adma_thaw(struct ata_port *ap)
1035{
1036	struct nv_adma_port_priv *pp = ap->private_data;
1037	void __iomem *mmio = pp->ctl_block;
1038	u16 tmp;
1039
1040	nv_ck804_thaw(ap);
1041
1042	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1043		return;
1044
1045	/* Enable interrupt */
1046	tmp = readw(mmio + NV_ADMA_CTL);
1047	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1048		mmio + NV_ADMA_CTL);
1049	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1050}
1051
1052static void nv_adma_irq_clear(struct ata_port *ap)
1053{
1054	struct nv_adma_port_priv *pp = ap->private_data;
1055	void __iomem *mmio = pp->ctl_block;
1056	u32 notifier_clears[2];
1057
1058	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1059		ata_bmdma_irq_clear(ap);
1060		return;
1061	}
1062
1063	/* clear any outstanding CK804 notifications */
1064	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1065		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1066
1067	/* clear ADMA status */
1068	writew(0xffff, mmio + NV_ADMA_STAT);
1069
1070	/* clear notifiers - note both ports need to be written with
1071	   something even though we are only clearing on one */
1072	if (ap->port_no == 0) {
1073		notifier_clears[0] = 0xFFFFFFFF;
1074		notifier_clears[1] = 0;
1075	} else {
1076		notifier_clears[0] = 0;
1077		notifier_clears[1] = 0xFFFFFFFF;
1078	}
1079	pp = ap->host->ports[0]->private_data;
1080	writel(notifier_clears[0], pp->notifier_clear_block);
1081	pp = ap->host->ports[1]->private_data;
1082	writel(notifier_clears[1], pp->notifier_clear_block);
1083}
1084
1085static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1086{
1087	struct nv_adma_port_priv *pp = qc->ap->private_data;
1088
1089	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1090		ata_bmdma_post_internal_cmd(qc);
1091}
1092
1093static int nv_adma_port_start(struct ata_port *ap)
1094{
1095	struct device *dev = ap->host->dev;
1096	struct nv_adma_port_priv *pp;
1097	int rc;
1098	void *mem;
1099	dma_addr_t mem_dma;
1100	void __iomem *mmio;
1101	struct pci_dev *pdev = to_pci_dev(dev);
1102	u16 tmp;
1103
1104	/*
1105	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1106	 * pad buffers.
1107	 */
1108	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1109	if (rc)
1110		return rc;
1111
 1112	/* we might fall back to bmdma, so allocate bmdma resources */
1113	rc = ata_bmdma_port_start(ap);
1114	if (rc)
1115		return rc;
1116
1117	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1118	if (!pp)
1119		return -ENOMEM;
1120
1121	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1122	       ap->port_no * NV_ADMA_PORT_SIZE;
1123	pp->ctl_block = mmio;
1124	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1125	pp->notifier_clear_block = pp->gen_block +
1126	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1127
1128	/*
1129	 * Now that the legacy PRD and padding buffer are allocated we can
1130	 * raise the DMA mask to allocate the CPB/APRD table.
1131	 */
1132	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1133
1134	pp->adma_dma_mask = *dev->dma_mask;
1135
1136	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1137				  &mem_dma, GFP_KERNEL);
1138	if (!mem)
1139		return -ENOMEM;
1140
1141	/*
1142	 * First item in chunk of DMA memory:
1143	 * 128-byte command parameter block (CPB)
1144	 * one for each command tag
1145	 */
1146	pp->cpb     = mem;
1147	pp->cpb_dma = mem_dma;
1148
1149	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1150	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1151
1152	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1153	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1154
1155	/*
1156	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1157	 */
1158	pp->aprd = mem;
1159	pp->aprd_dma = mem_dma;
1160
1161	ap->private_data = pp;
1162
1163	/* clear any outstanding interrupt conditions */
1164	writew(0xffff, mmio + NV_ADMA_STAT);
1165
1166	/* initialize port variables */
1167	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1168
1169	/* clear CPB fetch count */
1170	writew(0, mmio + NV_ADMA_CPB_COUNT);
1171
1172	/* clear GO for register mode, enable interrupt */
1173	tmp = readw(mmio + NV_ADMA_CTL);
1174	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1175		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1176
1177	tmp = readw(mmio + NV_ADMA_CTL);
1178	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1179	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1180	udelay(1);
1181	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1182	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1183
1184	return 0;
1185}
1186
1187static void nv_adma_port_stop(struct ata_port *ap)
1188{
1189	struct nv_adma_port_priv *pp = ap->private_data;
1190	void __iomem *mmio = pp->ctl_block;
1191
1192	writew(0, mmio + NV_ADMA_CTL);
1193}
1194
1195#ifdef CONFIG_PM
1196static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1197{
1198	struct nv_adma_port_priv *pp = ap->private_data;
1199	void __iomem *mmio = pp->ctl_block;
1200
1201	/* Go to register mode - clears GO */
1202	nv_adma_register_mode(ap);
1203
1204	/* clear CPB fetch count */
1205	writew(0, mmio + NV_ADMA_CPB_COUNT);
1206
1207	/* disable interrupt, shut down port */
1208	writew(0, mmio + NV_ADMA_CTL);
1209
1210	return 0;
1211}
1212
1213static int nv_adma_port_resume(struct ata_port *ap)
1214{
1215	struct nv_adma_port_priv *pp = ap->private_data;
1216	void __iomem *mmio = pp->ctl_block;
1217	u16 tmp;
1218
1219	/* set CPB block location */
1220	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1221	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1222
1223	/* clear any outstanding interrupt conditions */
1224	writew(0xffff, mmio + NV_ADMA_STAT);
1225
1226	/* initialize port variables */
1227	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1228
1229	/* clear CPB fetch count */
1230	writew(0, mmio + NV_ADMA_CPB_COUNT);
1231
1232	/* clear GO for register mode, enable interrupt */
1233	tmp = readw(mmio + NV_ADMA_CTL);
1234	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1235		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1236
1237	tmp = readw(mmio + NV_ADMA_CTL);
1238	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1239	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1240	udelay(1);
1241	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1242	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1243
1244	return 0;
1245}
1246#endif
1247
1248static void nv_adma_setup_port(struct ata_port *ap)
1249{
1250	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1251	struct ata_ioports *ioport = &ap->ioaddr;
1252
1253	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1254
1255	ioport->cmd_addr	= mmio;
1256	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1257	ioport->error_addr	=
1258	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1259	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1260	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1261	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1262	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1263	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1264	ioport->status_addr	=
1265	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1266	ioport->altstatus_addr	=
1267	ioport->ctl_addr	= mmio + 0x20;
1268}
1269
1270static int nv_adma_host_init(struct ata_host *host)
1271{
1272	struct pci_dev *pdev = to_pci_dev(host->dev);
1273	unsigned int i;
1274	u32 tmp32;
1275
1276	/* enable ADMA on the ports */
1277	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1278	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1279		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1280		 NV_MCP_SATA_CFG_20_PORT1_EN |
1281		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1282
1283	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1284
1285	for (i = 0; i < host->n_ports; i++)
1286		nv_adma_setup_port(host->ports[i]);
1287
1288	return 0;
1289}
1290
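     /*
      * Fill one ADMA PRD entry from a scatterlist element.  Every entry
      * except the last gets NV_APRD_CONT, apart from the fifth inline entry
      * (idx 4), whose continuation is presumably reached via the CPB's
      * next_aprd pointer instead (see nv_adma_fill_sg() below).
      */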
1291static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292			      struct scatterlist *sg,
1293			      int idx,
1294			      struct nv_adma_prd *aprd)
1295{
1296	u8 flags = 0;
1297	if (qc->tf.flags & ATA_TFLAG_WRITE)
1298		flags |= NV_APRD_WRITE;
1299	if (idx == qc->n_elem - 1)
1300		flags |= NV_APRD_END;
1301	else if (idx != 4)
1302		flags |= NV_APRD_CONT;
1303
1304	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1305	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1306	aprd->flags = flags;
1307	aprd->packet_len = 0;
1308}
1309
1310static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1311{
1312	struct nv_adma_port_priv *pp = qc->ap->private_data;
1313	struct nv_adma_prd *aprd;
1314	struct scatterlist *sg;
1315	unsigned int si;
1316
1317	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1318		aprd = (si < 5) ? &cpb->aprd[si] :
1319			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1320		nv_adma_fill_aprd(qc, sg, si, aprd);
1321	}
1322	if (si > 5)
1323		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1324	else
1325		cpb->next_aprd = cpu_to_le64(0);
1326}
1327
1328static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1329{
1330	struct nv_adma_port_priv *pp = qc->ap->private_data;
1331
1332	/* ADMA engine can only be used for non-ATAPI DMA commands,
1333	   or interrupt-driven no-data commands. */
1334	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1335	   (qc->tf.flags & ATA_TFLAG_POLLING))
1336		return 1;
1337
1338	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1339	   (qc->tf.protocol == ATA_PROT_NODATA))
1340		return 0;
1341
1342	return 1;
1343}
1344
1345static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1346{
1347	struct nv_adma_port_priv *pp = qc->ap->private_data;
1348	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1349	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1350		       NV_CPB_CTL_IEN;
1351
1352	if (nv_adma_use_reg_mode(qc)) {
1353		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1354			(qc->flags & ATA_QCFLAG_DMAMAP));
1355		nv_adma_register_mode(qc->ap);
1356		ata_bmdma_qc_prep(qc);
1357		return AC_ERR_OK;
1358	}
1359
1360	cpb->resp_flags = NV_CPB_RESP_DONE;
1361	wmb();
1362	cpb->ctl_flags = 0;
1363	wmb();
1364
1365	cpb->len		= 3;
1366	cpb->tag		= qc->hw_tag;
1367	cpb->next_cpb_idx	= 0;
1368
1369	/* turn on NCQ flags for NCQ commands */
1370	if (qc->tf.protocol == ATA_PROT_NCQ)
1371		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1372
1373	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1374
1375	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1376		nv_adma_fill_sg(qc, cpb);
1377		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1378	} else
1379		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1380
1381	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1382	   until we are finished filling in all of the contents */
1383	wmb();
1384	cpb->ctl_flags = ctl_flags;
1385	wmb();
1386	cpb->resp_flags = 0;
1387
1388	return AC_ERR_OK;
1389}
1390
1391static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1392{
1393	struct nv_adma_port_priv *pp = qc->ap->private_data;
1394	void __iomem *mmio = pp->ctl_block;
1395	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1396
1397	/* We can't handle result taskfile with NCQ commands, since
1398	   retrieving the taskfile switches us out of ADMA mode and would abort
1399	   existing commands. */
1400	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1401		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1402		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1403		return AC_ERR_SYSTEM;
1404	}
1405
1406	if (nv_adma_use_reg_mode(qc)) {
1407		/* use ATA register mode */
1408		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1409			(qc->flags & ATA_QCFLAG_DMAMAP));
1410		nv_adma_register_mode(qc->ap);
1411		return ata_bmdma_qc_issue(qc);
1412	} else
1413		nv_adma_mode(qc->ap);
1414
1415	/* write append register, command tag in lower 8 bits
1416	   and (number of cpbs to append -1) in top 8 bits */
1417	wmb();
1418
1419	if (curr_ncq != pp->last_issue_ncq) {
1420		/* Seems to need some delay before switching between NCQ and
1421		   non-NCQ commands, else we get command timeouts and such. */
1422		udelay(20);
1423		pp->last_issue_ncq = curr_ncq;
1424	}
1425
1426	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1427
1428	return 0;
1429}
1430
1431static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1432{
1433	struct ata_host *host = dev_instance;
1434	unsigned int i;
1435	unsigned int handled = 0;
1436	unsigned long flags;
1437
1438	spin_lock_irqsave(&host->lock, flags);
1439
1440	for (i = 0; i < host->n_ports; i++) {
1441		struct ata_port *ap = host->ports[i];
1442		struct ata_queued_cmd *qc;
1443
1444		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1445		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1446			handled += ata_bmdma_port_intr(ap, qc);
1447		} else {
1448			/*
1449			 * No request pending?  Clear interrupt status
1450			 * anyway, in case there's one pending.
1451			 */
1452			ap->ops->sff_check_status(ap);
1453		}
1454	}
1455
1456	spin_unlock_irqrestore(&host->lock, flags);
1457
1458	return IRQ_RETVAL(handled);
1459}
1460
1461static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1462{
1463	int i, handled = 0;
1464
1465	for (i = 0; i < host->n_ports; i++) {
1466		handled += nv_host_intr(host->ports[i], irq_stat);
1467		irq_stat >>= NV_INT_PORT_SHIFT;
1468	}
1469
1470	return IRQ_RETVAL(handled);
1471}
1472
1473static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1474{
1475	struct ata_host *host = dev_instance;
1476	u8 irq_stat;
1477	irqreturn_t ret;
1478
1479	spin_lock(&host->lock);
1480	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1481	ret = nv_do_interrupt(host, irq_stat);
1482	spin_unlock(&host->lock);
1483
1484	return ret;
1485}
1486
1487static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1488{
1489	struct ata_host *host = dev_instance;
1490	u8 irq_stat;
1491	irqreturn_t ret;
1492
1493	spin_lock(&host->lock);
1494	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1495	ret = nv_do_interrupt(host, irq_stat);
1496	spin_unlock(&host->lock);
1497
1498	return ret;
1499}
1500
1501static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1502{
1503	if (sc_reg > SCR_CONTROL)
1504		return -EINVAL;
1505
1506	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1507	return 0;
1508}
1509
1510static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1511{
1512	if (sc_reg > SCR_CONTROL)
1513		return -EINVAL;
1514
1515	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1516	return 0;
1517}
1518
1519static int nv_hardreset(struct ata_link *link, unsigned int *class,
1520			unsigned long deadline)
1521{
1522	struct ata_eh_context *ehc = &link->eh_context;
1523
 1524	/* Do hardreset iff it's post-boot probing; please read the
1525	 * comment above port ops for details.
1526	 */
1527	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1528	    !ata_dev_enabled(link->device))
1529		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1530				    NULL, NULL);
1531	else {
1532		const unsigned int *timing = sata_ehc_deb_timing(ehc);
1533		int rc;
1534
1535		if (!(ehc->i.flags & ATA_EHI_QUIET))
1536			ata_link_info(link,
1537				      "nv: skipping hardreset on occupied port\n");
1538
1539		/* make sure the link is online */
1540		rc = sata_link_resume(link, timing, deadline);
1541		/* whine about phy resume failure but proceed */
1542		if (rc && rc != -EOPNOTSUPP)
1543			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1544				      rc);
1545	}
1546
1547	/* device signature acquisition is unreliable */
1548	return -EAGAIN;
1549}
1550
1551static void nv_nf2_freeze(struct ata_port *ap)
1552{
1553	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1554	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1555	u8 mask;
1556
1557	mask = ioread8(scr_addr + NV_INT_ENABLE);
1558	mask &= ~(NV_INT_ALL << shift);
1559	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1560}
1561
1562static void nv_nf2_thaw(struct ata_port *ap)
1563{
1564	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1565	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1566	u8 mask;
1567
1568	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1569
1570	mask = ioread8(scr_addr + NV_INT_ENABLE);
1571	mask |= (NV_INT_MASK << shift);
1572	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1573}
1574
1575static void nv_ck804_freeze(struct ata_port *ap)
1576{
1577	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1578	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1579	u8 mask;
1580
1581	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1582	mask &= ~(NV_INT_ALL << shift);
1583	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1584}
1585
1586static void nv_ck804_thaw(struct ata_port *ap)
1587{
1588	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1589	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1590	u8 mask;
1591
1592	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1593
1594	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1595	mask |= (NV_INT_MASK << shift);
1596	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1597}
1598
1599static void nv_mcp55_freeze(struct ata_port *ap)
1600{
1601	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1602	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1603	u32 mask;
1604
1605	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1606
1607	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1608	mask &= ~(NV_INT_ALL_MCP55 << shift);
1609	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1610}
1611
1612static void nv_mcp55_thaw(struct ata_port *ap)
1613{
1614	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1615	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1616	u32 mask;
1617
1618	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1619
1620	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1621	mask |= (NV_INT_MASK_MCP55 << shift);
1622	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1623}
1624
1625static void nv_adma_error_handler(struct ata_port *ap)
1626{
1627	struct nv_adma_port_priv *pp = ap->private_data;
1628	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1629		void __iomem *mmio = pp->ctl_block;
1630		int i;
1631		u16 tmp;
1632
1633		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1634			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1635			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1636			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1637			u32 status = readw(mmio + NV_ADMA_STAT);
1638			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1639			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1640
1641			ata_port_err(ap,
1642				"EH in ADMA mode, notifier 0x%X "
1643				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1644				"next cpb count 0x%X next cpb idx 0x%x\n",
1645				notifier, notifier_error, gen_ctl, status,
1646				cpb_count, next_cpb_idx);
1647
1648			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1649				struct nv_adma_cpb *cpb = &pp->cpb[i];
1650				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1651				    ap->link.sactive & (1 << i))
1652					ata_port_err(ap,
1653						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1654						i, cpb->ctl_flags, cpb->resp_flags);
1655			}
1656		}
1657
1658		/* Push us back into port register mode for error handling. */
1659		nv_adma_register_mode(ap);
1660
1661		/* Mark all of the CPBs as invalid to prevent them from
1662		   being executed */
1663		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1664			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1665
1666		/* clear CPB fetch count */
1667		writew(0, mmio + NV_ADMA_CPB_COUNT);
1668
1669		/* Reset channel */
1670		tmp = readw(mmio + NV_ADMA_CTL);
1671		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1672		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1673		udelay(1);
1674		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1675		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1676	}
1677
1678	ata_bmdma_error_handler(ap);
1679}
1680
1681static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1682{
1683	struct nv_swncq_port_priv *pp = ap->private_data;
1684	struct defer_queue *dq = &pp->defer_queue;
1685
1686	/* queue is full */
1687	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1688	dq->defer_bits |= (1 << qc->hw_tag);
1689	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1690}
1691
1692static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1693{
1694	struct nv_swncq_port_priv *pp = ap->private_data;
1695	struct defer_queue *dq = &pp->defer_queue;
1696	unsigned int tag;
1697
1698	if (dq->head == dq->tail)	/* null queue */
1699		return NULL;
1700
1701	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1702	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1703	WARN_ON(!(dq->defer_bits & (1 << tag)));
1704	dq->defer_bits &= ~(1 << tag);
1705
1706	return ata_qc_from_tag(ap, tag);
1707}
1708
1709static void nv_swncq_fis_reinit(struct ata_port *ap)
1710{
1711	struct nv_swncq_port_priv *pp = ap->private_data;
1712
1713	pp->dhfis_bits = 0;
1714	pp->dmafis_bits = 0;
1715	pp->sdbfis_bits = 0;
1716	pp->ncq_flags = 0;
1717}
1718
1719static void nv_swncq_pp_reinit(struct ata_port *ap)
1720{
1721	struct nv_swncq_port_priv *pp = ap->private_data;
1722	struct defer_queue *dq = &pp->defer_queue;
1723
1724	dq->head = 0;
1725	dq->tail = 0;
1726	dq->defer_bits = 0;
1727	pp->qc_active = 0;
1728	pp->last_issue_tag = ATA_TAG_POISON;
1729	nv_swncq_fis_reinit(ap);
1730}
1731
1732static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1733{
1734	struct nv_swncq_port_priv *pp = ap->private_data;
1735
1736	writew(fis, pp->irq_block);
1737}
1738
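/*
 * ata_bmdma_stop() only dereferences qc->ap, so a dummy ata_queued_cmd on
 * the stack with just the port pointer filled in is enough to stop the
 * BMDMA engine outside the context of a real command.
 */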
1739static void __ata_bmdma_stop(struct ata_port *ap)
1740{
1741	struct ata_queued_cmd qc;
1742
1743	qc.ap = ap;
1744	ata_bmdma_stop(&qc);
1745}
1746
1747static void nv_swncq_ncq_stop(struct ata_port *ap)
1748{
1749	struct nv_swncq_port_priv *pp = ap->private_data;
1750	unsigned int i;
1751	u32 sactive;
1752	u32 done_mask;
1753
1754	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1755		     ap->qc_active, ap->link.sactive);
1756	ata_port_err(ap,
1757		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1758		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1759		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1760		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1761
1762	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1763		     ap->ops->sff_check_status(ap),
1764		     ioread8(ap->ioaddr.error_addr));
1765
1766	sactive = readl(pp->sactive_block);
1767	done_mask = pp->qc_active ^ sactive;
1768
1769	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1770	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1771		u8 err = 0;
1772		if (pp->qc_active & (1 << i))
1773			err = 0;
1774		else if (done_mask & (1 << i))
1775			err = 1;
1776		else
1777			continue;
1778
1779		ata_port_err(ap,
1780			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1781			     (pp->dhfis_bits >> i) & 0x1,
1782			     (pp->dmafis_bits >> i) & 0x1,
1783			     (pp->sdbfis_bits >> i) & 0x1,
1784			     (sactive >> i) & 0x1,
1785			     (err ? "error! tag doesn't exist" : " "));
1786	}
1787
1788	nv_swncq_pp_reinit(ap);
1789	ap->ops->sff_irq_clear(ap);
1790	__ata_bmdma_stop(ap);
1791	nv_swncq_irq_clear(ap, 0xffff);
1792}
1793
1794static void nv_swncq_error_handler(struct ata_port *ap)
1795{
1796	struct ata_eh_context *ehc = &ap->link.eh_context;
1797
1798	if (ap->link.sactive) {
1799		nv_swncq_ncq_stop(ap);
1800		ehc->i.action |= ATA_EH_RESET;
1801	}
1802
1803	ata_bmdma_error_handler(ap);
1804}
1805
1806#ifdef CONFIG_PM
1807static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1808{
1809	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1810	u32 tmp;
1811
1812	/* clear irq */
1813	writel(~0, mmio + NV_INT_STATUS_MCP55);
1814
1815	/* disable irq */
1816	writel(0, mmio + NV_INT_ENABLE_MCP55);
1817
1818	/* disable swncq */
1819	tmp = readl(mmio + NV_CTL_MCP55);
1820	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1821	writel(tmp, mmio + NV_CTL_MCP55);
1822
1823	return 0;
1824}
1825
1826static int nv_swncq_port_resume(struct ata_port *ap)
1827{
1828	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1829	u32 tmp;
1830
1831	/* clear irq */
1832	writel(~0, mmio + NV_INT_STATUS_MCP55);
1833
1834	/* enable irq */
1835	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1836
1837	/* enable swncq */
1838	tmp = readl(mmio + NV_CTL_MCP55);
1839	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1840
1841	return 0;
1842}
1843#endif
1844
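/*
 * One-time MCP55 setup for SWNCQ: clear the ECO 398 bit in PCI config
 * space, set the per-port SWNCQ enable bits in the host control register,
 * and unmask the SWNCQ interrupt sources (0x00fd per port appears to be
 * every status bit except the power-management one) before clearing any
 * stale interrupt status.
 */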
1845static void nv_swncq_host_init(struct ata_host *host)
1846{
1847	u32 tmp;
1848	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1849	struct pci_dev *pdev = to_pci_dev(host->dev);
1850	u8 regval;
1851
1852	/* disable ECO 398 */
1853	pci_read_config_byte(pdev, 0x7f, &regval);
1854	regval &= ~(1 << 7);
1855	pci_write_config_byte(pdev, 0x7f, regval);
1856
1857	/* enable swncq */
1858	tmp = readl(mmio + NV_CTL_MCP55);
1859	dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
1860	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1861
1862	/* enable irq intr */
1863	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1864	dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
1865	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1866
1867	/*  clear port irq */
1868	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1869}
1870
1871static int nv_swncq_slave_config(struct scsi_device *sdev)
1872{
1873	struct ata_port *ap = ata_shost_to_port(sdev->host);
1874	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1875	struct ata_device *dev;
1876	int rc;
1877	u8 rev;
1878	u8 check_maxtor = 0;
1879	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1880
1881	rc = ata_scsi_slave_config(sdev);
1882	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1883		/* Not a proper libata device, ignore */
1884		return rc;
1885
1886	dev = &ap->link.device[sdev->id];
1887	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1888		return rc;
1889
1890	/* if MCP51 and Maxtor, then disable ncq */
1891	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1892		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1893		check_maxtor = 1;
1894
1895	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1896	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1897		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1898		pci_read_config_byte(pdev, 0x8, &rev);
1899		if (rev <= 0xa2)
1900			check_maxtor = 1;
1901	}
1902
1903	if (!check_maxtor)
1904		return rc;
1905
1906	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1907
1908	if (strncmp(model_num, "Maxtor", 6) == 0) {
1909		ata_scsi_change_queue_depth(sdev, 1);
1910		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1911			       sdev->queue_depth);
1912	}
1913
1914	return rc;
1915}
1916
1917static int nv_swncq_port_start(struct ata_port *ap)
1918{
1919	struct device *dev = ap->host->dev;
1920	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1921	struct nv_swncq_port_priv *pp;
1922	int rc;
1923
1924	/* we might fall back to bmdma, allocate bmdma resources */
1925	rc = ata_bmdma_port_start(ap);
1926	if (rc)
1927		return rc;
1928
1929	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1930	if (!pp)
1931		return -ENOMEM;
1932
1933	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1934				      &pp->prd_dma, GFP_KERNEL);
1935	if (!pp->prd)
1936		return -ENOMEM;
1937
1938	ap->private_data = pp;
1939	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1940	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1941	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1942
1943	return 0;
1944}
1945
1946static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1947{
1948	if (qc->tf.protocol != ATA_PROT_NCQ) {
1949		ata_bmdma_qc_prep(qc);
1950		return AC_ERR_OK;
1951	}
1952
1953	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1954		return AC_ERR_OK;
1955
1956	nv_swncq_fill_sg(qc);
1957
1958	return AC_ERR_OK;
1959}
1960
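/*
 * Build the legacy BMDMA PRD table for an NCQ command.  Each tag gets its
 * own table (pp->prd + ATA_MAX_PRD * hw_tag) and segments are split so no
 * PRD entry crosses a 64 KiB boundary; the last entry is flagged with
 * ATA_PRD_EOT.
 */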
1961static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1962{
1963	struct ata_port *ap = qc->ap;
1964	struct scatterlist *sg;
1965	struct nv_swncq_port_priv *pp = ap->private_data;
1966	struct ata_bmdma_prd *prd;
1967	unsigned int si, idx;
1968
1969	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1970
1971	idx = 0;
1972	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1973		u32 addr, offset;
1974		u32 sg_len, len;
1975
1976		addr = (u32)sg_dma_address(sg);
1977		sg_len = sg_dma_len(sg);
1978
1979		while (sg_len) {
1980			offset = addr & 0xffff;
1981			len = sg_len;
1982			if ((offset + sg_len) > 0x10000)
1983				len = 0x10000 - offset;
1984
1985			prd[idx].addr = cpu_to_le32(addr);
1986			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1987
1988			idx++;
1989			sg_len -= len;
1990			addr += len;
1991		}
1992	}
1993
1994	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1995}
1996
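/*
 * Actually send one NCQ command to the drive: set the tag's bit in the
 * SActive register, update the bookkeeping bitmaps, then load the taskfile
 * and issue the command through the normal SFF ops.
 */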
1997static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1998					  struct ata_queued_cmd *qc)
1999{
2000	struct nv_swncq_port_priv *pp = ap->private_data;
2001
2002	if (qc == NULL)
2003		return 0;
2004
2005	writel((1 << qc->hw_tag), pp->sactive_block);
2006	pp->last_issue_tag = qc->hw_tag;
2007	pp->dhfis_bits &= ~(1 << qc->hw_tag);
2008	pp->dmafis_bits &= ~(1 << qc->hw_tag);
2009	pp->qc_active |= (0x1 << qc->hw_tag);
2010
2011	trace_ata_tf_load(ap, &qc->tf);
2012	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2013	trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
2014	ap->ops->sff_exec_command(ap, &qc->tf);
2015
2016	return 0;
2017}
2018
2019static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2020{
2021	struct ata_port *ap = qc->ap;
2022	struct nv_swncq_port_priv *pp = ap->private_data;
2023
2024	if (qc->tf.protocol != ATA_PROT_NCQ)
2025		return ata_bmdma_qc_issue(qc);
2026
2027	if (!pp->qc_active)
2028		nv_swncq_issue_atacmd(ap, qc);
2029	else
2030		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2031
2032	return 0;
2033}
2034
2035static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2036{
2037	u32 serror;
2038	struct ata_eh_info *ehi = &ap->link.eh_info;
2039
2040	ata_ehi_clear_desc(ehi);
2041
2042	/* AHCI needs SError cleared; otherwise, it might lock up */
2043	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2044	sata_scr_write(&ap->link, SCR_ERROR, serror);
2045
2046	/* analyze @fis */
2047	if (fis & NV_SWNCQ_IRQ_ADDED)
2048		ata_ehi_push_desc(ehi, "hot plug");
2049	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2050		ata_ehi_push_desc(ehi, "hot unplug");
2051
2052	ata_ehi_hotplugged(ehi);
2053
2054	/* okay, let's hand over to EH */
2055	ehi->serror |= serror;
2056
2057	ata_port_freeze(ap);
2058}
2059
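/*
 * Handle a Set Device Bits FIS: tags that were active in pp->qc_active but
 * are no longer set in SActive have completed, so complete them and then
 * either reissue the last command (if its D2H Register FIS never arrived)
 * or kick the next command off the defer queue.
 */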
2060static int nv_swncq_sdbfis(struct ata_port *ap)
2061{
2062	struct ata_queued_cmd *qc;
2063	struct nv_swncq_port_priv *pp = ap->private_data;
2064	struct ata_eh_info *ehi = &ap->link.eh_info;
2065	u32 sactive;
2066	u32 done_mask;
2067	u8 host_stat;
2068	u8 lack_dhfis = 0;
2069
2070	host_stat = ap->ops->bmdma_status(ap);
2071	trace_ata_bmdma_status(ap, host_stat);
2072	if (unlikely(host_stat & ATA_DMA_ERR)) {
2073		/* error when transferring data to/from memory */
2074		ata_ehi_clear_desc(ehi);
2075		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2076		ehi->err_mask |= AC_ERR_HOST_BUS;
2077		ehi->action |= ATA_EH_RESET;
2078		return -EINVAL;
2079	}
2080
2081	ap->ops->sff_irq_clear(ap);
2082	__ata_bmdma_stop(ap);
2083
2084	sactive = readl(pp->sactive_block);
2085	done_mask = pp->qc_active ^ sactive;
2086
2087	pp->qc_active &= ~done_mask;
2088	pp->dhfis_bits &= ~done_mask;
2089	pp->dmafis_bits &= ~done_mask;
2090	pp->sdbfis_bits |= done_mask;
2091	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2092
2093	if (!ap->qc_active) {
2094		ata_port_dbg(ap, "over\n");
2095		nv_swncq_pp_reinit(ap);
2096		return 0;
2097	}
2098
2099	if (pp->qc_active & pp->dhfis_bits)
2100		return 0;
2101
2102	if ((pp->ncq_flags & ncq_saw_backout) ||
2103	    (pp->qc_active ^ pp->dhfis_bits))
2104		/* if the controller can't get a Device-to-Host Register FIS,
2105		 * the driver needs to reissue the command.
2106		 */
2107		lack_dhfis = 1;
2108
2109	ata_port_dbg(ap, "QC: qc_active 0x%llx,"
2110		     "SWNCQ:qc_active 0x%X defer_bits %X "
2111		     "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2112		     ap->qc_active, pp->qc_active,
2113		     pp->defer_queue.defer_bits, pp->dhfis_bits,
2114		     pp->dmafis_bits, pp->last_issue_tag);
2115
2116	nv_swncq_fis_reinit(ap);
2117
2118	if (lack_dhfis) {
2119		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2120		nv_swncq_issue_atacmd(ap, qc);
2121		return 0;
2122	}
2123
2124	if (pp->defer_queue.defer_bits) {
2125		/* send deferral queue command */
2126		qc = nv_swncq_qc_from_dq(ap);
2127		WARN_ON(qc == NULL);
2128		nv_swncq_issue_atacmd(ap, qc);
2129	}
2130
2131	return 0;
2132}
2133
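/*
 * The MCP55 tag register (pp->tag_block) appears to latch the tag of the
 * command the drive selected in its DMA Setup FIS; the tag sits in bits
 * 2-6, hence the shift and the 0x1f mask.
 */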
2134static inline u32 nv_swncq_tag(struct ata_port *ap)
2135{
2136	struct nv_swncq_port_priv *pp = ap->private_data;
2137	u32 tag;
2138
2139	tag = readb(pp->tag_block) >> 2;
2140	return (tag & 0x1f);
2141}
2142
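/*
 * A DMA Setup FIS arrived: find out which tag the drive wants to transfer,
 * point the BMDMA engine at that tag's PRD table, program the direction
 * from the taskfile flags and start the transfer.
 */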
2143static void nv_swncq_dmafis(struct ata_port *ap)
2144{
2145	struct ata_queued_cmd *qc;
2146	unsigned int rw;
2147	u8 dmactl;
2148	u32 tag;
2149	struct nv_swncq_port_priv *pp = ap->private_data;
2150
2151	__ata_bmdma_stop(ap);
2152	tag = nv_swncq_tag(ap);
2153
2154	ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
2155	qc = ata_qc_from_tag(ap, tag);
2156
2157	if (unlikely(!qc))
2158		return;
2159
2160	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2161
2162	/* load PRD table addr. */
2163	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2164		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2165
2166	/* specify data direction, triple-check start bit is clear */
2167	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2168	dmactl &= ~ATA_DMA_WR;
2169	if (!rw)
2170		dmactl |= ATA_DMA_WR;
2171
2172	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2173}
2174
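/*
 * Per-port SWNCQ interrupt handling.  The 16-bit status is acked first,
 * then decoded: hotplug events freeze the port, device errors are handed
 * to EH, a backout is remembered so the command can be reissued, an SDB
 * FIS completes finished tags, a D2H Register FIS acknowledges the last
 * issued command (and may allow the next deferred one to go out), and a
 * DMA Setup FIS triggers programming of the BMDMA engine.
 */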
2175static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2176{
2177	struct nv_swncq_port_priv *pp = ap->private_data;
2178	struct ata_queued_cmd *qc;
2179	struct ata_eh_info *ehi = &ap->link.eh_info;
2180	u32 serror;
2181	u8 ata_stat;
2182
2183	ata_stat = ap->ops->sff_check_status(ap);
2184	nv_swncq_irq_clear(ap, fis);
2185	if (!fis)
2186		return;
2187
2188	if (ata_port_is_frozen(ap))
2189		return;
2190
2191	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2192		nv_swncq_hotplug(ap, fis);
2193		return;
2194	}
2195
2196	if (!pp->qc_active)
2197		return;
2198
2199	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2200		return;
2201	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2202
2203	if (ata_stat & ATA_ERR) {
2204		ata_ehi_clear_desc(ehi);
2205		ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis);
2206		ehi->err_mask |= AC_ERR_DEV;
2207		ehi->serror |= serror;
2208		ehi->action |= ATA_EH_RESET;
2209		ata_port_freeze(ap);
2210		return;
2211	}
2212
2213	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2214		/* If the IRQ indicates a backout, the driver must
2215		 * reissue the command some time later.
2216		 */
2217		pp->ncq_flags |= ncq_saw_backout;
2218	}
2219
2220	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2221		pp->ncq_flags |= ncq_saw_sdb;
2222		ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
2223			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2224			pp->qc_active, pp->dhfis_bits,
2225			pp->dmafis_bits, readl(pp->sactive_block));
2226		if (nv_swncq_sdbfis(ap) < 0)
2227			goto irq_error;
2228	}
2229
2230	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2231		/* The interrupt indicates the new command
2232		 * was transmitted correctly to the drive.
2233		 */
2234		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2235		pp->ncq_flags |= ncq_saw_d2h;
2236		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2237			ata_ehi_push_desc(ehi, "illegal fis transaction");
2238			ehi->err_mask |= AC_ERR_HSM;
2239			ehi->action |= ATA_EH_RESET;
2240			goto irq_error;
2241		}
2242
2243		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2244		    !(pp->ncq_flags & ncq_saw_dmas)) {
2245			ata_stat = ap->ops->sff_check_status(ap);
2246			if (ata_stat & ATA_BUSY)
2247				goto irq_exit;
2248
2249			if (pp->defer_queue.defer_bits) {
2250				ata_port_dbg(ap, "send next command\n");
2251				qc = nv_swncq_qc_from_dq(ap);
2252				nv_swncq_issue_atacmd(ap, qc);
2253			}
2254		}
2255	}
2256
2257	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2258		/* program the DMA controller with the appropriate PRD buffers
2259		 * and start the DMA transfer for the requested command.
2260		 */
2261		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2262		pp->ncq_flags |= ncq_saw_dmas;
2263		nv_swncq_dmafis(ap);
2264	}
2265
2266irq_exit:
2267	return;
2268irq_error:
2269	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2270	ata_port_freeze(ap);
2271	return;
2272}
2273
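/*
 * Top-level MCP55 interrupt handler: read the combined status register
 * once, then give each port its 16-bit slice.  Ports with NCQ commands in
 * flight go through the SWNCQ state machine above; idle ports fall back to
 * the generic nv_host_intr() handler (only the upper status bits are
 * cleared here, so hotplug indications survive for that handler).
 */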
2274static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2275{
2276	struct ata_host *host = dev_instance;
2277	unsigned int i;
2278	unsigned int handled = 0;
2279	unsigned long flags;
2280	u32 irq_stat;
2281
2282	spin_lock_irqsave(&host->lock, flags);
2283
2284	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2285
2286	for (i = 0; i < host->n_ports; i++) {
2287		struct ata_port *ap = host->ports[i];
2288
2289		if (ap->link.sactive) {
2290			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2291			handled = 1;
2292		} else {
2293			if (irq_stat)	/* preserve hotplug bits */
2294				nv_swncq_irq_clear(ap, 0xfff0);
2295
2296			handled += nv_host_intr(ap, (u8)irq_stat);
2297		}
2298		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2299	}
2300
2301	spin_unlock_irqrestore(&host->lock, flags);
2302
2303	return IRQ_RETVAL(handled);
2304}
2305
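/*
 * PCI probe.  After checking that all six BARs are populated (IDE-mode
 * parts expose fewer), the controller flavour is picked from the device
 * table and the adma/swncq module parameters, BAR5 is mapped for SCR and
 * ADMA/SWNCQ register access, the extended SATA register space is enabled
 * for CK804 and newer parts, and the host is activated with the flavour's
 * interrupt handler.
 */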
2306static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2307{
2308	const struct ata_port_info *ppi[] = { NULL, NULL };
2309	struct nv_pi_priv *ipriv;
2310	struct ata_host *host;
2311	struct nv_host_priv *hpriv;
2312	int rc;
2313	u32 bar;
2314	void __iomem *base;
2315	unsigned long type = ent->driver_data;
2316
2317	// Make sure this is a SATA controller by checking that all six standard
2318	// BARs are present (NVIDIA SATA controllers always have six BARs).
2319	// Otherwise, it's an IDE controller and we ignore it.
2320	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
2321		if (pci_resource_start(pdev, bar) == 0)
2322			return -ENODEV;
2323
2324	ata_print_version_once(&pdev->dev, DRV_VERSION);
2325
2326	rc = pcim_enable_device(pdev);
2327	if (rc)
2328		return rc;
2329
2330	/* determine type and allocate host */
2331	if (type == CK804 && adma_enabled) {
2332		dev_notice(&pdev->dev, "Using ADMA mode\n");
2333		type = ADMA;
2334	} else if (type == MCP5x && swncq_enabled) {
2335		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2336		type = SWNCQ;
2337	}
2338
2339	ppi[0] = &nv_port_info[type];
2340	ipriv = ppi[0]->private_data;
2341	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2342	if (rc)
2343		return rc;
2344
2345	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2346	if (!hpriv)
2347		return -ENOMEM;
2348	hpriv->type = type;
2349	host->private_data = hpriv;
2350
2351	/* request and iomap NV_MMIO_BAR */
2352	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2353	if (rc)
2354		return rc;
2355
2356	/* configure SCR access */
2357	base = host->iomap[NV_MMIO_BAR];
2358	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2359	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2360
2361	/* enable SATA space for CK804 */
2362	if (type >= CK804) {
2363		u8 regval;
2364
2365		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2366		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2367		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2368	}
2369
2370	/* init ADMA */
2371	if (type == ADMA) {
2372		rc = nv_adma_host_init(host);
2373		if (rc)
2374			return rc;
2375	} else if (type == SWNCQ)
2376		nv_swncq_host_init(host);
2377
2378	if (msi_enabled) {
2379		dev_notice(&pdev->dev, "Using MSI\n");
2380		pci_enable_msi(pdev);
2381	}
2382
2383	pci_set_master(pdev);
2384	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2385}
2386
2387#ifdef CONFIG_PM_SLEEP
2388static int nv_pci_device_resume(struct pci_dev *pdev)
2389{
2390	struct ata_host *host = pci_get_drvdata(pdev);
2391	struct nv_host_priv *hpriv = host->private_data;
2392	int rc;
2393
2394	rc = ata_pci_device_do_resume(pdev);
2395	if (rc)
2396		return rc;
2397
2398	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2399		if (hpriv->type >= CK804) {
2400			u8 regval;
2401
2402			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2403			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2404			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2405		}
2406		if (hpriv->type == ADMA) {
2407			u32 tmp32;
2408			struct nv_adma_port_priv *pp;
2409			/* enable/disable ADMA on the ports appropriately */
2410			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2411
2412			pp = host->ports[0]->private_data;
2413			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2414				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2415					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2416			else
2417				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2418					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2419			pp = host->ports[1]->private_data;
2420			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2421				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2422					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2423			else
2424				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2425					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2426
2427			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2428		}
2429	}
2430
2431	ata_host_resume(host);
2432
2433	return 0;
2434}
2435#endif
2436
2437static void nv_ck804_host_stop(struct ata_host *host)
2438{
2439	struct pci_dev *pdev = to_pci_dev(host->dev);
2440	u8 regval;
2441
2442	/* disable SATA space for CK804 */
2443	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2444	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2445	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2446}
2447
2448static void nv_adma_host_stop(struct ata_host *host)
2449{
2450	struct pci_dev *pdev = to_pci_dev(host->dev);
2451	u32 tmp32;
2452
2453	/* disable ADMA on the ports */
2454	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2455	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2456		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2457		   NV_MCP_SATA_CFG_20_PORT1_EN |
2458		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2459
2460	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2461
2462	nv_ck804_host_stop(host);
2463}
2464
2465module_pci_driver(nv_pci_driver);
2466
2467module_param_named(adma, adma_enabled, bool, 0444);
2468MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2469module_param_named(swncq, swncq_enabled, bool, 0444);
2470MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2471module_param_named(msi, msi_enabled, bool, 0444);
2472MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
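/*
 * Illustrative usage of the parameters above (assumed typical modprobe
 * syntax, not part of this file):
 *
 *   modprobe sata_nv adma=1 msi=1    # opt in to ADMA on CK804/MCP04 + MSI
 *   modprobe sata_nv swncq=0         # keep MCP5x ports in plain SATA mode
 *
 * All three are bool parameters and, with 0444 permissions, are readable
 * at runtime via /sys/module/sata_nv/parameters/.
 */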
   1/*
   2 *  sata_nv.c - NVIDIA nForce SATA
   3 *
   4 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
   5 *  Copyright 2004 Andrew Chew
   6 *
   7 *
   8 *  This program is free software; you can redistribute it and/or modify
   9 *  it under the terms of the GNU General Public License as published by
  10 *  the Free Software Foundation; either version 2, or (at your option)
  11 *  any later version.
  12 *
  13 *  This program is distributed in the hope that it will be useful,
  14 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 *  GNU General Public License for more details.
  17 *
  18 *  You should have received a copy of the GNU General Public License
  19 *  along with this program; see the file COPYING.  If not, write to
  20 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  21 *
  22 *
  23 *  libata documentation is available via 'make {ps|pdf}docs',
  24 *  as Documentation/DocBook/libata.*
  25 *
  26 *  No hardware documentation available outside of NVIDIA.
  27 *  This driver programs the NVIDIA SATA controller in a similar
  28 *  fashion as with other PCI IDE BMDMA controllers, with a few
  29 *  NV-specific details such as register offsets, SATA phy location,
  30 *  hotplug info, etc.
  31 *
  32 *  CK804/MCP04 controllers support an alternate programming interface
  33 *  similar to the ADMA specification (with some modifications).
  34 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
  35 *  sent through the legacy interface.
  36 *
  37 */
  38
  39#include <linux/kernel.h>
  40#include <linux/module.h>
  41#include <linux/gfp.h>
  42#include <linux/pci.h>
  43#include <linux/init.h>
  44#include <linux/blkdev.h>
  45#include <linux/delay.h>
  46#include <linux/interrupt.h>
  47#include <linux/device.h>
  48#include <scsi/scsi_host.h>
  49#include <scsi/scsi_device.h>
  50#include <linux/libata.h>
 
  51
  52#define DRV_NAME			"sata_nv"
  53#define DRV_VERSION			"3.5"
  54
  55#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
  56
  57enum {
  58	NV_MMIO_BAR			= 5,
  59
  60	NV_PORTS			= 2,
  61	NV_PIO_MASK			= ATA_PIO4,
  62	NV_MWDMA_MASK			= ATA_MWDMA2,
  63	NV_UDMA_MASK			= ATA_UDMA6,
  64	NV_PORT0_SCR_REG_OFFSET		= 0x00,
  65	NV_PORT1_SCR_REG_OFFSET		= 0x40,
  66
  67	/* INT_STATUS/ENABLE */
  68	NV_INT_STATUS			= 0x10,
  69	NV_INT_ENABLE			= 0x11,
  70	NV_INT_STATUS_CK804		= 0x440,
  71	NV_INT_ENABLE_CK804		= 0x441,
  72
  73	/* INT_STATUS/ENABLE bits */
  74	NV_INT_DEV			= 0x01,
  75	NV_INT_PM			= 0x02,
  76	NV_INT_ADDED			= 0x04,
  77	NV_INT_REMOVED			= 0x08,
  78
  79	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
  80
  81	NV_INT_ALL			= 0x0f,
  82	NV_INT_MASK			= NV_INT_DEV |
  83					  NV_INT_ADDED | NV_INT_REMOVED,
  84
  85	/* INT_CONFIG */
  86	NV_INT_CONFIG			= 0x12,
  87	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
  88
  89	// For PCI config register 20
  90	NV_MCP_SATA_CFG_20		= 0x50,
  91	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
  92	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
  93	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
  94	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
  95	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
  96
  97	NV_ADMA_MAX_CPBS		= 32,
  98	NV_ADMA_CPB_SZ			= 128,
  99	NV_ADMA_APRD_SZ			= 16,
 100	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
 101					   NV_ADMA_APRD_SZ,
 102	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
 103	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
 104	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
 105					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
 106
 107	/* BAR5 offset to ADMA general registers */
 108	NV_ADMA_GEN			= 0x400,
 109	NV_ADMA_GEN_CTL			= 0x00,
 110	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
 111
 112	/* BAR5 offset to ADMA ports */
 113	NV_ADMA_PORT			= 0x480,
 114
 115	/* size of ADMA port register space  */
 116	NV_ADMA_PORT_SIZE		= 0x100,
 117
 118	/* ADMA port registers */
 119	NV_ADMA_CTL			= 0x40,
 120	NV_ADMA_CPB_COUNT		= 0x42,
 121	NV_ADMA_NEXT_CPB_IDX		= 0x43,
 122	NV_ADMA_STAT			= 0x44,
 123	NV_ADMA_CPB_BASE_LOW		= 0x48,
 124	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
 125	NV_ADMA_APPEND			= 0x50,
 126	NV_ADMA_NOTIFIER		= 0x68,
 127	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
 128
 129	/* NV_ADMA_CTL register bits */
 130	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
 131	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
 132	NV_ADMA_CTL_GO			= (1 << 7),
 133	NV_ADMA_CTL_AIEN		= (1 << 8),
 134	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
 135	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
 136
 137	/* CPB response flag bits */
 138	NV_CPB_RESP_DONE		= (1 << 0),
 139	NV_CPB_RESP_ATA_ERR		= (1 << 3),
 140	NV_CPB_RESP_CMD_ERR		= (1 << 4),
 141	NV_CPB_RESP_CPB_ERR		= (1 << 7),
 142
 143	/* CPB control flag bits */
 144	NV_CPB_CTL_CPB_VALID		= (1 << 0),
 145	NV_CPB_CTL_QUEUE		= (1 << 1),
 146	NV_CPB_CTL_APRD_VALID		= (1 << 2),
 147	NV_CPB_CTL_IEN			= (1 << 3),
 148	NV_CPB_CTL_FPDMA		= (1 << 4),
 149
 150	/* APRD flags */
 151	NV_APRD_WRITE			= (1 << 1),
 152	NV_APRD_END			= (1 << 2),
 153	NV_APRD_CONT			= (1 << 3),
 154
 155	/* NV_ADMA_STAT flags */
 156	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
 157	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
 158	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
 159	NV_ADMA_STAT_CPBERR		= (1 << 4),
 160	NV_ADMA_STAT_SERROR		= (1 << 5),
 161	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
 162	NV_ADMA_STAT_IDLE		= (1 << 8),
 163	NV_ADMA_STAT_LEGACY		= (1 << 9),
 164	NV_ADMA_STAT_STOPPED		= (1 << 10),
 165	NV_ADMA_STAT_DONE		= (1 << 12),
 166	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
 167					  NV_ADMA_STAT_TIMEOUT,
 168
 169	/* port flags */
 170	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
 171	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
 172
 173	/* MCP55 reg offset */
 174	NV_CTL_MCP55			= 0x400,
 175	NV_INT_STATUS_MCP55		= 0x440,
 176	NV_INT_ENABLE_MCP55		= 0x444,
 177	NV_NCQ_REG_MCP55		= 0x448,
 178
 179	/* MCP55 */
 180	NV_INT_ALL_MCP55		= 0xffff,
 181	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
 182	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
 183
 184	/* SWNCQ ENABLE BITS*/
 185	NV_CTL_PRI_SWNCQ		= 0x02,
 186	NV_CTL_SEC_SWNCQ		= 0x04,
 187
 188	/* SW NCQ status bits*/
 189	NV_SWNCQ_IRQ_DEV		= (1 << 0),
 190	NV_SWNCQ_IRQ_PM			= (1 << 1),
 191	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
 192	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
 193
 194	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
 195	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
 196	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
 197	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
 198
 199	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
 200					  NV_SWNCQ_IRQ_REMOVED,
 201
 202};
 203
 204/* ADMA Physical Region Descriptor - one SG segment */
 205struct nv_adma_prd {
 206	__le64			addr;
 207	__le32			len;
 208	u8			flags;
 209	u8			packet_len;
 210	__le16			reserved;
 211};
 212
 213enum nv_adma_regbits {
 214	CMDEND	= (1 << 15),		/* end of command list */
 215	WNB	= (1 << 14),		/* wait-not-BSY */
 216	IGN	= (1 << 13),		/* ignore this entry */
 217	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
 218	DA2	= (1 << (2 + 8)),
 219	DA1	= (1 << (1 + 8)),
 220	DA0	= (1 << (0 + 8)),
 221};
 222
 223/* ADMA Command Parameter Block
 224   The first 5 SG segments are stored inside the Command Parameter Block itself.
 225   If there are more than 5 segments the remainder are stored in a separate
 226   memory area indicated by next_aprd. */
 227struct nv_adma_cpb {
 228	u8			resp_flags;    /* 0 */
 229	u8			reserved1;     /* 1 */
 230	u8			ctl_flags;     /* 2 */
 231	/* len is length of taskfile in 64 bit words */
 232	u8			len;		/* 3  */
 233	u8			tag;           /* 4 */
 234	u8			next_cpb_idx;  /* 5 */
 235	__le16			reserved2;     /* 6-7 */
 236	__le16			tf[12];        /* 8-31 */
 237	struct nv_adma_prd	aprd[5];       /* 32-111 */
 238	__le64			next_aprd;     /* 112-119 */
 239	__le64			reserved3;     /* 120-127 */
 240};
 241
 242
 243struct nv_adma_port_priv {
 244	struct nv_adma_cpb	*cpb;
 245	dma_addr_t		cpb_dma;
 246	struct nv_adma_prd	*aprd;
 247	dma_addr_t		aprd_dma;
 248	void __iomem		*ctl_block;
 249	void __iomem		*gen_block;
 250	void __iomem		*notifier_clear_block;
 251	u64			adma_dma_mask;
 252	u8			flags;
 253	int			last_issue_ncq;
 254};
 255
 256struct nv_host_priv {
 257	unsigned long		type;
 258};
 259
 260struct defer_queue {
 261	u32		defer_bits;
 262	unsigned int	head;
 263	unsigned int	tail;
 264	unsigned int	tag[ATA_MAX_QUEUE];
 265};
 266
 267enum ncq_saw_flag_list {
 268	ncq_saw_d2h	= (1U << 0),
 269	ncq_saw_dmas	= (1U << 1),
 270	ncq_saw_sdb	= (1U << 2),
 271	ncq_saw_backout	= (1U << 3),
 272};
 273
 274struct nv_swncq_port_priv {
 275	struct ata_bmdma_prd *prd;	 /* our SG list */
 276	dma_addr_t	prd_dma; /* and its DMA mapping */
 277	void __iomem	*sactive_block;
 278	void __iomem	*irq_block;
 279	void __iomem	*tag_block;
 280	u32		qc_active;
 281
 282	unsigned int	last_issue_tag;
 283
 284	/* fifo circular queue to store deferral command */
 285	struct defer_queue defer_queue;
 286
 287	/* for NCQ interrupt analysis */
 288	u32		dhfis_bits;
 289	u32		dmafis_bits;
 290	u32		sdbfis_bits;
 291
 292	unsigned int	ncq_flags;
 293};
 294
 295
 296#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
 297
 298static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 299#ifdef CONFIG_PM
 300static int nv_pci_device_resume(struct pci_dev *pdev);
 301#endif
 302static void nv_ck804_host_stop(struct ata_host *host);
 303static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
 304static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
 305static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
 306static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 307static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 308
 309static int nv_hardreset(struct ata_link *link, unsigned int *class,
 310			unsigned long deadline);
 311static void nv_nf2_freeze(struct ata_port *ap);
 312static void nv_nf2_thaw(struct ata_port *ap);
 313static void nv_ck804_freeze(struct ata_port *ap);
 314static void nv_ck804_thaw(struct ata_port *ap);
 315static int nv_adma_slave_config(struct scsi_device *sdev);
 316static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 317static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
 318static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 319static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 320static void nv_adma_irq_clear(struct ata_port *ap);
 321static int nv_adma_port_start(struct ata_port *ap);
 322static void nv_adma_port_stop(struct ata_port *ap);
 323#ifdef CONFIG_PM
 324static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
 325static int nv_adma_port_resume(struct ata_port *ap);
 326#endif
 327static void nv_adma_freeze(struct ata_port *ap);
 328static void nv_adma_thaw(struct ata_port *ap);
 329static void nv_adma_error_handler(struct ata_port *ap);
 330static void nv_adma_host_stop(struct ata_host *host);
 331static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
 332static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 333
 334static void nv_mcp55_thaw(struct ata_port *ap);
 335static void nv_mcp55_freeze(struct ata_port *ap);
 336static void nv_swncq_error_handler(struct ata_port *ap);
 337static int nv_swncq_slave_config(struct scsi_device *sdev);
 338static int nv_swncq_port_start(struct ata_port *ap);
 339static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 340static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 341static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 342static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
 343static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
 344#ifdef CONFIG_PM
 345static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
 346static int nv_swncq_port_resume(struct ata_port *ap);
 347#endif
 348
 349enum nv_host_type
 350{
 351	GENERIC,
 352	NFORCE2,
 353	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
 354	CK804,
 355	ADMA,
 356	MCP5x,
 357	SWNCQ,
 358};
 359
 360static const struct pci_device_id nv_pci_tbl[] = {
 361	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
 362	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
 363	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
 364	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
 365	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
 366	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
 367	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
 368	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
 369	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
 370	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
 371	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
 372	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 373	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 374	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
 375
 376	{ } /* terminate list */
 377};
 378
 379static struct pci_driver nv_pci_driver = {
 380	.name			= DRV_NAME,
 381	.id_table		= nv_pci_tbl,
 382	.probe			= nv_init_one,
 383#ifdef CONFIG_PM
 384	.suspend		= ata_pci_device_suspend,
 385	.resume			= nv_pci_device_resume,
 386#endif
 387	.remove			= ata_pci_remove_one,
 388};
 389
 390static struct scsi_host_template nv_sht = {
 391	ATA_BMDMA_SHT(DRV_NAME),
 392};
 393
 394static struct scsi_host_template nv_adma_sht = {
 395	ATA_NCQ_SHT(DRV_NAME),
 396	.can_queue		= NV_ADMA_MAX_CPBS,
 397	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
 398	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
 399	.slave_configure	= nv_adma_slave_config,
 
 
 
 400};
 401
 402static struct scsi_host_template nv_swncq_sht = {
 403	ATA_NCQ_SHT(DRV_NAME),
 404	.can_queue		= ATA_MAX_QUEUE,
 405	.sg_tablesize		= LIBATA_MAX_PRD,
 406	.dma_boundary		= ATA_DMA_BOUNDARY,
 407	.slave_configure	= nv_swncq_slave_config,
 
 
 
 408};
 409
 410/*
 411 * NV SATA controllers have various different problems with hardreset
 412 * protocol depending on the specific controller and device.
 413 *
 414 * GENERIC:
 415 *
 416 *  bko11195 reports that link doesn't come online after hardreset on
 417 *  generic nv's and there have been several other similar reports on
 418 *  linux-ide.
 419 *
 420 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 421 *  softreset.
 422 *
 423 * NF2/3:
 424 *
 425 *  bko3352 reports nf2/3 controllers can't determine device signature
 426 *  reliably after hardreset.  The following thread reports detection
 427 *  failure on cold boot with the standard debouncing timing.
 428 *
 429 *  http://thread.gmane.org/gmane.linux.ide/34098
 430 *
 431 *  bko12176 reports that hardreset fails to bring up the link during
 432 *  boot on nf2.
 433 *
 434 * CK804:
 435 *
 436 *  For initial probing after boot and hot plugging, hardreset mostly
 437 *  works fine on CK804 but curiously, reprobing on the initial port
 438 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 439 *  FIS in somewhat undeterministic way.
 440 *
 441 * SWNCQ:
 442 *
 443 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 444 *  hardreset should be used and hardreset can't report proper
 445 *  signature, which suggests that mcp5x is closer to nf2 as long as
 446 *  reset quirkiness is concerned.
 447 *
 448 *  bko12703 reports that boot probing fails for intel SSD with
 449 *  hardreset.  Link fails to come online.  Softreset works fine.
 450 *
 451 * The failures are varied but the following patterns seem true for
 452 * all flavors.
 453 *
 454 * - Softreset during boot always works.
 455 *
 456 * - Hardreset during boot sometimes fails to bring up the link on
 457 *   certain comibnations and device signature acquisition is
 458 *   unreliable.
 459 *
 460 * - Hardreset is often necessary after hotplug.
 461 *
 462 * So, preferring softreset for boot probing and error handling (as
 463 * hardreset might bring down the link) but using hardreset for
 464 * post-boot probing should work around the above issues in most
 465 * cases.  Define nv_hardreset() which only kicks in for post-boot
 466 * probing and use it for all variants.
 467 */
 468static struct ata_port_operations nv_generic_ops = {
 469	.inherits		= &ata_bmdma_port_ops,
 470	.lost_interrupt		= ATA_OP_NULL,
 471	.scr_read		= nv_scr_read,
 472	.scr_write		= nv_scr_write,
 473	.hardreset		= nv_hardreset,
 474};
 475
 476static struct ata_port_operations nv_nf2_ops = {
 477	.inherits		= &nv_generic_ops,
 478	.freeze			= nv_nf2_freeze,
 479	.thaw			= nv_nf2_thaw,
 480};
 481
 482static struct ata_port_operations nv_ck804_ops = {
 483	.inherits		= &nv_generic_ops,
 484	.freeze			= nv_ck804_freeze,
 485	.thaw			= nv_ck804_thaw,
 486	.host_stop		= nv_ck804_host_stop,
 487};
 488
 489static struct ata_port_operations nv_adma_ops = {
 490	.inherits		= &nv_ck804_ops,
 491
 492	.check_atapi_dma	= nv_adma_check_atapi_dma,
 493	.sff_tf_read		= nv_adma_tf_read,
 494	.qc_defer		= ata_std_qc_defer,
 495	.qc_prep		= nv_adma_qc_prep,
 496	.qc_issue		= nv_adma_qc_issue,
 497	.sff_irq_clear		= nv_adma_irq_clear,
 498
 499	.freeze			= nv_adma_freeze,
 500	.thaw			= nv_adma_thaw,
 501	.error_handler		= nv_adma_error_handler,
 502	.post_internal_cmd	= nv_adma_post_internal_cmd,
 503
 504	.port_start		= nv_adma_port_start,
 505	.port_stop		= nv_adma_port_stop,
 506#ifdef CONFIG_PM
 507	.port_suspend		= nv_adma_port_suspend,
 508	.port_resume		= nv_adma_port_resume,
 509#endif
 510	.host_stop		= nv_adma_host_stop,
 511};
 512
 513static struct ata_port_operations nv_swncq_ops = {
 514	.inherits		= &nv_generic_ops,
 515
 516	.qc_defer		= ata_std_qc_defer,
 517	.qc_prep		= nv_swncq_qc_prep,
 518	.qc_issue		= nv_swncq_qc_issue,
 519
 520	.freeze			= nv_mcp55_freeze,
 521	.thaw			= nv_mcp55_thaw,
 522	.error_handler		= nv_swncq_error_handler,
 523
 524#ifdef CONFIG_PM
 525	.port_suspend		= nv_swncq_port_suspend,
 526	.port_resume		= nv_swncq_port_resume,
 527#endif
 528	.port_start		= nv_swncq_port_start,
 529};
 530
 531struct nv_pi_priv {
 532	irq_handler_t			irq_handler;
 533	struct scsi_host_template	*sht;
 534};
 535
 536#define NV_PI_PRIV(_irq_handler, _sht) \
 537	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
 538
 539static const struct ata_port_info nv_port_info[] = {
 540	/* generic */
 541	{
 542		.flags		= ATA_FLAG_SATA,
 543		.pio_mask	= NV_PIO_MASK,
 544		.mwdma_mask	= NV_MWDMA_MASK,
 545		.udma_mask	= NV_UDMA_MASK,
 546		.port_ops	= &nv_generic_ops,
 547		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 548	},
 549	/* nforce2/3 */
 550	{
 551		.flags		= ATA_FLAG_SATA,
 552		.pio_mask	= NV_PIO_MASK,
 553		.mwdma_mask	= NV_MWDMA_MASK,
 554		.udma_mask	= NV_UDMA_MASK,
 555		.port_ops	= &nv_nf2_ops,
 556		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
 557	},
 558	/* ck804 */
 559	{
 560		.flags		= ATA_FLAG_SATA,
 561		.pio_mask	= NV_PIO_MASK,
 562		.mwdma_mask	= NV_MWDMA_MASK,
 563		.udma_mask	= NV_UDMA_MASK,
 564		.port_ops	= &nv_ck804_ops,
 565		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
 566	},
 567	/* ADMA */
 568	{
 569		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
 570		.pio_mask	= NV_PIO_MASK,
 571		.mwdma_mask	= NV_MWDMA_MASK,
 572		.udma_mask	= NV_UDMA_MASK,
 573		.port_ops	= &nv_adma_ops,
 574		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
 575	},
 576	/* MCP5x */
 577	{
 578		.flags		= ATA_FLAG_SATA,
 579		.pio_mask	= NV_PIO_MASK,
 580		.mwdma_mask	= NV_MWDMA_MASK,
 581		.udma_mask	= NV_UDMA_MASK,
 582		.port_ops	= &nv_generic_ops,
 583		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 584	},
 585	/* SWNCQ */
 586	{
 587		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
 588		.pio_mask	= NV_PIO_MASK,
 589		.mwdma_mask	= NV_MWDMA_MASK,
 590		.udma_mask	= NV_UDMA_MASK,
 591		.port_ops	= &nv_swncq_ops,
 592		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
 593	},
 594};
 595
 596MODULE_AUTHOR("NVIDIA");
 597MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
 598MODULE_LICENSE("GPL");
 599MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 600MODULE_VERSION(DRV_VERSION);
 601
 602static int adma_enabled;
 603static int swncq_enabled = 1;
 604static int msi_enabled;
 605
 606static void nv_adma_register_mode(struct ata_port *ap)
 607{
 608	struct nv_adma_port_priv *pp = ap->private_data;
 609	void __iomem *mmio = pp->ctl_block;
 610	u16 tmp, status;
 611	int count = 0;
 612
 613	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
 614		return;
 615
 616	status = readw(mmio + NV_ADMA_STAT);
 617	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
 618		ndelay(50);
 619		status = readw(mmio + NV_ADMA_STAT);
 620		count++;
 621	}
 622	if (count == 20)
 623		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
 624			      status);
 625
 626	tmp = readw(mmio + NV_ADMA_CTL);
 627	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 628
 629	count = 0;
 630	status = readw(mmio + NV_ADMA_STAT);
 631	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
 632		ndelay(50);
 633		status = readw(mmio + NV_ADMA_STAT);
 634		count++;
 635	}
 636	if (count == 20)
 637		ata_port_warn(ap,
 638			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
 639			      status);
 640
 641	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
 642}
 643
 644static void nv_adma_mode(struct ata_port *ap)
 645{
 646	struct nv_adma_port_priv *pp = ap->private_data;
 647	void __iomem *mmio = pp->ctl_block;
 648	u16 tmp, status;
 649	int count = 0;
 650
 651	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
 652		return;
 653
 654	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 655
 656	tmp = readw(mmio + NV_ADMA_CTL);
 657	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 658
 659	status = readw(mmio + NV_ADMA_STAT);
 660	while (((status & NV_ADMA_STAT_LEGACY) ||
 661	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
 662		ndelay(50);
 663		status = readw(mmio + NV_ADMA_STAT);
 664		count++;
 665	}
 666	if (count == 20)
 667		ata_port_warn(ap,
 668			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
 669			status);
 670
 671	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
 672}
 673
 674static int nv_adma_slave_config(struct scsi_device *sdev)
 675{
 676	struct ata_port *ap = ata_shost_to_port(sdev->host);
 677	struct nv_adma_port_priv *pp = ap->private_data;
 678	struct nv_adma_port_priv *port0, *port1;
 679	struct scsi_device *sdev0, *sdev1;
 680	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 681	unsigned long segment_boundary, flags;
 682	unsigned short sg_tablesize;
 683	int rc;
 684	int adma_enable;
 685	u32 current_reg, new_reg, config_mask;
 686
 687	rc = ata_scsi_slave_config(sdev);
 688
 689	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
 690		/* Not a proper libata device, ignore */
 691		return rc;
 692
 693	spin_lock_irqsave(ap->lock, flags);
 694
 695	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 696		/*
 697		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
 698		 * Therefore ATAPI commands are sent through the legacy interface.
 699		 * However, the legacy interface only supports 32-bit DMA.
 700		 * Restrict DMA parameters as required by the legacy interface
 701		 * when an ATAPI device is connected.
 702		 */
 703		segment_boundary = ATA_DMA_BOUNDARY;
 704		/* Subtract 1 since an extra entry may be needed for padding, see
 705		   libata-scsi.c */
 706		sg_tablesize = LIBATA_MAX_PRD - 1;
 707
 708		/* Since the legacy DMA engine is in use, we need to disable ADMA
 709		   on the port. */
 710		adma_enable = 0;
 711		nv_adma_register_mode(ap);
 712	} else {
 713		segment_boundary = NV_ADMA_DMA_BOUNDARY;
 714		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
 715		adma_enable = 1;
 716	}
 717
 718	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
 719
 720	if (ap->port_no == 1)
 721		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
 722			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
 723	else
 724		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
 725			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
 726
 727	if (adma_enable) {
 728		new_reg = current_reg | config_mask;
 729		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
 730	} else {
 731		new_reg = current_reg & ~config_mask;
 732		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
 733	}
 734
 735	if (current_reg != new_reg)
 736		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 737
 738	port0 = ap->host->ports[0]->private_data;
 739	port1 = ap->host->ports[1]->private_data;
 740	sdev0 = ap->host->ports[0]->link.device[0].sdev;
 741	sdev1 = ap->host->ports[1]->link.device[0].sdev;
 742	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 743	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
 744		/** We have to set the DMA mask to 32-bit if either port is in
 745		    ATAPI mode, since they are on the same PCI device which is
 746		    used for DMA mapping. If we set the mask we also need to set
 747		    the bounce limit on both ports to ensure that the block
 748		    layer doesn't feed addresses that cause DMA mapping to
 749		    choke. If either SCSI device is not allocated yet, it's OK
 750		    since that port will discover its correct setting when it
 751		    does get allocated.
 752		    Note: Setting 32-bit mask should not fail. */
 753		if (sdev0)
 754			blk_queue_bounce_limit(sdev0->request_queue,
 755					       ATA_DMA_MASK);
 756		if (sdev1)
 757			blk_queue_bounce_limit(sdev1->request_queue,
 758					       ATA_DMA_MASK);
 759
 760		pci_set_dma_mask(pdev, ATA_DMA_MASK);
 761	} else {
 762		/** This shouldn't fail as it was set to this value before */
 763		pci_set_dma_mask(pdev, pp->adma_dma_mask);
 764		if (sdev0)
 765			blk_queue_bounce_limit(sdev0->request_queue,
 766					       pp->adma_dma_mask);
 767		if (sdev1)
 768			blk_queue_bounce_limit(sdev1->request_queue,
 769					       pp->adma_dma_mask);
 770	}
 771
 772	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
 773	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 774	ata_port_info(ap,
 775		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 776		      (unsigned long long)*ap->host->dev->dma_mask,
 777		      segment_boundary, sg_tablesize);
 778
 779	spin_unlock_irqrestore(ap->lock, flags);
 780
 781	return rc;
 782}
 783
 784static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
 785{
 786	struct nv_adma_port_priv *pp = qc->ap->private_data;
 787	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
 788}
 789
 790static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 791{
 792	/* Other than when internal or pass-through commands are executed,
 793	   the only time this function will be called in ADMA mode will be
 794	   if a command fails. In the failure case we don't care about going
 795	   into register mode with ADMA commands pending, as the commands will
 796	   all shortly be aborted anyway. We assume that NCQ commands are not
 797	   issued via passthrough, which is the only way that switching into
 798	   ADMA mode could abort outstanding commands. */
 799	nv_adma_register_mode(ap);
 800
 801	ata_sff_tf_read(ap, tf);
 802}
 803
 804static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
 805{
 806	unsigned int idx = 0;
 807
 808	if (tf->flags & ATA_TFLAG_ISADDR) {
 809		if (tf->flags & ATA_TFLAG_LBA48) {
 810			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
 811			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
 812			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
 813			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
 814			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
 815			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
 816		} else
 817			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
 818
 819		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
 820		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
 821		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
 822		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
 823	}
 824
 825	if (tf->flags & ATA_TFLAG_DEVICE)
 826		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
 827
 828	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
 829
 830	while (idx < 12)
 831		cpb[idx++] = cpu_to_le16(IGN);
 832
 833	return idx;
 834}
 835
 836static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 837{
 838	struct nv_adma_port_priv *pp = ap->private_data;
 839	u8 flags = pp->cpb[cpb_num].resp_flags;
 840
 841	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
 842
 843	if (unlikely((force_err ||
 844		     flags & (NV_CPB_RESP_ATA_ERR |
 845			      NV_CPB_RESP_CMD_ERR |
 846			      NV_CPB_RESP_CPB_ERR)))) {
 847		struct ata_eh_info *ehi = &ap->link.eh_info;
 848		int freeze = 0;
 849
 850		ata_ehi_clear_desc(ehi);
 851		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
 852		if (flags & NV_CPB_RESP_ATA_ERR) {
 853			ata_ehi_push_desc(ehi, "ATA error");
 854			ehi->err_mask |= AC_ERR_DEV;
 855		} else if (flags & NV_CPB_RESP_CMD_ERR) {
 856			ata_ehi_push_desc(ehi, "CMD error");
 857			ehi->err_mask |= AC_ERR_DEV;
 858		} else if (flags & NV_CPB_RESP_CPB_ERR) {
 859			ata_ehi_push_desc(ehi, "CPB error");
 860			ehi->err_mask |= AC_ERR_SYSTEM;
 861			freeze = 1;
 862		} else {
 863			/* notifier error, but no error in CPB flags? */
 864			ata_ehi_push_desc(ehi, "unknown");
 865			ehi->err_mask |= AC_ERR_OTHER;
 866			freeze = 1;
 867		}
 868		/* Kill all commands. EH will determine what actually failed. */
 869		if (freeze)
 870			ata_port_freeze(ap);
 871		else
 872			ata_port_abort(ap);
 873		return -1;
 874	}
 875
 876	if (likely(flags & NV_CPB_RESP_DONE))
 877		return 1;
 878	return 0;
 879}
 880
 881static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
 882{
 883	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 884
 885	/* freeze if hotplugged */
 886	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
 887		ata_port_freeze(ap);
 888		return 1;
 889	}
 890
 891	/* bail out if not our interrupt */
 892	if (!(irq_stat & NV_INT_DEV))
 893		return 0;
 894
 895	/* DEV interrupt w/ no active qc? */
 896	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 897		ata_sff_check_status(ap);
 898		return 1;
 899	}
 900
 901	/* handle interrupt */
 902	return ata_bmdma_port_intr(ap, qc);
 903}
 904
 905static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 906{
 907	struct ata_host *host = dev_instance;
 908	int i, handled = 0;
 909	u32 notifier_clears[2];
 910
 911	spin_lock(&host->lock);
 912
 913	for (i = 0; i < host->n_ports; i++) {
 914		struct ata_port *ap = host->ports[i];
 915		struct nv_adma_port_priv *pp = ap->private_data;
 916		void __iomem *mmio = pp->ctl_block;
 917		u16 status;
 918		u32 gen_ctl;
 919		u32 notifier, notifier_error;
 920
 921		notifier_clears[i] = 0;
 922
 923		/* if ADMA is disabled, use standard ata interrupt handler */
 924		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
 925			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 926				>> (NV_INT_PORT_SHIFT * i);
 927			handled += nv_host_intr(ap, irq_stat);
 928			continue;
 929		}
 930
 931		/* if in ATA register mode, check for standard interrupts */
 932		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
 933			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 934				>> (NV_INT_PORT_SHIFT * i);
 935			if (ata_tag_valid(ap->link.active_tag))
 936				/** NV_INT_DEV indication seems unreliable
 937				    at times at least in ADMA mode. Force it
 938				    on always when a command is active, to
 939				    prevent losing interrupts. */
 940				irq_stat |= NV_INT_DEV;
 941			handled += nv_host_intr(ap, irq_stat);
 942		}
 943
 944		notifier = readl(mmio + NV_ADMA_NOTIFIER);
 945		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
 946		notifier_clears[i] = notifier | notifier_error;
 947
 948		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
 949
 950		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
 951		    !notifier_error)
 952			/* Nothing to do */
 953			continue;
 954
 955		status = readw(mmio + NV_ADMA_STAT);
 956
 957		/*
 958		 * Clear status. Ensure the controller sees the
 959		 * clearing before we start looking at any of the CPB
 960		 * statuses, so that any CPB completions after this
 961		 * point in the handler will raise another interrupt.
 962		 */
 963		writew(status, mmio + NV_ADMA_STAT);
 964		readw(mmio + NV_ADMA_STAT); /* flush posted write */
 965		rmb();
 966
 967		handled++; /* irq handled if we got here */
 968
 969		/* freeze if hotplugged or controller error */
 970		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
 971				       NV_ADMA_STAT_HOTUNPLUG |
 972				       NV_ADMA_STAT_TIMEOUT |
 973				       NV_ADMA_STAT_SERROR))) {
 974			struct ata_eh_info *ehi = &ap->link.eh_info;
 975
 976			ata_ehi_clear_desc(ehi);
 977			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
 978			if (status & NV_ADMA_STAT_TIMEOUT) {
 979				ehi->err_mask |= AC_ERR_SYSTEM;
 980				ata_ehi_push_desc(ehi, "timeout");
 981			} else if (status & NV_ADMA_STAT_HOTPLUG) {
 982				ata_ehi_hotplugged(ehi);
 983				ata_ehi_push_desc(ehi, "hotplug");
 984			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
 985				ata_ehi_hotplugged(ehi);
 986				ata_ehi_push_desc(ehi, "hot unplug");
 987			} else if (status & NV_ADMA_STAT_SERROR) {
 988				/* let EH analyze SError and figure out cause */
 989				ata_ehi_push_desc(ehi, "SError");
 990			} else
 991				ata_ehi_push_desc(ehi, "unknown");
 992			ata_port_freeze(ap);
 993			continue;
 994		}
 995
 996		if (status & (NV_ADMA_STAT_DONE |
 997			      NV_ADMA_STAT_CPBERR |
 998			      NV_ADMA_STAT_CMD_COMPLETE)) {
 999			u32 check_commands = notifier_clears[i];
1000			u32 done_mask = 0;
1001			int pos, rc;
1002
1003			if (status & NV_ADMA_STAT_CPBERR) {
1004				/* check all active commands */
1005				if (ata_tag_valid(ap->link.active_tag))
1006					check_commands = 1 <<
1007						ap->link.active_tag;
1008				else
1009					check_commands = ap->link.sactive;
1010			}
1011
1012			/* check CPBs for completed commands */
1013			while ((pos = ffs(check_commands))) {
1014				pos--;
1015				rc = nv_adma_check_cpb(ap, pos,
1016						notifier_error & (1 << pos));
1017				if (rc > 0)
1018					done_mask |= 1 << pos;
1019				else if (unlikely(rc < 0))
1020					check_commands = 0;
1021				check_commands &= ~(1 << pos);
1022			}
1023			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1024		}
1025	}
1026
1027	if (notifier_clears[0] || notifier_clears[1]) {
1028		/* Note: Both notifier clear registers must be written
1029		   if either is set, even if one is zero, according to NVIDIA. */
1030		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1031		writel(notifier_clears[0], pp->notifier_clear_block);
1032		pp = host->ports[1]->private_data;
1033		writel(notifier_clears[1], pp->notifier_clear_block);
1034	}
1035
1036	spin_unlock(&host->lock);
1037
1038	return IRQ_RETVAL(handled);
1039}
1040
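/*
 * Freeze an ADMA port for EH: mask the CK804 per-port interrupt bits and,
 * unless the port has fallen back to ATAPI/legacy operation, also ack any
 * pending CK804 notifications and turn off the ADMA and hotplug interrupt
 * enables in the ADMA control register.
 */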
1041static void nv_adma_freeze(struct ata_port *ap)
1042{
1043	struct nv_adma_port_priv *pp = ap->private_data;
1044	void __iomem *mmio = pp->ctl_block;
1045	u16 tmp;
1046
1047	nv_ck804_freeze(ap);
1048
1049	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1050		return;
1051
1052	/* clear any outstanding CK804 notifications */
1053	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1054		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1055
1056	/* Disable interrupt */
1057	tmp = readw(mmio + NV_ADMA_CTL);
1058	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1059		mmio + NV_ADMA_CTL);
1060	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1061}
1062
1063static void nv_adma_thaw(struct ata_port *ap)
1064{
1065	struct nv_adma_port_priv *pp = ap->private_data;
1066	void __iomem *mmio = pp->ctl_block;
1067	u16 tmp;
1068
1069	nv_ck804_thaw(ap);
1070
1071	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1072		return;
1073
1074	/* Enable interrupt */
1075	tmp = readw(mmio + NV_ADMA_CTL);
1076	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1077		mmio + NV_ADMA_CTL);
1078	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1079}
1080
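/*
 * Clear interrupt state for an ADMA port: in ATAPI/legacy mode defer to the
 * BMDMA helper, otherwise ack this port's CK804 status bits, clear the ADMA
 * status register, and write both ports' notifier clear registers (the
 * hardware wants both written even when only one port is being cleared).
 */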
1081static void nv_adma_irq_clear(struct ata_port *ap)
1082{
1083	struct nv_adma_port_priv *pp = ap->private_data;
1084	void __iomem *mmio = pp->ctl_block;
1085	u32 notifier_clears[2];
1086
1087	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1088		ata_bmdma_irq_clear(ap);
1089		return;
1090	}
1091
1092	/* clear any outstanding CK804 notifications */
1093	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1094		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1095
1096	/* clear ADMA status */
1097	writew(0xffff, mmio + NV_ADMA_STAT);
1098
1099	/* clear notifiers - note both ports need to be written with
1100	   something even though we are only clearing on one */
1101	if (ap->port_no == 0) {
1102		notifier_clears[0] = 0xFFFFFFFF;
1103		notifier_clears[1] = 0;
1104	} else {
1105		notifier_clears[0] = 0;
1106		notifier_clears[1] = 0xFFFFFFFF;
1107	}
1108	pp = ap->host->ports[0]->private_data;
1109	writel(notifier_clears[0], pp->notifier_clear_block);
1110	pp = ap->host->ports[1]->private_data;
1111	writel(notifier_clears[1], pp->notifier_clear_block);
1112}
1113
1114static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1115{
1116	struct nv_adma_port_priv *pp = qc->ap->private_data;
1117
1118	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1119		ata_bmdma_post_internal_cmd(qc);
1120}
1121
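/*
 * Per-port setup for ADMA: allocate the legacy BMDMA resources first (for
 * the register-mode fallback), then allocate a single coherent DMA block
 * holding one CPB per command tag followed by the per-tag APRD tables,
 * program the CPB base address registers, and reset the channel into
 * register mode with ADMA and hotplug interrupts enabled.
 */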
1122static int nv_adma_port_start(struct ata_port *ap)
1123{
1124	struct device *dev = ap->host->dev;
1125	struct nv_adma_port_priv *pp;
1126	int rc;
1127	void *mem;
1128	dma_addr_t mem_dma;
1129	void __iomem *mmio;
1130	struct pci_dev *pdev = to_pci_dev(dev);
1131	u16 tmp;
1132
1133	VPRINTK("ENTER\n");
1134
1135	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1136	   pad buffers */
1137	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1138	if (rc)
1139		return rc;
1140	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1141	if (rc)
1142		return rc;
1143
1144	/* we might fallback to bmdma, allocate bmdma resources */
1145	rc = ata_bmdma_port_start(ap);
1146	if (rc)
1147		return rc;
1148
1149	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1150	if (!pp)
1151		return -ENOMEM;
1152
1153	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1154	       ap->port_no * NV_ADMA_PORT_SIZE;
1155	pp->ctl_block = mmio;
1156	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1157	pp->notifier_clear_block = pp->gen_block +
1158	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1159
 1160	/* Now that the legacy PRD and padding buffer are allocated we can
 1161	   safely raise the DMA mask to allocate the CPB/APRD table.
 1162	   These calls are allowed to fail, since we store the mask that ends
 1163	   up being used and apply it as the bounce limit in slave_config
 1164	   later if needed. */
1165	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1166	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1167	pp->adma_dma_mask = *dev->dma_mask;
1168
1169	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1170				  &mem_dma, GFP_KERNEL);
1171	if (!mem)
1172		return -ENOMEM;
1173	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1174
1175	/*
1176	 * First item in chunk of DMA memory:
1177	 * 128-byte command parameter block (CPB)
1178	 * one for each command tag
1179	 */
1180	pp->cpb     = mem;
1181	pp->cpb_dma = mem_dma;
1182
1183	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1184	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1185
1186	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1187	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1188
1189	/*
1190	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1191	 */
1192	pp->aprd = mem;
1193	pp->aprd_dma = mem_dma;
1194
1195	ap->private_data = pp;
1196
1197	/* clear any outstanding interrupt conditions */
1198	writew(0xffff, mmio + NV_ADMA_STAT);
1199
1200	/* initialize port variables */
1201	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1202
1203	/* clear CPB fetch count */
1204	writew(0, mmio + NV_ADMA_CPB_COUNT);
1205
1206	/* clear GO for register mode, enable interrupt */
1207	tmp = readw(mmio + NV_ADMA_CTL);
1208	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1209		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1210
1211	tmp = readw(mmio + NV_ADMA_CTL);
1212	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1213	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1214	udelay(1);
1215	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1216	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1217
1218	return 0;
1219}
1220
1221static void nv_adma_port_stop(struct ata_port *ap)
1222{
1223	struct nv_adma_port_priv *pp = ap->private_data;
1224	void __iomem *mmio = pp->ctl_block;
1225
1226	VPRINTK("ENTER\n");
1227	writew(0, mmio + NV_ADMA_CTL);
1228}
1229
1230#ifdef CONFIG_PM
1231static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1232{
1233	struct nv_adma_port_priv *pp = ap->private_data;
1234	void __iomem *mmio = pp->ctl_block;
1235
1236	/* Go to register mode - clears GO */
1237	nv_adma_register_mode(ap);
1238
1239	/* clear CPB fetch count */
1240	writew(0, mmio + NV_ADMA_CPB_COUNT);
1241
1242	/* disable interrupt, shut down port */
1243	writew(0, mmio + NV_ADMA_CTL);
1244
1245	return 0;
1246}
1247
1248static int nv_adma_port_resume(struct ata_port *ap)
1249{
1250	struct nv_adma_port_priv *pp = ap->private_data;
1251	void __iomem *mmio = pp->ctl_block;
1252	u16 tmp;
1253
1254	/* set CPB block location */
1255	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1256	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1257
1258	/* clear any outstanding interrupt conditions */
1259	writew(0xffff, mmio + NV_ADMA_STAT);
1260
1261	/* initialize port variables */
1262	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1263
1264	/* clear CPB fetch count */
1265	writew(0, mmio + NV_ADMA_CPB_COUNT);
1266
1267	/* clear GO for register mode, enable interrupt */
1268	tmp = readw(mmio + NV_ADMA_CTL);
1269	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1270		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1271
1272	tmp = readw(mmio + NV_ADMA_CTL);
1273	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1274	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1275	udelay(1);
1276	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1277	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1278
1279	return 0;
1280}
1281#endif
1282
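/*
 * Redirect the port's taskfile addresses into its ADMA register block in
 * BAR5; in register mode the controller exposes the legacy taskfile
 * registers at 4-byte strides within this block.
 */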
1283static void nv_adma_setup_port(struct ata_port *ap)
1284{
1285	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1286	struct ata_ioports *ioport = &ap->ioaddr;
1287
1288	VPRINTK("ENTER\n");
1289
1290	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1291
1292	ioport->cmd_addr	= mmio;
1293	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1294	ioport->error_addr	=
1295	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1296	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1297	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1298	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1299	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1300	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1301	ioport->status_addr	=
1302	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1303	ioport->altstatus_addr	=
1304	ioport->ctl_addr	= mmio + 0x20;
1305}
1306
1307static int nv_adma_host_init(struct ata_host *host)
1308{
1309	struct pci_dev *pdev = to_pci_dev(host->dev);
1310	unsigned int i;
1311	u32 tmp32;
1312
1313	VPRINTK("ENTER\n");
1314
1315	/* enable ADMA on the ports */
1316	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1317	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1318		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1319		 NV_MCP_SATA_CFG_20_PORT1_EN |
1320		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1321
1322	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1323
1324	for (i = 0; i < host->n_ports; i++)
1325		nv_adma_setup_port(host->ports[i]);
1326
1327	return 0;
1328}
1329
1330static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1331			      struct scatterlist *sg,
1332			      int idx,
1333			      struct nv_adma_prd *aprd)
1334{
1335	u8 flags = 0;
1336	if (qc->tf.flags & ATA_TFLAG_WRITE)
1337		flags |= NV_APRD_WRITE;
1338	if (idx == qc->n_elem - 1)
1339		flags |= NV_APRD_END;
1340	else if (idx != 4)
1341		flags |= NV_APRD_CONT;
1342
1343	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1344	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1345	aprd->flags = flags;
1346	aprd->packet_len = 0;
1347}
1348
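/*
 * Build the ADMA scatter/gather list for a command: the first five APRD
 * entries live inline in the CPB, any further entries spill into this
 * tag's slot of the external APRD table, which the CPB points at through
 * next_aprd.
 */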
1349static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1350{
1351	struct nv_adma_port_priv *pp = qc->ap->private_data;
1352	struct nv_adma_prd *aprd;
1353	struct scatterlist *sg;
1354	unsigned int si;
1355
1356	VPRINTK("ENTER\n");
1357
1358	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1359		aprd = (si < 5) ? &cpb->aprd[si] :
1360			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1361		nv_adma_fill_aprd(qc, sg, si, aprd);
1362	}
1363	if (si > 5)
1364		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1365	else
1366		cpb->next_aprd = cpu_to_le64(0);
1367}
1368
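/*
 * Decide whether a command must go through the legacy register interface
 * instead of the ADMA engine: returns 1 once ATAPI setup has switched the
 * port to legacy operation or when the command is polled, and 0 only for
 * DMA-mapped or interrupt-driven no-data commands.
 */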
1369static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1370{
1371	struct nv_adma_port_priv *pp = qc->ap->private_data;
1372
1373	/* ADMA engine can only be used for non-ATAPI DMA commands,
1374	   or interrupt-driven no-data commands. */
1375	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1376	   (qc->tf.flags & ATA_TFLAG_POLLING))
1377		return 1;
1378
1379	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1380	   (qc->tf.protocol == ATA_PROT_NODATA))
1381		return 0;
1382
1383	return 1;
1384}
1385
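/*
 * Prepare a command for issue.  Register-mode commands fall back to the
 * BMDMA prep path; for ADMA commands the taskfile, APRD list and control
 * flags are written into the tag's CPB, with the CPB_VALID bit set last
 * so the controller never sees a partially built CPB.
 */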
1386static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1387{
1388	struct nv_adma_port_priv *pp = qc->ap->private_data;
1389	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1390	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1391		       NV_CPB_CTL_IEN;
1392
1393	if (nv_adma_use_reg_mode(qc)) {
1394		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1395			(qc->flags & ATA_QCFLAG_DMAMAP));
1396		nv_adma_register_mode(qc->ap);
1397		ata_bmdma_qc_prep(qc);
1398		return;
1399	}
1400
1401	cpb->resp_flags = NV_CPB_RESP_DONE;
1402	wmb();
1403	cpb->ctl_flags = 0;
1404	wmb();
1405
1406	cpb->len		= 3;
1407	cpb->tag		= qc->tag;
1408	cpb->next_cpb_idx	= 0;
1409
1410	/* turn on NCQ flags for NCQ commands */
1411	if (qc->tf.protocol == ATA_PROT_NCQ)
1412		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1413
1414	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1415
1416	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1417
1418	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1419		nv_adma_fill_sg(qc, cpb);
1420		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1421	} else
1422		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1423
1424	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1425	   until we are finished filling in all of the contents */
1426	wmb();
1427	cpb->ctl_flags = ctl_flags;
1428	wmb();
 1429		cpb->resp_flags = 0;
 1430	}
1431
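/*
 * Issue a prepared command.  NCQ commands that request a result taskfile
 * are rejected (reading it back would drop the port out of ADMA mode),
 * register-mode commands are handed to the BMDMA issue path, and ADMA
 * commands are started by writing the tag to the APPEND register, with a
 * short delay when alternating between NCQ and non-NCQ commands.
 */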
1432static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1433{
1434	struct nv_adma_port_priv *pp = qc->ap->private_data;
1435	void __iomem *mmio = pp->ctl_block;
1436	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1437
1438	VPRINTK("ENTER\n");
1439
1440	/* We can't handle result taskfile with NCQ commands, since
1441	   retrieving the taskfile switches us out of ADMA mode and would abort
1442	   existing commands. */
1443	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1444		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1445		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1446		return AC_ERR_SYSTEM;
1447	}
1448
1449	if (nv_adma_use_reg_mode(qc)) {
1450		/* use ATA register mode */
1451		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1452		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1453			(qc->flags & ATA_QCFLAG_DMAMAP));
1454		nv_adma_register_mode(qc->ap);
1455		return ata_bmdma_qc_issue(qc);
1456	} else
1457		nv_adma_mode(qc->ap);
1458
1459	/* write append register, command tag in lower 8 bits
1460	   and (number of cpbs to append -1) in top 8 bits */
1461	wmb();
1462
1463	if (curr_ncq != pp->last_issue_ncq) {
1464		/* Seems to need some delay before switching between NCQ and
1465		   non-NCQ commands, else we get command timeouts and such. */
1466		udelay(20);
1467		pp->last_issue_ncq = curr_ncq;
1468	}
1469
1470	writew(qc->tag, mmio + NV_ADMA_APPEND);
1471
1472	DPRINTK("Issued tag %u\n", qc->tag);
1473
1474	return 0;
1475}
1476
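/*
 * Generic interrupt handler: no controller interrupt status is consulted;
 * run the BMDMA port interrupt handler for any port with an active
 * non-polled command, and read the ATA status register otherwise to clear
 * a possibly pending interrupt.
 */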
1477static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1478{
1479	struct ata_host *host = dev_instance;
1480	unsigned int i;
1481	unsigned int handled = 0;
1482	unsigned long flags;
1483
1484	spin_lock_irqsave(&host->lock, flags);
1485
1486	for (i = 0; i < host->n_ports; i++) {
1487		struct ata_port *ap = host->ports[i];
1488		struct ata_queued_cmd *qc;
1489
1490		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1491		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1492			handled += ata_bmdma_port_intr(ap, qc);
1493		} else {
1494			/*
1495			 * No request pending?  Clear interrupt status
1496			 * anyway, in case there's one pending.
1497			 */
1498			ap->ops->sff_check_status(ap);
1499		}
1500	}
1501
1502	spin_unlock_irqrestore(&host->lock, flags);
1503
1504	return IRQ_RETVAL(handled);
1505}
1506
1507static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1508{
1509	int i, handled = 0;
1510
1511	for (i = 0; i < host->n_ports; i++) {
1512		handled += nv_host_intr(host->ports[i], irq_stat);
1513		irq_stat >>= NV_INT_PORT_SHIFT;
1514	}
1515
1516	return IRQ_RETVAL(handled);
1517}
1518
1519static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1520{
1521	struct ata_host *host = dev_instance;
1522	u8 irq_stat;
1523	irqreturn_t ret;
1524
1525	spin_lock(&host->lock);
1526	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1527	ret = nv_do_interrupt(host, irq_stat);
1528	spin_unlock(&host->lock);
1529
1530	return ret;
1531}
1532
1533static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1534{
1535	struct ata_host *host = dev_instance;
1536	u8 irq_stat;
1537	irqreturn_t ret;
1538
1539	spin_lock(&host->lock);
1540	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1541	ret = nv_do_interrupt(host, irq_stat);
1542	spin_unlock(&host->lock);
1543
1544	return ret;
1545}
1546
1547static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1548{
1549	if (sc_reg > SCR_CONTROL)
1550		return -EINVAL;
1551
1552	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1553	return 0;
1554}
1555
1556static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1557{
1558	if (sc_reg > SCR_CONTROL)
1559		return -EINVAL;
1560
1561	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1562	return 0;
1563}
1564
1565static int nv_hardreset(struct ata_link *link, unsigned int *class,
1566			unsigned long deadline)
1567{
1568	struct ata_eh_context *ehc = &link->eh_context;
1569
1570	/* Do hardreset iff it's post-boot probing, please read the
1571	 * comment above port ops for details.
1572	 */
1573	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1574	    !ata_dev_enabled(link->device))
1575		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1576				    NULL, NULL);
1577	else {
1578		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1579		int rc;
1580
1581		if (!(ehc->i.flags & ATA_EHI_QUIET))
1582			ata_link_info(link,
1583				      "nv: skipping hardreset on occupied port\n");
1584
1585		/* make sure the link is online */
1586		rc = sata_link_resume(link, timing, deadline);
1587		/* whine about phy resume failure but proceed */
1588		if (rc && rc != -EOPNOTSUPP)
1589			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1590				      rc);
1591	}
1592
1593	/* device signature acquisition is unreliable */
1594	return -EAGAIN;
1595}
1596
1597static void nv_nf2_freeze(struct ata_port *ap)
1598{
1599	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1600	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1601	u8 mask;
1602
1603	mask = ioread8(scr_addr + NV_INT_ENABLE);
1604	mask &= ~(NV_INT_ALL << shift);
1605	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1606}
1607
1608static void nv_nf2_thaw(struct ata_port *ap)
1609{
1610	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1611	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1612	u8 mask;
1613
1614	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1615
1616	mask = ioread8(scr_addr + NV_INT_ENABLE);
1617	mask |= (NV_INT_MASK << shift);
1618	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1619}
1620
1621static void nv_ck804_freeze(struct ata_port *ap)
1622{
1623	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1624	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1625	u8 mask;
1626
1627	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1628	mask &= ~(NV_INT_ALL << shift);
1629	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1630}
1631
1632static void nv_ck804_thaw(struct ata_port *ap)
1633{
1634	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1635	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1636	u8 mask;
1637
1638	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1639
1640	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1641	mask |= (NV_INT_MASK << shift);
1642	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1643}
1644
1645static void nv_mcp55_freeze(struct ata_port *ap)
1646{
1647	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1648	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1649	u32 mask;
1650
1651	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1652
1653	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1654	mask &= ~(NV_INT_ALL_MCP55 << shift);
1655	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1656}
1657
1658static void nv_mcp55_thaw(struct ata_port *ap)
1659{
1660	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1661	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1662	u32 mask;
1663
1664	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1665
1666	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1667	mask |= (NV_INT_MASK_MCP55 << shift);
1668	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1669}
1670
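/*
 * EH entry for ADMA ports: if the port is still in ADMA mode, dump the
 * engine and CPB state for any active commands, drop back to register
 * mode, invalidate all CPBs and reset the channel, then hand off to the
 * standard BMDMA error handler.
 */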
1671static void nv_adma_error_handler(struct ata_port *ap)
1672{
1673	struct nv_adma_port_priv *pp = ap->private_data;
1674	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1675		void __iomem *mmio = pp->ctl_block;
1676		int i;
1677		u16 tmp;
1678
1679		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1680			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1681			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1682			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1683			u32 status = readw(mmio + NV_ADMA_STAT);
1684			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1685			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1686
1687			ata_port_err(ap,
1688				"EH in ADMA mode, notifier 0x%X "
1689				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1690				"next cpb count 0x%X next cpb idx 0x%x\n",
1691				notifier, notifier_error, gen_ctl, status,
1692				cpb_count, next_cpb_idx);
1693
1694			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1695				struct nv_adma_cpb *cpb = &pp->cpb[i];
1696				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1697				    ap->link.sactive & (1 << i))
1698					ata_port_err(ap,
1699						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1700						i, cpb->ctl_flags, cpb->resp_flags);
1701			}
1702		}
1703
1704		/* Push us back into port register mode for error handling. */
1705		nv_adma_register_mode(ap);
1706
1707		/* Mark all of the CPBs as invalid to prevent them from
1708		   being executed */
1709		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1710			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1711
1712		/* clear CPB fetch count */
1713		writew(0, mmio + NV_ADMA_CPB_COUNT);
1714
1715		/* Reset channel */
1716		tmp = readw(mmio + NV_ADMA_CTL);
1717		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1718		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1719		udelay(1);
1720		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1721		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1722	}
1723
1724	ata_bmdma_error_handler(ap);
1725}
1726
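/*
 * Software-NCQ defer queue: commands that arrive while other NCQ commands
 * are still in flight are parked in a small ring (one slot per tag) and
 * taken off again one at a time as earlier commands complete.
 */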
1727static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1728{
1729	struct nv_swncq_port_priv *pp = ap->private_data;
1730	struct defer_queue *dq = &pp->defer_queue;
1731
1732	/* queue is full */
1733	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1734	dq->defer_bits |= (1 << qc->tag);
1735	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1736}
1737
1738static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1739{
1740	struct nv_swncq_port_priv *pp = ap->private_data;
1741	struct defer_queue *dq = &pp->defer_queue;
1742	unsigned int tag;
1743
1744	if (dq->head == dq->tail)	/* null queue */
1745		return NULL;
1746
1747	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1748	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1749	WARN_ON(!(dq->defer_bits & (1 << tag)));
1750	dq->defer_bits &= ~(1 << tag);
1751
1752	return ata_qc_from_tag(ap, tag);
1753}
1754
1755static void nv_swncq_fis_reinit(struct ata_port *ap)
1756{
1757	struct nv_swncq_port_priv *pp = ap->private_data;
1758
1759	pp->dhfis_bits = 0;
1760	pp->dmafis_bits = 0;
1761	pp->sdbfis_bits = 0;
1762	pp->ncq_flags = 0;
1763}
1764
1765static void nv_swncq_pp_reinit(struct ata_port *ap)
1766{
1767	struct nv_swncq_port_priv *pp = ap->private_data;
1768	struct defer_queue *dq = &pp->defer_queue;
1769
1770	dq->head = 0;
1771	dq->tail = 0;
1772	dq->defer_bits = 0;
1773	pp->qc_active = 0;
1774	pp->last_issue_tag = ATA_TAG_POISON;
1775	nv_swncq_fis_reinit(ap);
1776}
1777
1778static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1779{
1780	struct nv_swncq_port_priv *pp = ap->private_data;
1781
1782	writew(fis, pp->irq_block);
1783}
1784
1785static void __ata_bmdma_stop(struct ata_port *ap)
1786{
1787	struct ata_queued_cmd qc;
1788
1789	qc.ap = ap;
1790	ata_bmdma_stop(&qc);
1791}
1792
1793static void nv_swncq_ncq_stop(struct ata_port *ap)
1794{
1795	struct nv_swncq_port_priv *pp = ap->private_data;
1796	unsigned int i;
1797	u32 sactive;
1798	u32 done_mask;
1799
 1800	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1801		     ap->qc_active, ap->link.sactive);
1802	ata_port_err(ap,
1803		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1804		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1805		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1806		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1807
1808	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1809		     ap->ops->sff_check_status(ap),
1810		     ioread8(ap->ioaddr.error_addr));
1811
1812	sactive = readl(pp->sactive_block);
1813	done_mask = pp->qc_active ^ sactive;
1814
1815	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1816	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1817		u8 err = 0;
1818		if (pp->qc_active & (1 << i))
1819			err = 0;
1820		else if (done_mask & (1 << i))
1821			err = 1;
1822		else
1823			continue;
1824
1825		ata_port_err(ap,
1826			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1827			     (pp->dhfis_bits >> i) & 0x1,
1828			     (pp->dmafis_bits >> i) & 0x1,
1829			     (pp->sdbfis_bits >> i) & 0x1,
1830			     (sactive >> i) & 0x1,
 1831			     (err ? "error! tag doesn't exist" : " "));
1832	}
1833
1834	nv_swncq_pp_reinit(ap);
1835	ap->ops->sff_irq_clear(ap);
1836	__ata_bmdma_stop(ap);
1837	nv_swncq_irq_clear(ap, 0xffff);
1838}
1839
1840static void nv_swncq_error_handler(struct ata_port *ap)
1841{
1842	struct ata_eh_context *ehc = &ap->link.eh_context;
1843
1844	if (ap->link.sactive) {
1845		nv_swncq_ncq_stop(ap);
1846		ehc->i.action |= ATA_EH_RESET;
1847	}
1848
1849	ata_bmdma_error_handler(ap);
1850}
1851
1852#ifdef CONFIG_PM
1853static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1854{
1855	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1856	u32 tmp;
1857
1858	/* clear irq */
1859	writel(~0, mmio + NV_INT_STATUS_MCP55);
1860
1861	/* disable irq */
1862	writel(0, mmio + NV_INT_ENABLE_MCP55);
1863
1864	/* disable swncq */
1865	tmp = readl(mmio + NV_CTL_MCP55);
1866	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1867	writel(tmp, mmio + NV_CTL_MCP55);
1868
1869	return 0;
1870}
1871
1872static int nv_swncq_port_resume(struct ata_port *ap)
1873{
1874	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1875	u32 tmp;
1876
1877	/* clear irq */
1878	writel(~0, mmio + NV_INT_STATUS_MCP55);
1879
1880	/* enable irq */
1881	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1882
1883	/* enable swncq */
1884	tmp = readl(mmio + NV_CTL_MCP55);
1885	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1886
1887	return 0;
1888}
1889#endif
1890
1891static void nv_swncq_host_init(struct ata_host *host)
1892{
1893	u32 tmp;
1894	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1895	struct pci_dev *pdev = to_pci_dev(host->dev);
1896	u8 regval;
1897
1898	/* disable  ECO 398 */
1899	pci_read_config_byte(pdev, 0x7f, &regval);
1900	regval &= ~(1 << 7);
1901	pci_write_config_byte(pdev, 0x7f, regval);
1902
1903	/* enable swncq */
1904	tmp = readl(mmio + NV_CTL_MCP55);
1905	VPRINTK("HOST_CTL:0x%X\n", tmp);
1906	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1907
1908	/* enable irq intr */
1909	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1910	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1911	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1912
1913	/*  clear port irq */
1914	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1915}
1916
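/*
 * NCQ is not used for Maxtor drives behind MCP51, or behind MCP55 up to
 * revision A2: when such a combination is detected the queue depth is
 * forced to 1, which effectively disables SWNCQ for that device.
 */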
1917static int nv_swncq_slave_config(struct scsi_device *sdev)
1918{
1919	struct ata_port *ap = ata_shost_to_port(sdev->host);
1920	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1921	struct ata_device *dev;
1922	int rc;
1923	u8 rev;
1924	u8 check_maxtor = 0;
1925	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1926
1927	rc = ata_scsi_slave_config(sdev);
1928	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1929		/* Not a proper libata device, ignore */
1930		return rc;
1931
1932	dev = &ap->link.device[sdev->id];
1933	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1934		return rc;
1935
1936	/* if MCP51 and Maxtor, then disable ncq */
1937	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1938		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1939		check_maxtor = 1;
1940
1941	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1942	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1943		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1944		pci_read_config_byte(pdev, 0x8, &rev);
1945		if (rev <= 0xa2)
1946			check_maxtor = 1;
1947	}
1948
1949	if (!check_maxtor)
1950		return rc;
1951
1952	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1953
1954	if (strncmp(model_num, "Maxtor", 6) == 0) {
1955		ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1956		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1957			       sdev->queue_depth);
1958	}
1959
1960	return rc;
1961}
1962
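/*
 * Per-port setup for software NCQ: allocate the BMDMA resources used by
 * the non-NCQ fallback path plus one PRD table per queue tag, and record
 * the MMIO locations of this port's SActive, interrupt status and tag
 * registers.
 */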
1963static int nv_swncq_port_start(struct ata_port *ap)
1964{
1965	struct device *dev = ap->host->dev;
1966	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1967	struct nv_swncq_port_priv *pp;
1968	int rc;
1969
1970	/* we might fallback to bmdma, allocate bmdma resources */
1971	rc = ata_bmdma_port_start(ap);
1972	if (rc)
1973		return rc;
1974
1975	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1976	if (!pp)
1977		return -ENOMEM;
1978
1979	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1980				      &pp->prd_dma, GFP_KERNEL);
1981	if (!pp->prd)
1982		return -ENOMEM;
1983	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1984
1985	ap->private_data = pp;
1986	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1987	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1988	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1989
1990	return 0;
1991}
1992
1993static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1994{
1995	if (qc->tf.protocol != ATA_PROT_NCQ) {
1996		ata_bmdma_qc_prep(qc);
1997		return;
1998	}
1999
2000	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2001		return;
2002
 2003		nv_swncq_fill_sg(qc);
 2004	}
2005
2006static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2007{
2008	struct ata_port *ap = qc->ap;
2009	struct scatterlist *sg;
2010	struct nv_swncq_port_priv *pp = ap->private_data;
2011	struct ata_bmdma_prd *prd;
2012	unsigned int si, idx;
2013
2014	prd = pp->prd + ATA_MAX_PRD * qc->tag;
2015
2016	idx = 0;
2017	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2018		u32 addr, offset;
2019		u32 sg_len, len;
2020
2021		addr = (u32)sg_dma_address(sg);
2022		sg_len = sg_dma_len(sg);
2023
2024		while (sg_len) {
2025			offset = addr & 0xffff;
2026			len = sg_len;
2027			if ((offset + sg_len) > 0x10000)
2028				len = 0x10000 - offset;
2029
2030			prd[idx].addr = cpu_to_le32(addr);
2031			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2032
2033			idx++;
2034			sg_len -= len;
2035			addr += len;
2036		}
2037	}
2038
2039	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2040}
2041
2042static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2043					  struct ata_queued_cmd *qc)
2044{
2045	struct nv_swncq_port_priv *pp = ap->private_data;
2046
2047	if (qc == NULL)
2048		return 0;
2049
2050	DPRINTK("Enter\n");
2051
2052	writel((1 << qc->tag), pp->sactive_block);
2053	pp->last_issue_tag = qc->tag;
2054	pp->dhfis_bits &= ~(1 << qc->tag);
2055	pp->dmafis_bits &= ~(1 << qc->tag);
 2056	pp->qc_active |= (0x1 << qc->tag);
 2057
 2058	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
 2059	ap->ops->sff_exec_command(ap, &qc->tf);
2060
2061	DPRINTK("Issued tag %u\n", qc->tag);
2062
2063	return 0;
2064}
2065
2066static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2067{
2068	struct ata_port *ap = qc->ap;
2069	struct nv_swncq_port_priv *pp = ap->private_data;
2070
2071	if (qc->tf.protocol != ATA_PROT_NCQ)
2072		return ata_bmdma_qc_issue(qc);
2073
2074	DPRINTK("Enter\n");
2075
2076	if (!pp->qc_active)
2077		nv_swncq_issue_atacmd(ap, qc);
2078	else
2079		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2080
2081	return 0;
2082}
2083
2084static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2085{
2086	u32 serror;
2087	struct ata_eh_info *ehi = &ap->link.eh_info;
2088
2089	ata_ehi_clear_desc(ehi);
2090
2091	/* AHCI needs SError cleared; otherwise, it might lock up */
2092	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2093	sata_scr_write(&ap->link, SCR_ERROR, serror);
2094
2095	/* analyze @irq_stat */
2096	if (fis & NV_SWNCQ_IRQ_ADDED)
2097		ata_ehi_push_desc(ehi, "hot plug");
2098	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2099		ata_ehi_push_desc(ehi, "hot unplug");
2100
2101	ata_ehi_hotplugged(ehi);
2102
2103	/* okay, let's hand over to EH */
2104	ehi->serror |= serror;
2105
2106	ata_port_freeze(ap);
2107}
2108
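/*
 * Handle a Set Device Bits FIS: check the BMDMA engine for transfer
 * errors, work out which tags completed by comparing our record of active
 * commands against SActive, complete those commands, and then either
 * reissue the last command (if its Register FIS never arrived) or issue
 * the next command waiting on the defer queue.
 */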
2109static int nv_swncq_sdbfis(struct ata_port *ap)
2110{
2111	struct ata_queued_cmd *qc;
2112	struct nv_swncq_port_priv *pp = ap->private_data;
2113	struct ata_eh_info *ehi = &ap->link.eh_info;
2114	u32 sactive;
2115	u32 done_mask;
2116	u8 host_stat;
2117	u8 lack_dhfis = 0;
2118
 2119	host_stat = ap->ops->bmdma_status(ap);
 2120	if (unlikely(host_stat & ATA_DMA_ERR)) {
2121		/* error when transferring data to/from memory */
2122		ata_ehi_clear_desc(ehi);
2123		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2124		ehi->err_mask |= AC_ERR_HOST_BUS;
2125		ehi->action |= ATA_EH_RESET;
2126		return -EINVAL;
2127	}
2128
2129	ap->ops->sff_irq_clear(ap);
2130	__ata_bmdma_stop(ap);
2131
2132	sactive = readl(pp->sactive_block);
2133	done_mask = pp->qc_active ^ sactive;
2134
2135	pp->qc_active &= ~done_mask;
2136	pp->dhfis_bits &= ~done_mask;
2137	pp->dmafis_bits &= ~done_mask;
2138	pp->sdbfis_bits |= done_mask;
2139	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2140
2141	if (!ap->qc_active) {
2142		DPRINTK("over\n");
2143		nv_swncq_pp_reinit(ap);
2144		return 0;
2145	}
2146
2147	if (pp->qc_active & pp->dhfis_bits)
2148		return 0;
2149
2150	if ((pp->ncq_flags & ncq_saw_backout) ||
2151	    (pp->qc_active ^ pp->dhfis_bits))
 2152		/* if the controller can't get a Device-to-Host Register FIS,
 2153		 * the driver needs to reissue the new command.
 2154		 */
2155		lack_dhfis = 1;
2156
2157	DPRINTK("id 0x%x QC: qc_active 0x%x,"
2158		"SWNCQ:qc_active 0x%X defer_bits %X "
2159		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2160		ap->print_id, ap->qc_active, pp->qc_active,
2161		pp->defer_queue.defer_bits, pp->dhfis_bits,
2162		pp->dmafis_bits, pp->last_issue_tag);
2163
2164	nv_swncq_fis_reinit(ap);
2165
2166	if (lack_dhfis) {
2167		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2168		nv_swncq_issue_atacmd(ap, qc);
2169		return 0;
2170	}
2171
2172	if (pp->defer_queue.defer_bits) {
2173		/* send deferral queue command */
2174		qc = nv_swncq_qc_from_dq(ap);
2175		WARN_ON(qc == NULL);
2176		nv_swncq_issue_atacmd(ap, qc);
2177	}
2178
2179	return 0;
2180}
2181
2182static inline u32 nv_swncq_tag(struct ata_port *ap)
2183{
2184	struct nv_swncq_port_priv *pp = ap->private_data;
2185	u32 tag;
2186
2187	tag = readb(pp->tag_block) >> 2;
2188	return (tag & 0x1f);
2189}
2190
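/*
 * A DMA Setup FIS arrived: read back which tag the drive selected, point
 * the BMDMA engine at that tag's PRD table, set the transfer direction
 * and start the DMA engine.
 */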
2191static void nv_swncq_dmafis(struct ata_port *ap)
2192{
2193	struct ata_queued_cmd *qc;
2194	unsigned int rw;
2195	u8 dmactl;
2196	u32 tag;
2197	struct nv_swncq_port_priv *pp = ap->private_data;
2198
2199	__ata_bmdma_stop(ap);
2200	tag = nv_swncq_tag(ap);
2201
2202	DPRINTK("dma setup tag 0x%x\n", tag);
2203	qc = ata_qc_from_tag(ap, tag);
2204
2205	if (unlikely(!qc))
2206		return;
2207
2208	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2209
2210	/* load PRD table addr. */
2211	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2212		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2213
2214	/* specify data direction, triple-check start bit is clear */
2215	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2216	dmactl &= ~ATA_DMA_WR;
2217	if (!rw)
2218		dmactl |= ATA_DMA_WR;
2219
2220	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2221}
2222
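/*
 * Per-port SWNCQ interrupt service: ack the FIS interrupt bits and then
 * act on what was reported.  Hotplug events freeze the port, device
 * errors are pushed to EH, Set Device Bits FISes complete finished tags,
 * Register FISes mark the last issued command as accepted by the drive,
 * and DMA Setup FISes start the corresponding data transfer.
 */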
2223static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2224{
2225	struct nv_swncq_port_priv *pp = ap->private_data;
2226	struct ata_queued_cmd *qc;
2227	struct ata_eh_info *ehi = &ap->link.eh_info;
2228	u32 serror;
2229	u8 ata_stat;
2230
2231	ata_stat = ap->ops->sff_check_status(ap);
2232	nv_swncq_irq_clear(ap, fis);
2233	if (!fis)
2234		return;
2235
2236	if (ap->pflags & ATA_PFLAG_FROZEN)
2237		return;
2238
2239	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2240		nv_swncq_hotplug(ap, fis);
2241		return;
2242	}
2243
2244	if (!pp->qc_active)
2245		return;
2246
2247	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2248		return;
2249	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2250
2251	if (ata_stat & ATA_ERR) {
2252		ata_ehi_clear_desc(ehi);
2253		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2254		ehi->err_mask |= AC_ERR_DEV;
2255		ehi->serror |= serror;
2256		ehi->action |= ATA_EH_RESET;
2257		ata_port_freeze(ap);
2258		return;
2259	}
2260
2261	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
 2262		/* If the IRQ indicates a backout condition, the driver
 2263		 * must issue the new command again some time later.
 2264		 */
2265		pp->ncq_flags |= ncq_saw_backout;
2266	}
2267
2268	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2269		pp->ncq_flags |= ncq_saw_sdb;
2270		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2271			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2272			ap->print_id, pp->qc_active, pp->dhfis_bits,
2273			pp->dmafis_bits, readl(pp->sactive_block));
2274		if (nv_swncq_sdbfis(ap) < 0)
2275			goto irq_error;
2276	}
2277
2278	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2279		/* The interrupt indicates the new command
2280		 * was transmitted correctly to the drive.
2281		 */
2282		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2283		pp->ncq_flags |= ncq_saw_d2h;
2284		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2285			ata_ehi_push_desc(ehi, "illegal fis transaction");
2286			ehi->err_mask |= AC_ERR_HSM;
2287			ehi->action |= ATA_EH_RESET;
2288			goto irq_error;
2289		}
2290
2291		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2292		    !(pp->ncq_flags & ncq_saw_dmas)) {
2293			ata_stat = ap->ops->sff_check_status(ap);
2294			if (ata_stat & ATA_BUSY)
2295				goto irq_exit;
2296
2297			if (pp->defer_queue.defer_bits) {
2298				DPRINTK("send next command\n");
2299				qc = nv_swncq_qc_from_dq(ap);
2300				nv_swncq_issue_atacmd(ap, qc);
2301			}
2302		}
2303	}
2304
2305	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2306		/* program the dma controller with appropriate PRD buffers
 2307		 * and start the DMA transfer for the requested command.
2308		 */
2309		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2310		pp->ncq_flags |= ncq_saw_dmas;
2311		nv_swncq_dmafis(ap);
2312	}
2313
2314irq_exit:
2315	return;
2316irq_error:
2317	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2318	ata_port_freeze(ap);
2319	return;
2320}
2321
2322static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2323{
2324	struct ata_host *host = dev_instance;
2325	unsigned int i;
2326	unsigned int handled = 0;
2327	unsigned long flags;
2328	u32 irq_stat;
2329
2330	spin_lock_irqsave(&host->lock, flags);
2331
2332	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2333
2334	for (i = 0; i < host->n_ports; i++) {
2335		struct ata_port *ap = host->ports[i];
2336
2337		if (ap->link.sactive) {
2338			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2339			handled = 1;
2340		} else {
 2341			if (irq_stat)	/* preserve hotplug status bits */
2342				nv_swncq_irq_clear(ap, 0xfff0);
2343
2344			handled += nv_host_intr(ap, (u8)irq_stat);
2345		}
2346		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2347	}
2348
2349	spin_unlock_irqrestore(&host->lock, flags);
2350
2351	return IRQ_RETVAL(handled);
2352}
2353
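/*
 * PCI probe: controllers running in IDE mode (which do not expose all six
 * BARs) are rejected, ADMA or SWNCQ operation is selected where the
 * hardware and module parameters allow it, BAR5 is mapped for SCR and
 * ADMA/SWNCQ register access, and the host is activated with the
 * flavour-specific interrupt handler.
 */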
2354static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2355{
2356	const struct ata_port_info *ppi[] = { NULL, NULL };
2357	struct nv_pi_priv *ipriv;
2358	struct ata_host *host;
2359	struct nv_host_priv *hpriv;
2360	int rc;
2361	u32 bar;
2362	void __iomem *base;
2363	unsigned long type = ent->driver_data;
2364
 2365	/* Make sure this is a SATA controller by counting the number of BARs
 2366	   (NVIDIA SATA controllers will always have six BARs).  Otherwise,
 2367	   it's an IDE controller and we ignore it. */
2368	for (bar = 0; bar < 6; bar++)
2369		if (pci_resource_start(pdev, bar) == 0)
2370			return -ENODEV;
2371
2372	ata_print_version_once(&pdev->dev, DRV_VERSION);
2373
2374	rc = pcim_enable_device(pdev);
2375	if (rc)
2376		return rc;
2377
2378	/* determine type and allocate host */
2379	if (type == CK804 && adma_enabled) {
2380		dev_notice(&pdev->dev, "Using ADMA mode\n");
2381		type = ADMA;
2382	} else if (type == MCP5x && swncq_enabled) {
2383		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2384		type = SWNCQ;
2385	}
2386
2387	ppi[0] = &nv_port_info[type];
2388	ipriv = ppi[0]->private_data;
2389	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2390	if (rc)
2391		return rc;
2392
2393	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2394	if (!hpriv)
2395		return -ENOMEM;
2396	hpriv->type = type;
2397	host->private_data = hpriv;
2398
2399	/* request and iomap NV_MMIO_BAR */
2400	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2401	if (rc)
2402		return rc;
2403
2404	/* configure SCR access */
2405	base = host->iomap[NV_MMIO_BAR];
2406	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2407	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2408
2409	/* enable SATA space for CK804 */
2410	if (type >= CK804) {
2411		u8 regval;
2412
2413		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2414		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2415		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2416	}
2417
2418	/* init ADMA */
2419	if (type == ADMA) {
2420		rc = nv_adma_host_init(host);
2421		if (rc)
2422			return rc;
2423	} else if (type == SWNCQ)
2424		nv_swncq_host_init(host);
2425
2426	if (msi_enabled) {
2427		dev_notice(&pdev->dev, "Using MSI\n");
2428		pci_enable_msi(pdev);
2429	}
2430
2431	pci_set_master(pdev);
2432	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2433}
2434
2435#ifdef CONFIG_PM
2436static int nv_pci_device_resume(struct pci_dev *pdev)
2437{
2438	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2439	struct nv_host_priv *hpriv = host->private_data;
2440	int rc;
2441
2442	rc = ata_pci_device_do_resume(pdev);
2443	if (rc)
2444		return rc;
2445
2446	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2447		if (hpriv->type >= CK804) {
2448			u8 regval;
2449
2450			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2451			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2452			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2453		}
2454		if (hpriv->type == ADMA) {
2455			u32 tmp32;
2456			struct nv_adma_port_priv *pp;
2457			/* enable/disable ADMA on the ports appropriately */
2458			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2459
2460			pp = host->ports[0]->private_data;
2461			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2462				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2463					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2464			else
2465				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2466					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2467			pp = host->ports[1]->private_data;
2468			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2469				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2470					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2471			else
2472				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2473					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2474
2475			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2476		}
2477	}
2478
2479	ata_host_resume(host);
2480
2481	return 0;
2482}
2483#endif
2484
2485static void nv_ck804_host_stop(struct ata_host *host)
2486{
2487	struct pci_dev *pdev = to_pci_dev(host->dev);
2488	u8 regval;
2489
2490	/* disable SATA space for CK804 */
2491	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2492	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2493	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2494}
2495
2496static void nv_adma_host_stop(struct ata_host *host)
2497{
2498	struct pci_dev *pdev = to_pci_dev(host->dev);
2499	u32 tmp32;
2500
2501	/* disable ADMA on the ports */
2502	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2503	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2504		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2505		   NV_MCP_SATA_CFG_20_PORT1_EN |
2506		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2507
2508	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2509
2510	nv_ck804_host_stop(host);
2511}
2512
2513static int __init nv_init(void)
2514{
2515	return pci_register_driver(&nv_pci_driver);
2516}
2517
2518static void __exit nv_exit(void)
2519{
2520	pci_unregister_driver(&nv_pci_driver);
2521}
2522
2523module_init(nv_init);
2524module_exit(nv_exit);
2525module_param_named(adma, adma_enabled, bool, 0444);
2526MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2527module_param_named(swncq, swncq_enabled, bool, 0444);
2528MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2529module_param_named(msi, msi_enabled, bool, 0444);
2530MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2531