   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  sata_sx4.c - Promise SATA
   4 *
   5 *  Maintained by:  Tejun Heo <tj@kernel.org>
   6 *  		    Please ALWAYS copy linux-ide@vger.kernel.org
   7 *		    on emails.
   8 *
   9 *  Copyright 2003-2004 Red Hat, Inc.
  10 *
  11 *  libata documentation is available via 'make {ps|pdf}docs',
  12 *  as Documentation/driver-api/libata.rst
  13 *
  14 *  Hardware documentation available under NDA.
  15 */
  16
  17/*
  18	Theory of operation
  19	-------------------
  20
  21	The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
  22	engine, DIMM memory, and four ATA engines (one per SATA port).
  23	Data is copied to/from DIMM memory by the HDMA engine, before
  24	handing off to one (or more) of the ATA engines.  The ATA
  25	engines operate solely on DIMM memory.
  26
  27	The SX4 behaves like a PATA chip, with no SATA controls or
  28	knowledge whatsoever, leading to the presumption that
  29	PATA<->SATA bridges exist on SX4 boards, external to the
  30	PDC20621 chip itself.
  31
  32	The chip is quite capable, supporting an XOR engine and linked
  33	hardware commands (permits a string of transactions to be
  34	submitted and waited-on as a single unit), and an optional
  35	microprocessor.
  36
  37	The limiting factor is largely software.  This Linux driver was
  38	written to multiplex the single HDMA engine to copy disk
  39	transactions into a fixed DIMM memory space, from where an ATA
  40	engine takes over.  As a result, each WRITE looks like this:
  41
  42		submit HDMA packet to hardware
  43		hardware copies data from system memory to DIMM
  44		hardware raises interrupt
  45
  46		submit ATA packet to hardware
  47		hardware executes ATA WRITE command, w/ data in DIMM
  48		hardware raises interrupt
  49
  50	and each READ looks like this:
  51
  52		submit ATA packet to hardware
  53		hardware executes ATA READ command, w/ data in DIMM
  54		hardware raises interrupt
  55
  56		submit HDMA packet to hardware
  57		hardware copies data from DIMM to system memory
  58		hardware raises interrupt
  59
  60	This is a very slow, lock-step way of doing things that can
  61	certainly be improved by motivated kernel hackers.
  62
  63 */
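/*
	The DIMM layout the driver imposes (sketched here from the constants
	and packet builders below): each port gets an 8K control window at
	PDC_20621_DIMM_BASE + (portno * PDC_DIMM_WINDOW_STEP), holding the
	host DMA packet, HDMA PRD, ATA packet and ATA PRD (128 bytes each)
	plus the host PRD table at +6K, and a data staging area at
	PDC_20621_DIMM_DATA + (portno * PDC_DIMM_DATA_STEP) from the DIMM
	base.  Sequence IDs 1-4 are used for the per-port ATA engines and
	5-8 for the matching HDMA transfers; the interrupt handler relies
	on that split to tell the two completion steps apart.
 */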
  64
  65#include <linux/kernel.h>
  66#include <linux/module.h>
  67#include <linux/pci.h>
  68#include <linux/slab.h>
  69#include <linux/blkdev.h>
  70#include <linux/delay.h>
  71#include <linux/interrupt.h>
  72#include <linux/device.h>
  73#include <scsi/scsi_host.h>
  74#include <scsi/scsi_cmnd.h>
  75#include <linux/libata.h>
  76#include "sata_promise.h"
  77
  78#define DRV_NAME	"sata_sx4"
  79#define DRV_VERSION	"0.12"
  80
  81static int dimm_test;
  82module_param(dimm_test, int, 0644);
  83MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
  84
  85enum {
  86	PDC_MMIO_BAR		= 3,
  87	PDC_DIMM_BAR		= 4,
  88
  89	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */
  90
  91	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
  92	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
  93	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
  94	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */
  95
  96	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */
  97
  98	PDC_20621_SEQCTL	= 0x400,
  99	PDC_20621_SEQMASK	= 0x480,
 100	PDC_20621_GENERAL_CTL	= 0x484,
 101	PDC_20621_PAGE_SIZE	= (32 * 1024),
 102
 103	/* chosen, not constant, values; we design our own DIMM mem map */
 104	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
 105	PDC_20621_DIMM_BASE	= 0x00200000,
 106	PDC_20621_DIMM_DATA	= (64 * 1024),
 107	PDC_DIMM_DATA_STEP	= (256 * 1024),
 108	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
 109	PDC_DIMM_HOST_PRD	= (6 * 1024),
 110	PDC_DIMM_HOST_PKT	= (128 * 0),
 111	PDC_DIMM_HPKT_PRD	= (128 * 1),
 112	PDC_DIMM_ATA_PKT	= (128 * 2),
 113	PDC_DIMM_APKT_PRD	= (128 * 3),
 114	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
 115	PDC_PAGE_WINDOW		= 0x40,
 116	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
 117				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
 118	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
 119
 120	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */
 121
 122	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
 123				  (1<<23),
 124
 125	board_20621		= 0,	/* FastTrak S150 SX4 */
 126
 127	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
 128	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
 129	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */
 130
 131	PDC_MAX_HDMA		= 32,
 132	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),
 133
 134	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
 135	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
 136	PDC_I2C_CONTROL			= 0x48,
 137	PDC_I2C_ADDR_DATA		= 0x4C,
 138	PDC_DIMM0_CONTROL		= 0x80,
 139	PDC_DIMM1_CONTROL		= 0x84,
 140	PDC_SDRAM_CONTROL		= 0x88,
 141	PDC_I2C_WRITE			= 0,		/* master -> slave */
 142	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
 143	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
 144	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
 145	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
 146	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
 147	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
 148	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
 149	PDC_DIMM_SPD_ROW_NUM		= 3,
 150	PDC_DIMM_SPD_COLUMN_NUM		= 4,
 151	PDC_DIMM_SPD_MODULE_ROW		= 5,
 152	PDC_DIMM_SPD_TYPE		= 11,
 153	PDC_DIMM_SPD_FRESH_RATE		= 12,
 154	PDC_DIMM_SPD_BANK_NUM		= 17,
 155	PDC_DIMM_SPD_CAS_LATENCY	= 18,
 156	PDC_DIMM_SPD_ATTRIBUTE		= 21,
 157	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
 158	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
 159	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
 160	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
 161	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
 162	PDC_CTL_STATUS			= 0x08,
 163	PDC_DIMM_WINDOW_CTLR		= 0x0C,
 164	PDC_TIME_CONTROL		= 0x3C,
 165	PDC_TIME_PERIOD			= 0x40,
 166	PDC_TIME_COUNTER		= 0x44,
 167	PDC_GENERAL_CTLR		= 0x484,
 168	PCI_PLL_INIT			= 0x8A531824,
 169	PCI_X_TCOUNT			= 0xEE1E5CFF,
 170
 171	/* PDC_TIME_CONTROL bits */
 172	PDC_TIMER_BUZZER		= (1 << 10),
 173	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
 174	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
 175	PDC_TIMER_ENABLE		= (1 << 7),
 176	PDC_TIMER_MASK_INT		= (1 << 5),
 177	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
 178	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
 179					  PDC_TIMER_ENABLE |
 180					  PDC_TIMER_MASK_INT,
 181};
 182
 183#define ECC_ERASE_BUF_SZ (128 * 1024)
 184
 185struct pdc_port_priv {
 186	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
 187	u8			*pkt;
 188	dma_addr_t		pkt_dma;
 189};
 190
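/*
 * Host-wide state used to multiplex the single HDMA copy engine:
 * doing_hdma flags a transfer in flight, and hdma[] is a small
 * producer/consumer ring (indices wrapped with PDC_HDMA_Q_MASK) of
 * HDMA submissions deferred while the engine is busy.
 */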
 191struct pdc_host_priv {
 192	unsigned int		doing_hdma;
 193	unsigned int		hdma_prod;
 194	unsigned int		hdma_cons;
 195	struct {
 196		struct ata_queued_cmd *qc;
 197		unsigned int	seq;
 198		unsigned long	pkt_ofs;
 199	} hdma[32];
 200};
 201
 202
 203static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 204static void pdc_error_handler(struct ata_port *ap);
 205static void pdc_freeze(struct ata_port *ap);
 206static void pdc_thaw(struct ata_port *ap);
 207static int pdc_port_start(struct ata_port *ap);
 208static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
 209static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 210static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 211static unsigned int pdc20621_dimm_init(struct ata_host *host);
 212static int pdc20621_detect_dimm(struct ata_host *host);
 213static unsigned int pdc20621_i2c_read(struct ata_host *host,
 214				      u32 device, u32 subaddr, u32 *pdata);
 215static int pdc20621_prog_dimm0(struct ata_host *host);
 216static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
 217static void pdc20621_get_from_dimm(struct ata_host *host,
 218				   void *psource, u32 offset, u32 size);
 219static void pdc20621_put_to_dimm(struct ata_host *host,
 220				 void *psource, u32 offset, u32 size);
 221static void pdc20621_irq_clear(struct ata_port *ap);
 222static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
 223static int pdc_softreset(struct ata_link *link, unsigned int *class,
 224			 unsigned long deadline);
 225static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
 226static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
 227
 228
 229static const struct scsi_host_template pdc_sata_sht = {
 230	ATA_BASE_SHT(DRV_NAME),
 231	.sg_tablesize		= LIBATA_MAX_PRD,
 232	.dma_boundary		= ATA_DMA_BOUNDARY,
 233};
 234
 235static struct ata_port_operations pdc_20621_ops = {
 236	.inherits		= &ata_sff_port_ops,
 237
 238	.check_atapi_dma	= pdc_check_atapi_dma,
 239	.qc_prep		= pdc20621_qc_prep,
 240	.qc_issue		= pdc20621_qc_issue,
 241
 242	.freeze			= pdc_freeze,
 243	.thaw			= pdc_thaw,
 244	.softreset		= pdc_softreset,
 245	.error_handler		= pdc_error_handler,
 246	.lost_interrupt		= ATA_OP_NULL,
 247	.post_internal_cmd	= pdc_post_internal_cmd,
 248
 249	.port_start		= pdc_port_start,
 250
 251	.sff_tf_load		= pdc_tf_load_mmio,
 252	.sff_exec_command	= pdc_exec_command_mmio,
 253	.sff_irq_clear		= pdc20621_irq_clear,
 254};
 255
 256static const struct ata_port_info pdc_port_info[] = {
 257	/* board_20621 */
 258	{
 259		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
 260				  ATA_FLAG_PIO_POLLING,
 261		.pio_mask	= ATA_PIO4,
 262		.mwdma_mask	= ATA_MWDMA2,
 263		.udma_mask	= ATA_UDMA6,
 264		.port_ops	= &pdc_20621_ops,
 265	},
 266
 267};
 268
 269static const struct pci_device_id pdc_sata_pci_tbl[] = {
 270	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
 271
 272	{ }	/* terminate list */
 273};
 274
 275static struct pci_driver pdc_sata_pci_driver = {
 276	.name			= DRV_NAME,
 277	.id_table		= pdc_sata_pci_tbl,
 278	.probe			= pdc_sata_init_one,
 279	.remove			= ata_pci_remove_one,
 280};
 281
 282
 283static int pdc_port_start(struct ata_port *ap)
 284{
 285	struct device *dev = ap->host->dev;
 286	struct pdc_port_priv *pp;
 287
 288	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 289	if (!pp)
 290		return -ENOMEM;
 291
 292	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
 293	if (!pp->pkt)
 294		return -ENOMEM;
 295
 296	ap->private_data = pp;
 297
 298	return 0;
 299}
 300
 301static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
 302				   unsigned int total_len)
 303{
 304	u32 addr;
 305	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
 306	__le32 *buf32 = (__le32 *) buf;
 307
 308	/* output ATA packet S/G table */
 309	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
 310	       (PDC_DIMM_DATA_STEP * portno);
 311
 312	buf32[dw] = cpu_to_le32(addr);
 313	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
 314}
 315
 316static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
 317				    unsigned int total_len)
 318{
 319	u32 addr;
 320	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
 321	__le32 *buf32 = (__le32 *) buf;
 322
 323	/* output Host DMA packet S/G table */
 324	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
 325	       (PDC_DIMM_DATA_STEP * portno);
 326
 327	buf32[dw] = cpu_to_le32(addr);
 328	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
 329}
 330
 331static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
 332					    unsigned int devno, u8 *buf,
 333					    unsigned int portno)
 334{
 335	unsigned int i, dw;
 336	__le32 *buf32 = (__le32 *) buf;
 337	u8 dev_reg;
 338
 339	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
 340			       (PDC_DIMM_WINDOW_STEP * portno) +
 341			       PDC_DIMM_APKT_PRD;
 342
 343	i = PDC_DIMM_ATA_PKT;
 344
 345	/*
 346	 * Set up ATA packet
 347	 */
 348	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
 349		buf[i++] = PDC_PKT_READ;
 350	else if (tf->protocol == ATA_PROT_NODATA)
 351		buf[i++] = PDC_PKT_NODATA;
 352	else
 353		buf[i++] = 0;
 354	buf[i++] = 0;			/* reserved */
 355	buf[i++] = portno + 1;		/* seq. id */
 356	buf[i++] = 0xff;		/* delay seq. id */
 357
 358	/* dimm dma S/G, and next-pkt */
 359	dw = i >> 2;
 360	if (tf->protocol == ATA_PROT_NODATA)
 361		buf32[dw] = 0;
 362	else
 363		buf32[dw] = cpu_to_le32(dimm_sg);
 364	buf32[dw + 1] = 0;
 365	i += 8;
 366
 367	if (devno == 0)
 368		dev_reg = ATA_DEVICE_OBS;
 369	else
 370		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
 371
 372	/* select device */
 373	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
 374	buf[i++] = dev_reg;
 375
 376	/* device control register */
 377	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
 378	buf[i++] = tf->ctl;
 379
 380	return i;
 381}
 382
 383static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
 384				     unsigned int portno)
 385{
 386	unsigned int dw;
 387	u32 tmp;
 388	__le32 *buf32 = (__le32 *) buf;
 389
 390	unsigned int host_sg = PDC_20621_DIMM_BASE +
 391			       (PDC_DIMM_WINDOW_STEP * portno) +
 392			       PDC_DIMM_HOST_PRD;
 393	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
 394			       (PDC_DIMM_WINDOW_STEP * portno) +
 395			       PDC_DIMM_HPKT_PRD;
 396
 397	dw = PDC_DIMM_HOST_PKT >> 2;
 398
 399	/*
 400	 * Set up Host DMA packet
 401	 */
 402	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
 403		tmp = PDC_PKT_READ;
 404	else
 405		tmp = 0;
 406	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
 407	tmp |= (0xff << 24);			/* delay seq. id */
 408	buf32[dw + 0] = cpu_to_le32(tmp);
 409	buf32[dw + 1] = cpu_to_le32(host_sg);
 410	buf32[dw + 2] = cpu_to_le32(dimm_sg);
 411	buf32[dw + 3] = 0;
 412}
 413
 414static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 415{
 416	struct scatterlist *sg;
 417	struct ata_port *ap = qc->ap;
 418	struct pdc_port_priv *pp = ap->private_data;
 419	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
 420	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 421	unsigned int portno = ap->port_no;
 422	unsigned int i, si, idx, total_len = 0, sgt_len;
 423	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 424
 425	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
 426
 427	/* hard-code chip #0 */
 428	mmio += PDC_CHIP0_OFS;
 429
 430	/*
 431	 * Build S/G table
 432	 */
 433	idx = 0;
 434	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 435		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
 436		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
 437		total_len += sg_dma_len(sg);
 438	}
 439	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
 440	sgt_len = idx * 4;
 441
 442	/*
 443	 * Build ATA, host DMA packets
 444	 */
 445	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
 446	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
 447
 448	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
 449	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
 450
 451	if (qc->tf.flags & ATA_TFLAG_LBA48)
 452		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
 453	else
 454		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
 455
 456	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
 457
 458	/* copy three S/G tables and two packets to DIMM MMIO window */
 459	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
 460		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
 461	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
 462		    PDC_DIMM_HOST_PRD,
 463		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
 464
 465	/* force host FIFO dump */
 466	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
 467
 468	readl(dimm_mmio);	/* MMIO PCI posting flush */
 469
 470	ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
 471		     i, sgt_len);
 472}
 473
 474static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
 475{
 476	struct ata_port *ap = qc->ap;
 477	struct pdc_port_priv *pp = ap->private_data;
 478	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
 479	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 480	unsigned int portno = ap->port_no;
 481	unsigned int i;
 482
 483	/* hard-code chip #0 */
 484	mmio += PDC_CHIP0_OFS;
 485
 486	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
 487
 488	if (qc->tf.flags & ATA_TFLAG_LBA48)
 489		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
 490	else
 491		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
 492
 493	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
 494
 495	/* copy three S/G tables and two packets to DIMM MMIO window */
 496	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
 497		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
 498
 499	/* force host FIFO dump */
 500	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
 501
 502	readl(dimm_mmio);	/* MMIO PCI posting flush */
 503
 504	ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
 505}
 506
 507static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
 508{
 509	switch (qc->tf.protocol) {
 510	case ATA_PROT_DMA:
 511		pdc20621_dma_prep(qc);
 512		break;
 513	case ATA_PROT_NODATA:
 514		pdc20621_nodata_prep(qc);
 515		break;
 516	default:
 517		break;
 518	}
 519
 520	return AC_ERR_OK;
 521}
 522
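/*
 * HDMA submission helpers: __pdc20621_push_hdma() writes the per-SEQ
 * control register and then the packet offset to the HDMA submit
 * register; pdc20621_push_hdma() either submits immediately or, if a
 * transfer is already in flight, queues the request in the hdma[] ring;
 * pdc20621_pop_hdma(), called from the completion paths, submits the
 * next queued request (if any).
 */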
 523static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
 524				 unsigned int seq,
 525				 u32 pkt_ofs)
 526{
 527	struct ata_port *ap = qc->ap;
 528	struct ata_host *host = ap->host;
 529	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 530
 531	/* hard-code chip #0 */
 532	mmio += PDC_CHIP0_OFS;
 533
 534	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
 535	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
 536
 537	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
 538	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
 539}
 540
 541static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
 542				unsigned int seq,
 543				u32 pkt_ofs)
 544{
 545	struct ata_port *ap = qc->ap;
 546	struct pdc_host_priv *pp = ap->host->private_data;
 547	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
 548
 549	if (!pp->doing_hdma) {
 550		__pdc20621_push_hdma(qc, seq, pkt_ofs);
 551		pp->doing_hdma = 1;
 552		return;
 553	}
 554
 555	pp->hdma[idx].qc = qc;
 556	pp->hdma[idx].seq = seq;
 557	pp->hdma[idx].pkt_ofs = pkt_ofs;
 558	pp->hdma_prod++;
 559}
 560
 561static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
 562{
 563	struct ata_port *ap = qc->ap;
 564	struct pdc_host_priv *pp = ap->host->private_data;
 565	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
 566
 567	/* if nothing on queue, we're done */
 568	if (pp->hdma_prod == pp->hdma_cons) {
 569		pp->doing_hdma = 0;
 570		return;
 571	}
 572
 573	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
 574			     pp->hdma[idx].pkt_ofs);
 575	pp->hdma_cons++;
 576}
 577
 578static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
 579{
 580	struct ata_port *ap = qc->ap;
 581	unsigned int port_no = ap->port_no;
 582	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 583
 584	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
 585	dimm_mmio += PDC_DIMM_HOST_PKT;
 586
 587	ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
 588		     readl(dimm_mmio), readl(dimm_mmio + 4),
 589		     readl(dimm_mmio + 8), readl(dimm_mmio + 12));
 590}
 591
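/*
 * Start a prepared command.  DMA writes go HDMA-first (stage the data
 * into DIMM; the ATA packet is submitted later from the HDMA completion
 * interrupt), everything else submits the ATA packet directly.
 */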
 592static void pdc20621_packet_start(struct ata_queued_cmd *qc)
 593{
 594	struct ata_port *ap = qc->ap;
 595	struct ata_host *host = ap->host;
 596	unsigned int port_no = ap->port_no;
 597	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 598	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 599	u8 seq = (u8) (port_no + 1);
 600	unsigned int port_ofs;
 601
 602	/* hard-code chip #0 */
 603	mmio += PDC_CHIP0_OFS;
 604
 605	wmb();			/* flush PRD, pkt writes */
 606
 607	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
 608
 609	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
 610	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
 611		seq += 4;
 612
 613		pdc20621_dump_hdma(qc);
 614		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
 615		ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
 616			port_ofs + PDC_DIMM_HOST_PKT,
 617			port_ofs + PDC_DIMM_HOST_PKT,
 618			seq);
 619	} else {
 620		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
 621		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
 622
 623		writel(port_ofs + PDC_DIMM_ATA_PKT,
 624		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 625		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 626		ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
 627			port_ofs + PDC_DIMM_ATA_PKT,
 628			port_ofs + PDC_DIMM_ATA_PKT,
 629			seq);
 630	}
 631}
 632
 633static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
 634{
 635	switch (qc->tf.protocol) {
 636	case ATA_PROT_NODATA:
 637		if (qc->tf.flags & ATA_TFLAG_POLLING)
 638			break;
 639		fallthrough;
 640	case ATA_PROT_DMA:
 641		pdc20621_packet_start(qc);
 642		return 0;
 643
 644	case ATAPI_PROT_DMA:
 645		BUG();
 646		break;
 647
 648	default:
 649		break;
 650	}
 651
 652	return ata_sff_qc_issue(qc);
 653}
 654
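/*
 * Per-port completion handling, mirroring the theory of operation above:
 * DMA reads take an ATA-engine interrupt first (which submits the
 * DIMM-to-host HDMA copy) and complete on the HDMA interrupt; DMA writes
 * take the HDMA (host-to-DIMM) interrupt first (which submits the ATA
 * packet) and complete on the ATA interrupt.  doing_hdma tells which of
 * the two steps just finished.
 */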
 655static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
 656					  struct ata_queued_cmd *qc,
 657					  unsigned int doing_hdma,
 658					  void __iomem *mmio)
 659{
 660	unsigned int port_no = ap->port_no;
 661	unsigned int port_ofs =
 662		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
 663	u8 status;
 664	unsigned int handled = 0;
 665
 666	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
 667	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
 668
 669		/* step two - DMA from DIMM to host */
 670		if (doing_hdma) {
 671			ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
 672				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 673			/* get drive status; clear intr; complete txn */
 674			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
 675			ata_qc_complete(qc);
 676			pdc20621_pop_hdma(qc);
 677		}
 678
 679		/* step one - exec ATA command */
 680		else {
 681			u8 seq = (u8) (port_no + 1 + 4);
 682			ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
 683				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 684
 685			/* submit hdma pkt */
 686			pdc20621_dump_hdma(qc);
 687			pdc20621_push_hdma(qc, seq,
 688					   port_ofs + PDC_DIMM_HOST_PKT);
 689		}
 690		handled = 1;
 691
 692	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */
 693
 694		/* step one - DMA from host to DIMM */
 695		if (doing_hdma) {
 696			u8 seq = (u8) (port_no + 1);
 697			ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
 698				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 699
 700			/* submit ata pkt */
 701			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
 702			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
 703			writel(port_ofs + PDC_DIMM_ATA_PKT,
 704			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 705			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 706		}
 707
 708		/* step two - execute ATA command */
 709		else {
 710			ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
 711				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 712			/* get drive status; clear intr; complete txn */
 713			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
 714			ata_qc_complete(qc);
 715			pdc20621_pop_hdma(qc);
 716		}
 717		handled = 1;
 718
 719	/* command completion, but no data xfer */
 720	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
 721
 722		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 723		ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
 724		qc->err_mask |= ac_err_mask(status);
 725		ata_qc_complete(qc);
 726		handled = 1;
 727
 728	} else {
 729		ap->stats.idle_irq++;
 730	}
 731
 732	return handled;
 733}
 734
 735static void pdc20621_irq_clear(struct ata_port *ap)
 736{
 737	ioread8(ap->ioaddr.status_addr);
 738}
 739
 740static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
 741{
 742	struct ata_host *host = dev_instance;
 743	struct ata_port *ap;
 744	u32 mask = 0;
 745	unsigned int i, tmp, port_no;
 746	unsigned int handled = 0;
 747	void __iomem *mmio_base;
 748
 749	if (!host || !host->iomap[PDC_MMIO_BAR])
 750		return IRQ_NONE;
 751
 752	mmio_base = host->iomap[PDC_MMIO_BAR];
 753
 754	/* reading should also clear interrupts */
 755	mmio_base += PDC_CHIP0_OFS;
 756	mask = readl(mmio_base + PDC_20621_SEQMASK);
 757
 758	if (mask == 0xffffffff)
 759		return IRQ_NONE;
 760
 761	mask &= 0xffff;		/* only 16 tags possible */
 762	if (!mask)
 763		return IRQ_NONE;
 764
 765	spin_lock(&host->lock);
 766
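	/*
	 * SEQ IDs 1-4 correspond to the ATA engines for ports 0-3 and
	 * 5-8 to their HDMA transfers, hence the (i > 4) "doing_hdma"
	 * argument below.
	 */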
 767	for (i = 1; i < 9; i++) {
 768		port_no = i - 1;
 769		if (port_no > 3)
 770			port_no -= 4;
 771		if (port_no >= host->n_ports)
 772			ap = NULL;
 773		else
 774			ap = host->ports[port_no];
 775		tmp = mask & (1 << i);
 776		if (ap)
 777			ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
 778		if (tmp && ap) {
 779			struct ata_queued_cmd *qc;
 780
 781			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 782			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 783				handled += pdc20621_host_intr(ap, qc, (i > 4),
 784							      mmio_base);
 785		}
 786	}
 787
 788	spin_unlock(&host->lock);
 789
 790	return IRQ_RETVAL(handled);
 791}
 792
 793static void pdc_freeze(struct ata_port *ap)
 794{
 795	void __iomem *mmio = ap->ioaddr.cmd_addr;
 796	u32 tmp;
 797
 798	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
 799
 800	tmp = readl(mmio + PDC_CTLSTAT);
 801	tmp |= PDC_MASK_INT;
 802	tmp &= ~PDC_DMA_ENABLE;
 803	writel(tmp, mmio + PDC_CTLSTAT);
 804	readl(mmio + PDC_CTLSTAT); /* flush */
 805}
 806
 807static void pdc_thaw(struct ata_port *ap)
 808{
 809	void __iomem *mmio = ap->ioaddr.cmd_addr;
 810	u32 tmp;
 811
 812	/* FIXME: start HDMA engine, if zero ATA engines running */
 813
 814	/* clear IRQ */
 815	ioread8(ap->ioaddr.status_addr);
 816
 817	/* turn IRQ back on */
 818	tmp = readl(mmio + PDC_CTLSTAT);
 819	tmp &= ~PDC_MASK_INT;
 820	writel(tmp, mmio + PDC_CTLSTAT);
 821	readl(mmio + PDC_CTLSTAT); /* flush */
 822}
 823
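/*
 * Pulse the port's reset bit: assert PDC_RESET (retrying briefly until it
 * reads back as set), then deassert it.
 */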
 824static void pdc_reset_port(struct ata_port *ap)
 825{
 826	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
 827	unsigned int i;
 828	u32 tmp;
 829
 830	/* FIXME: handle HDMA copy engine */
 831
 832	for (i = 11; i > 0; i--) {
 833		tmp = readl(mmio);
 834		if (tmp & PDC_RESET)
 835			break;
 836
 837		udelay(100);
 838
 839		tmp |= PDC_RESET;
 840		writel(tmp, mmio);
 841	}
 842
 843	tmp &= ~PDC_RESET;
 844	writel(tmp, mmio);
 845	readl(mmio);	/* flush */
 846}
 847
 848static int pdc_softreset(struct ata_link *link, unsigned int *class,
 849			 unsigned long deadline)
 850{
 851	pdc_reset_port(link->ap);
 852	return ata_sff_softreset(link, class, deadline);
 853}
 854
 855static void pdc_error_handler(struct ata_port *ap)
 856{
 857	if (!ata_port_is_frozen(ap))
 858		pdc_reset_port(ap);
 859
 860	ata_sff_error_handler(ap);
 861}
 862
 863static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
 864{
 865	struct ata_port *ap = qc->ap;
 866
 867	/* make DMA engine forget about the failed command */
 868	if (qc->flags & ATA_QCFLAG_EH)
 869		pdc_reset_port(ap);
 870}
 871
 872static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
 873{
 874	u8 *scsicmd = qc->scsicmd->cmnd;
 875	int pio = 1; /* atapi dma off by default */
 876
 877	/* Whitelist commands that may use DMA. */
 878	switch (scsicmd[0]) {
 879	case WRITE_12:
 880	case WRITE_10:
 881	case WRITE_6:
 882	case READ_12:
 883	case READ_10:
 884	case READ_6:
 885	case 0xad: /* READ_DVD_STRUCTURE */
 886	case 0xbe: /* READ_CD */
 887		pio = 0;
 888	}
 889	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
 890	if (scsicmd[0] == WRITE_10) {
 891		unsigned int lba =
 892			(scsicmd[2] << 24) |
 893			(scsicmd[3] << 16) |
 894			(scsicmd[4] << 8) |
 895			scsicmd[5];
 896		if (lba >= 0xFFFF4FA2)
 897			pio = 1;
 898	}
 899	return pio;
 900}
 901
 902static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 903{
 904	WARN_ON(tf->protocol == ATA_PROT_DMA ||
 905		tf->protocol == ATAPI_PROT_DMA);
 906	ata_sff_tf_load(ap, tf);
 907}
 908
 909
 910static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 911{
 912	WARN_ON(tf->protocol == ATA_PROT_DMA ||
 913		tf->protocol == ATAPI_PROT_DMA);
 914	ata_sff_exec_command(ap, tf);
 915}
 916
 917
 918static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
 919{
 920	port->cmd_addr		= base;
 921	port->data_addr		= base;
 922	port->feature_addr	=
 923	port->error_addr	= base + 0x4;
 924	port->nsect_addr	= base + 0x8;
 925	port->lbal_addr		= base + 0xc;
 926	port->lbam_addr		= base + 0x10;
 927	port->lbah_addr		= base + 0x14;
 928	port->device_addr	= base + 0x18;
 929	port->command_addr	=
 930	port->status_addr	= base + 0x1c;
 931	port->altstatus_addr	=
 932	port->ctl_addr		= base + 0x38;
 933}
 934
 935
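/*
 * DIMM memory is not mapped linearly; it is reached through a 32K window
 * in the DIMM BAR that is repositioned via PDC_DIMM_WINDOW_CTLR.  The two
 * helpers below copy arbitrary-length buffers by sliding that window
 * across the requested range.
 */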
 936static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
 937				   u32 offset, u32 size)
 938{
 939	u32 window_size;
 940	u16 idx;
 941	u8 page_mask;
 942	long dist;
 943	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 944	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
 945
 946	/* hard-code chip #0 */
 947	mmio += PDC_CHIP0_OFS;
 948
 949	page_mask = 0x00;
 950	window_size = 0x2000 * 4; /* 32K byte uchar size */
 951	idx = (u16) (offset / window_size);
 952
 953	writel(0x01, mmio + PDC_GENERAL_CTLR);
 954	readl(mmio + PDC_GENERAL_CTLR);
 955	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
 956	readl(mmio + PDC_DIMM_WINDOW_CTLR);
 957
 958	offset -= (idx * window_size);
 959	idx++;
 960	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
 961		(long) (window_size - offset);
 962	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
 963
 964	psource += dist;
 965	size -= dist;
 966	for (; (long) size >= (long) window_size ;) {
 967		writel(0x01, mmio + PDC_GENERAL_CTLR);
 968		readl(mmio + PDC_GENERAL_CTLR);
 969		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
 970		readl(mmio + PDC_DIMM_WINDOW_CTLR);
 971		memcpy_fromio(psource, dimm_mmio, window_size / 4);
 972		psource += window_size;
 973		size -= window_size;
 974		idx++;
 975	}
 976
 977	if (size) {
 978		writel(0x01, mmio + PDC_GENERAL_CTLR);
 979		readl(mmio + PDC_GENERAL_CTLR);
 980		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
 981		readl(mmio + PDC_DIMM_WINDOW_CTLR);
 982		memcpy_fromio(psource, dimm_mmio, size / 4);
 983	}
 984}
 985
 986
 987static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
 988				 u32 offset, u32 size)
 989{
 990	u32 window_size;
 991	u16 idx;
 992	u8 page_mask;
 993	long dist;
 994	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 995	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
 996
 997	/* hard-code chip #0 */
 998	mmio += PDC_CHIP0_OFS;
 999
1000	page_mask = 0x00;
1001	window_size = 0x2000 * 4;       /* 32K byte uchar size */
1002	idx = (u16) (offset / window_size);
1003
1004	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1005	readl(mmio + PDC_DIMM_WINDOW_CTLR);
1006	offset -= (idx * window_size);
1007	idx++;
1008	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1009		(long) (window_size - offset);
1010	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1011	writel(0x01, mmio + PDC_GENERAL_CTLR);
1012	readl(mmio + PDC_GENERAL_CTLR);
1013
1014	psource += dist;
1015	size -= dist;
1016	for (; (long) size >= (long) window_size ;) {
1017		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1018		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1019		memcpy_toio(dimm_mmio, psource, window_size / 4);
1020		writel(0x01, mmio + PDC_GENERAL_CTLR);
1021		readl(mmio + PDC_GENERAL_CTLR);
1022		psource += window_size;
1023		size -= window_size;
1024		idx++;
1025	}
1026
1027	if (size) {
1028		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1029		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1030		memcpy_toio(dimm_mmio, psource, size / 4);
1031		writel(0x01, mmio + PDC_GENERAL_CTLR);
1032		readl(mmio + PDC_GENERAL_CTLR);
1033	}
1034}
1035
1036
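/*
 * Read one byte from a DIMM SPD EEPROM through the chip's I2C master.
 * Returns 1 on success (byte in *pdata), 0 on timeout.
 */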
1037static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1038				      u32 subaddr, u32 *pdata)
1039{
1040	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1041	u32 i2creg  = 0;
1042	u32 status;
1043	u32 count = 0;
1044
1045	/* hard-code chip #0 */
1046	mmio += PDC_CHIP0_OFS;
1047
1048	i2creg |= device << 24;
1049	i2creg |= subaddr << 16;
1050
1051	/* Set the device and subaddress */
1052	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1053	readl(mmio + PDC_I2C_ADDR_DATA);
1054
1055	/* Write Control to perform read operation, mask int */
1056	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1057	       mmio + PDC_I2C_CONTROL);
1058
1059	for (count = 0; count <= 1000; count ++) {
1060		status = readl(mmio + PDC_I2C_CONTROL);
1061		if (status & PDC_I2C_COMPLETE) {
1062			status = readl(mmio + PDC_I2C_ADDR_DATA);
1063			break;
1064		} else if (count == 1000)
1065			return 0;
1066	}
1067
1068	*pdata = (status >> 8) & 0x000000ff;
1069	return 1;
1070}
1071
1072
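/*
 * Guess the DIMM speed grade from SPD data: byte 126 is checked for an
 * explicit 100 (MHz) marking, otherwise SPD byte 9 (presumably the SDRAM
 * cycle time; 0x75 == 7.5 ns or faster) selects 133 MHz.  Returns 0 if
 * the SPD cannot be read or nothing matches.
 */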
1073static int pdc20621_detect_dimm(struct ata_host *host)
1074{
1075	u32 data = 0;
1076	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1077			     PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1078		if (data == 100)
1079			return 100;
1080	} else
1081		return 0;
1082
1083	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1084		if (data <= 0x75)
1085			return 133;
1086	} else
1087		return 0;
1088
1089	return 0;
1090}
1091
1092
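/*
 * Read the SDRAM geometry and timing bytes from the DIMM's SPD EEPROM
 * (spd0[] is indexed by SPD byte number) and pack them into the DIMM0
 * Module Control Register value.  Returns the detected DIMM size in MB.
 */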
1093static int pdc20621_prog_dimm0(struct ata_host *host)
1094{
1095	u32 spd0[50];
1096	u32 data = 0;
1097	int size, i;
1098	u8 bdimmsize;
1099	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1100	static const struct {
1101		unsigned int reg;
1102		unsigned int ofs;
1103	} pdc_i2c_read_data [] = {
1104		{ PDC_DIMM_SPD_TYPE, 11 },
1105		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
1106		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
1107		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
1108		{ PDC_DIMM_SPD_ROW_NUM, 3 },
1109		{ PDC_DIMM_SPD_BANK_NUM, 17 },
1110		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
1111		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1112		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1113		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1114		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1115		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
1116	};
1117
1118	/* hard-code chip #0 */
1119	mmio += PDC_CHIP0_OFS;
1120
1121	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1122		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1123				  pdc_i2c_read_data[i].reg,
1124				  &spd0[pdc_i2c_read_data[i].ofs]);
1125
1126	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1127	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1128		((((spd0[27] + 9) / 10) - 1) << 8) ;
1129	data |= (((((spd0[29] > spd0[28])
1130		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1131	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1132
1133	if (spd0[18] & 0x08)
1134		data |= ((0x03) << 14);
1135	else if (spd0[18] & 0x04)
1136		data |= ((0x02) << 14);
1137	else if (spd0[18] & 0x01)
1138		data |= ((0x01) << 14);
1139	else
1140		data |= (0 << 14);
1141
1142	/*
1143	   Calculate the DIMM size from the SPD geometry (bdimmsize is its
1144	   log2) and merge it into the start/end address field below.
1145	*/
1146
1147	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1148	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
1149	data |= (((size / 16) - 1) << 16);
1150	data |= (0 << 23);
1151	data |= 8;
1152	writel(data, mmio + PDC_DIMM0_CONTROL);
1153	readl(mmio + PDC_DIMM0_CONTROL);
1154	return size;
1155}
1156
1157
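/*
 * Program the DIMM Module Global Control Register: load the defaults,
 * enable ECC if the SPD type byte reports an ECC module, then trigger
 * DIMM initialization (bit 19) and poll for it to complete.  Returns
 * non-zero on failure.
 */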
1158static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1159{
1160	u32 data, spd0;
1161	int error, i;
1162	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1163
1164	/* hard-code chip #0 */
1165	mmio += PDC_CHIP0_OFS;
1166
1167	/*
1168	  Set the DIMM Module Global Control Register to its default (0x022259F1):
1169	  DIMM Arbitration Disable (bit 20)
1170	  DIMM Data/Control Output Driving Selection (bits 12-15)
1171	  Refresh Enable (bit 17)
1172	*/
1173
1174	data = 0x022259F1;
1175	writel(data, mmio + PDC_SDRAM_CONTROL);
1176	readl(mmio + PDC_SDRAM_CONTROL);
1177
1178	/* Turn on ECC if the SPD type byte reports an ECC DIMM */
1179	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1180			       PDC_DIMM_SPD_TYPE, &spd0)) {
1181		dev_err(host->dev,
1182			"Failed in i2c read: device=%#x, subaddr=%#x\n",
1183			PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1184		return 1;
1185	}
1186	if (spd0 == 0x02) {
1187		data |= (0x01 << 16);
1188		writel(data, mmio + PDC_SDRAM_CONTROL);
1189		readl(mmio + PDC_SDRAM_CONTROL);
1190		dev_err(host->dev, "Local DIMM ECC Enabled\n");
1191	}
1192
1193	/* DIMM Initialization Select/Enable (bit 18/19) */
1194	data &= (~(1<<18));
1195	data |= (1<<19);
1196	writel(data, mmio + PDC_SDRAM_CONTROL);
1197
1198	error = 1;
1199	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
1200		data = readl(mmio + PDC_SDRAM_CONTROL);
1201		if (!(data & (1<<19))) {
1202			error = 0;
1203			break;
1204		}
1205		msleep(i*100);
1206	}
1207	return error;
1208}
1209
1210
1211static unsigned int pdc20621_dimm_init(struct ata_host *host)
1212{
1213	int speed, size, length;
1214	u32 addr, spd0, pci_status;
1215	u32 time_period = 0;
1216	u32 tcount = 0;
1217	u32 ticks = 0;
1218	u32 clock = 0;
1219	u32 fparam = 0;
1220	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1221
1222	/* hard-code chip #0 */
1223	mmio += PDC_CHIP0_OFS;
1224
1225	/* Initialize PLL based upon PCI Bus Frequency */
1226
1227	/* Initialize Time Period Register */
1228	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1229	time_period = readl(mmio + PDC_TIME_PERIOD);
1230	dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);
1231
1232	/* Enable timer */
1233	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1234	readl(mmio + PDC_TIME_CONTROL);
1235
1236	/* Wait 3 seconds */
1237	msleep(3000);
1238
1239	/*
1240	   When the timer is enabled, the counter is decremented every
1241	   internal clock cycle.
1242	*/
1243
1244	tcount = readl(mmio + PDC_TIME_COUNTER);
1245	dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);
1246
1247	/*
1248	   If the SX4 is on a PCI-X bus, then after 3 seconds the timer
1249	   counter register should be >= (0xffffffff - 3x10^8).
1250	*/
1251	if (tcount >= PCI_X_TCOUNT) {
1252		ticks = (time_period - tcount);
1253		dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);
1254
1255		clock = (ticks / 300000);
1256		dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
1257			clock, clock);
1258
1259		clock = (clock * 33);
1260		dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
1261			clock, clock);
1262
1263		/* PLL F Param (bit 22:16) */
1264		fparam = (1400000 / clock) - 2;
1265		dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);
1266
1267		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1268		pci_status = (0x8a001824 | (fparam << 16));
1269	} else
1270		pci_status = PCI_PLL_INIT;
1271
1272	/* Initialize PLL. */
1273	dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
1274	writel(pci_status, mmio + PDC_CTL_STATUS);
1275	readl(mmio + PDC_CTL_STATUS);
1276
1277	/*
1278	   Read the DIMM's SPD data over the I2C interface,
1279	   and program the DIMM Module Controller.
1280	*/
1281	if (!(speed = pdc20621_detect_dimm(host))) {
1282		dev_err(host->dev, "Detect Local DIMM Fail\n");
1283		return 1;	/* DIMM error */
1284	}
1285	dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);
1286
1287	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
1288	size = pdc20621_prog_dimm0(host);
1289	dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
1290
1291	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
1292	if (pdc20621_prog_dimm_global(host)) {
1293		dev_err(host->dev,
1294			"Programming DIMM Module Global Control Register Fail\n");
1295		return 1;
1296	}
1297
1298	if (dimm_test) {
1299		u8 test_pattern1[40] =
1300			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
1301			'N','o','t',' ','Y','e','t',' ',
1302			'D','e','f','i','n','e','d',' ',
1303			'1','.','1','0',
1304			'9','8','0','3','1','6','1','2',0,0};
1305		u8 test_pattern2[40] = {0};
1306
1307		pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
1308		pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
1309
1310		pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
1311		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
1312		dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
1313		       test_pattern2[1], &(test_pattern2[2]));
1314		pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
1315				       40);
1316		dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
1317			 test_pattern2[0],
1318			 test_pattern2[1], &(test_pattern2[2]));
1319
1320		pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
1321		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
1322		dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
1323			 test_pattern2[0],
1324			 test_pattern2[1], &(test_pattern2[2]));
1325	}
1326
1327	/* ECC initialization. */
1328
1329	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1330			       PDC_DIMM_SPD_TYPE, &spd0)) {
1331		dev_err(host->dev,
1332			"Failed in i2c read: device=%#x, subaddr=%#x\n",
1333		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1334		return 1;
1335	}
1336	if (spd0 == 0x02) {
1337		void *buf;
1338		dev_dbg(host->dev, "Start ECC initialization\n");
1339		addr = 0;
1340		length = size * 1024 * 1024;
1341		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1342		if (!buf)
1343			return 1;
1344		while (addr < length) {
1345			pdc20621_put_to_dimm(host, buf, addr,
1346					     ECC_ERASE_BUF_SZ);
1347			addr += ECC_ERASE_BUF_SZ;
1348		}
1349		kfree(buf);
1350		dev_dbg(host->dev, "Finish ECC initialization\n");
1351	}
1352	return 0;
1353}
1354
1355
1356static void pdc_20621_init(struct ata_host *host)
1357{
1358	u32 tmp;
1359	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1360
1361	/* hard-code chip #0 */
1362	mmio += PDC_CHIP0_OFS;
1363
1364	/*
1365	 * Select page 0x40 for our 32k DIMM window
1366	 */
1367	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1368	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
1369	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1370
1371	/*
1372	 * Reset Host DMA
1373	 */
1374	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1375	tmp |= PDC_RESET;
1376	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1377	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
1378
1379	udelay(10);
1380
1381	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1382	tmp &= ~PDC_RESET;
1383	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1384	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
1385}
1386
1387static int pdc_sata_init_one(struct pci_dev *pdev,
1388			     const struct pci_device_id *ent)
1389{
1390	const struct ata_port_info *ppi[] =
1391		{ &pdc_port_info[ent->driver_data], NULL };
1392	struct ata_host *host;
1393	struct pdc_host_priv *hpriv;
1394	int i, rc;
1395
1396	ata_print_version_once(&pdev->dev, DRV_VERSION);
1397
1398	/* allocate host */
1399	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1400	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1401	if (!host || !hpriv)
1402		return -ENOMEM;
1403
1404	host->private_data = hpriv;
1405
1406	/* acquire resources and fill host */
1407	rc = pcim_enable_device(pdev);
1408	if (rc)
1409		return rc;
1410
1411	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1412				DRV_NAME);
1413	if (rc == -EBUSY)
1414		pcim_pin_device(pdev);
1415	if (rc)
1416		return rc;
1417	host->iomap = pcim_iomap_table(pdev);
1418
1419	for (i = 0; i < 4; i++) {
1420		struct ata_port *ap = host->ports[i];
1421		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1422		unsigned int offset = 0x200 + i * 0x80;
1423
1424		pdc_sata_setup_port(&ap->ioaddr, base + offset);
1425
1426		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1427		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1428		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1429	}
1430
1431	/* configure and activate */
1432	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
1433	if (rc)
1434		return rc;
1435
1436	if (pdc20621_dimm_init(host))
1437		return -ENOMEM;
1438	pdc_20621_init(host);
1439
1440	pci_set_master(pdev);
1441	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1442				 IRQF_SHARED, &pdc_sata_sht);
1443}
1444
1445module_pci_driver(pdc_sata_pci_driver);
1446
1447MODULE_AUTHOR("Jeff Garzik");
1448MODULE_DESCRIPTION("Promise SATA low-level driver");
1449MODULE_LICENSE("GPL");
1450MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1451MODULE_VERSION(DRV_VERSION);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  sata_sx4.c - Promise SATA
   4 *
   5 *  Maintained by:  Tejun Heo <tj@kernel.org>
   6 *  		    Please ALWAYS copy linux-ide@vger.kernel.org
   7 *		    on emails.
   8 *
   9 *  Copyright 2003-2004 Red Hat, Inc.
  10 *
  11 *  libata documentation is available via 'make {ps|pdf}docs',
  12 *  as Documentation/driver-api/libata.rst
  13 *
  14 *  Hardware documentation available under NDA.
  15 */
  16
  17/*
  18	Theory of operation
  19	-------------------
  20
  21	The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
  22	engine, DIMM memory, and four ATA engines (one per SATA port).
  23	Data is copied to/from DIMM memory by the HDMA engine, before
  24	handing off to one (or more) of the ATA engines.  The ATA
  25	engines operate solely on DIMM memory.
  26
  27	The SX4 behaves like a PATA chip, with no SATA controls or
  28	knowledge whatsoever, leading to the presumption that
  29	PATA<->SATA bridges exist on SX4 boards, external to the
  30	PDC20621 chip itself.
  31
  32	The chip is quite capable, supporting an XOR engine and linked
  33	hardware commands (permits a string to transactions to be
  34	submitted and waited-on as a single unit), and an optional
  35	microprocessor.
  36
  37	The limiting factor is largely software.  This Linux driver was
  38	written to multiplex the single HDMA engine to copy disk
  39	transactions into a fixed DIMM memory space, from where an ATA
  40	engine takes over.  As a result, each WRITE looks like this:
  41
  42		submit HDMA packet to hardware
  43		hardware copies data from system memory to DIMM
  44		hardware raises interrupt
  45
  46		submit ATA packet to hardware
  47		hardware executes ATA WRITE command, w/ data in DIMM
  48		hardware raises interrupt
  49
  50	and each READ looks like this:
  51
  52		submit ATA packet to hardware
  53		hardware executes ATA READ command, w/ data in DIMM
  54		hardware raises interrupt
  55
  56		submit HDMA packet to hardware
  57		hardware copies data from DIMM to system memory
  58		hardware raises interrupt
  59
  60	This is a very slow, lock-step way of doing things that can
  61	certainly be improved by motivated kernel hackers.
  62
  63 */
  64
  65#include <linux/kernel.h>
  66#include <linux/module.h>
  67#include <linux/pci.h>
  68#include <linux/slab.h>
  69#include <linux/blkdev.h>
  70#include <linux/delay.h>
  71#include <linux/interrupt.h>
  72#include <linux/device.h>
  73#include <scsi/scsi_host.h>
  74#include <scsi/scsi_cmnd.h>
  75#include <linux/libata.h>
  76#include "sata_promise.h"
  77
  78#define DRV_NAME	"sata_sx4"
  79#define DRV_VERSION	"0.12"
  80
 
 
 
  81
  82enum {
  83	PDC_MMIO_BAR		= 3,
  84	PDC_DIMM_BAR		= 4,
  85
  86	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */
  87
  88	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
  89	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
  90	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
  91	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */
  92
  93	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */
  94
  95	PDC_20621_SEQCTL	= 0x400,
  96	PDC_20621_SEQMASK	= 0x480,
  97	PDC_20621_GENERAL_CTL	= 0x484,
  98	PDC_20621_PAGE_SIZE	= (32 * 1024),
  99
 100	/* chosen, not constant, values; we design our own DIMM mem map */
 101	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
 102	PDC_20621_DIMM_BASE	= 0x00200000,
 103	PDC_20621_DIMM_DATA	= (64 * 1024),
 104	PDC_DIMM_DATA_STEP	= (256 * 1024),
 105	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
 106	PDC_DIMM_HOST_PRD	= (6 * 1024),
 107	PDC_DIMM_HOST_PKT	= (128 * 0),
 108	PDC_DIMM_HPKT_PRD	= (128 * 1),
 109	PDC_DIMM_ATA_PKT	= (128 * 2),
 110	PDC_DIMM_APKT_PRD	= (128 * 3),
 111	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
 112	PDC_PAGE_WINDOW		= 0x40,
 113	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
 114				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
 115	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
 116
 117	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */
 118
 119	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
 120				  (1<<23),
 121
 122	board_20621		= 0,	/* FastTrak S150 SX4 */
 123
 124	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
 125	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
 126	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */
 127
 128	PDC_MAX_HDMA		= 32,
 129	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),
 130
 131	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
 132	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
 133	PDC_I2C_CONTROL			= 0x48,
 134	PDC_I2C_ADDR_DATA		= 0x4C,
 135	PDC_DIMM0_CONTROL		= 0x80,
 136	PDC_DIMM1_CONTROL		= 0x84,
 137	PDC_SDRAM_CONTROL		= 0x88,
 138	PDC_I2C_WRITE			= 0,		/* master -> slave */
 139	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
 140	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
 141	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
 142	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
 143	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
 144	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
 145	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
 146	PDC_DIMM_SPD_ROW_NUM		= 3,
 147	PDC_DIMM_SPD_COLUMN_NUM		= 4,
 148	PDC_DIMM_SPD_MODULE_ROW		= 5,
 149	PDC_DIMM_SPD_TYPE		= 11,
 150	PDC_DIMM_SPD_FRESH_RATE		= 12,
 151	PDC_DIMM_SPD_BANK_NUM		= 17,
 152	PDC_DIMM_SPD_CAS_LATENCY	= 18,
 153	PDC_DIMM_SPD_ATTRIBUTE		= 21,
 154	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
 155	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
 156	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
 157	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
 158	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
 159	PDC_CTL_STATUS			= 0x08,
 160	PDC_DIMM_WINDOW_CTLR		= 0x0C,
 161	PDC_TIME_CONTROL		= 0x3C,
 162	PDC_TIME_PERIOD			= 0x40,
 163	PDC_TIME_COUNTER		= 0x44,
 164	PDC_GENERAL_CTLR		= 0x484,
 165	PCI_PLL_INIT			= 0x8A531824,
 166	PCI_X_TCOUNT			= 0xEE1E5CFF,
 167
 168	/* PDC_TIME_CONTROL bits */
 169	PDC_TIMER_BUZZER		= (1 << 10),
 170	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
 171	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
 172	PDC_TIMER_ENABLE		= (1 << 7),
 173	PDC_TIMER_MASK_INT		= (1 << 5),
 174	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
 175	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
 176					  PDC_TIMER_ENABLE |
 177					  PDC_TIMER_MASK_INT,
 178};
 179
 180#define ECC_ERASE_BUF_SZ (128 * 1024)
 181
 182struct pdc_port_priv {
 183	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
 184	u8			*pkt;
 185	dma_addr_t		pkt_dma;
 186};
 187
 188struct pdc_host_priv {
 189	unsigned int		doing_hdma;
 190	unsigned int		hdma_prod;
 191	unsigned int		hdma_cons;
 192	struct {
 193		struct ata_queued_cmd *qc;
 194		unsigned int	seq;
 195		unsigned long	pkt_ofs;
 196	} hdma[32];
 197};
 198
 199
 200static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 201static void pdc_error_handler(struct ata_port *ap);
 202static void pdc_freeze(struct ata_port *ap);
 203static void pdc_thaw(struct ata_port *ap);
 204static int pdc_port_start(struct ata_port *ap);
 205static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
 206static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 207static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 208static unsigned int pdc20621_dimm_init(struct ata_host *host);
 209static int pdc20621_detect_dimm(struct ata_host *host);
 210static unsigned int pdc20621_i2c_read(struct ata_host *host,
 211				      u32 device, u32 subaddr, u32 *pdata);
 212static int pdc20621_prog_dimm0(struct ata_host *host);
 213static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
 214#ifdef ATA_VERBOSE_DEBUG
 215static void pdc20621_get_from_dimm(struct ata_host *host,
 216				   void *psource, u32 offset, u32 size);
 217#endif
 218static void pdc20621_put_to_dimm(struct ata_host *host,
 219				 void *psource, u32 offset, u32 size);
 220static void pdc20621_irq_clear(struct ata_port *ap);
 221static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
 222static int pdc_softreset(struct ata_link *link, unsigned int *class,
 223			 unsigned long deadline);
 224static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
 225static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
 226
 227
 228static struct scsi_host_template pdc_sata_sht = {
 229	ATA_BASE_SHT(DRV_NAME),
 230	.sg_tablesize		= LIBATA_MAX_PRD,
 231	.dma_boundary		= ATA_DMA_BOUNDARY,
 232};
 233
 234/* TODO: inherit from base port_ops after converting to new EH */
 235static struct ata_port_operations pdc_20621_ops = {
 236	.inherits		= &ata_sff_port_ops,
 237
 238	.check_atapi_dma	= pdc_check_atapi_dma,
 239	.qc_prep		= pdc20621_qc_prep,
 240	.qc_issue		= pdc20621_qc_issue,
 241
 242	.freeze			= pdc_freeze,
 243	.thaw			= pdc_thaw,
 244	.softreset		= pdc_softreset,
 245	.error_handler		= pdc_error_handler,
 246	.lost_interrupt		= ATA_OP_NULL,
 247	.post_internal_cmd	= pdc_post_internal_cmd,
 248
 249	.port_start		= pdc_port_start,
 250
 251	.sff_tf_load		= pdc_tf_load_mmio,
 252	.sff_exec_command	= pdc_exec_command_mmio,
 253	.sff_irq_clear		= pdc20621_irq_clear,
 254};
 255
 256static const struct ata_port_info pdc_port_info[] = {
 257	/* board_20621 */
 258	{
 259		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
 260				  ATA_FLAG_PIO_POLLING,
 261		.pio_mask	= ATA_PIO4,
 262		.mwdma_mask	= ATA_MWDMA2,
 263		.udma_mask	= ATA_UDMA6,
 264		.port_ops	= &pdc_20621_ops,
 265	},
 266
 267};
 268
 269static const struct pci_device_id pdc_sata_pci_tbl[] = {
 270	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
 271
 272	{ }	/* terminate list */
 273};
 274
 275static struct pci_driver pdc_sata_pci_driver = {
 276	.name			= DRV_NAME,
 277	.id_table		= pdc_sata_pci_tbl,
 278	.probe			= pdc_sata_init_one,
 279	.remove			= ata_pci_remove_one,
 280};
 281
 282
 283static int pdc_port_start(struct ata_port *ap)
 284{
 285	struct device *dev = ap->host->dev;
 286	struct pdc_port_priv *pp;
 287
 288	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 289	if (!pp)
 290		return -ENOMEM;
 291
 292	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
 293	if (!pp->pkt)
 294		return -ENOMEM;
 295
 296	ap->private_data = pp;
 297
 298	return 0;
 299}
 300
 301static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
 302				   unsigned int total_len)
 303{
 304	u32 addr;
 305	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
 306	__le32 *buf32 = (__le32 *) buf;
 307
 308	/* output ATA packet S/G table */
 309	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
 310	       (PDC_DIMM_DATA_STEP * portno);
 311	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
 312	buf32[dw] = cpu_to_le32(addr);
 313	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
 314
 315	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
 316		PDC_20621_DIMM_BASE +
 317		       (PDC_DIMM_WINDOW_STEP * portno) +
 318		       PDC_DIMM_APKT_PRD,
 319		buf32[dw], buf32[dw + 1]);
 320}
 321
 322static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
 323				    unsigned int total_len)
 324{
 325	u32 addr;
 326	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
 327	__le32 *buf32 = (__le32 *) buf;
 328
 329	/* output Host DMA packet S/G table */
 330	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
 331	       (PDC_DIMM_DATA_STEP * portno);
 332
 333	buf32[dw] = cpu_to_le32(addr);
 334	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
 335
 336	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
 337		PDC_20621_DIMM_BASE +
 338		       (PDC_DIMM_WINDOW_STEP * portno) +
 339		       PDC_DIMM_HPKT_PRD,
 340		buf32[dw], buf32[dw + 1]);
 341}
 342
 343static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
 344					    unsigned int devno, u8 *buf,
 345					    unsigned int portno)
 346{
 347	unsigned int i, dw;
 348	__le32 *buf32 = (__le32 *) buf;
 349	u8 dev_reg;
 350
 351	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
 352			       (PDC_DIMM_WINDOW_STEP * portno) +
 353			       PDC_DIMM_APKT_PRD;
 354	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
 355
 356	i = PDC_DIMM_ATA_PKT;
 357
 358	/*
 359	 * Set up ATA packet
 360	 */
 361	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
 362		buf[i++] = PDC_PKT_READ;
 363	else if (tf->protocol == ATA_PROT_NODATA)
 364		buf[i++] = PDC_PKT_NODATA;
 365	else
 366		buf[i++] = 0;
 367	buf[i++] = 0;			/* reserved */
 368	buf[i++] = portno + 1;		/* seq. id */
 369	buf[i++] = 0xff;		/* delay seq. id */
 370
 371	/* dimm dma S/G, and next-pkt */
 372	dw = i >> 2;
 373	if (tf->protocol == ATA_PROT_NODATA)
 374		buf32[dw] = 0;
 375	else
 376		buf32[dw] = cpu_to_le32(dimm_sg);
 377	buf32[dw + 1] = 0;
 378	i += 8;
 379
 380	if (devno == 0)
 381		dev_reg = ATA_DEVICE_OBS;
 382	else
 383		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
 384
 385	/* select device */
 386	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
 387	buf[i++] = dev_reg;
 388
 389	/* device control register */
 390	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
 391	buf[i++] = tf->ctl;
 392
 393	return i;
 394}
 395
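/*
 * Build the four-dword Host DMA packet at PDC_DIMM_HOST_PKT: a control
 * word carrying the direction bit and the HDMA sequence id (port
 * number + 5), the DIMM address of the host-side S/G table, the DIMM
 * address of the DIMM-side S/G entry, and a zero terminator.
 */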
 396static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
 397				     unsigned int portno)
 398{
 399	unsigned int dw;
 400	u32 tmp;
 401	__le32 *buf32 = (__le32 *) buf;
 402
 403	unsigned int host_sg = PDC_20621_DIMM_BASE +
 404			       (PDC_DIMM_WINDOW_STEP * portno) +
 405			       PDC_DIMM_HOST_PRD;
 406	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
 407			       (PDC_DIMM_WINDOW_STEP * portno) +
 408			       PDC_DIMM_HPKT_PRD;
 409	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
 410	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
 411
 412	dw = PDC_DIMM_HOST_PKT >> 2;
 413
 414	/*
 415	 * Set up Host DMA packet
 416	 */
 417	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
 418		tmp = PDC_PKT_READ;
 419	else
 420		tmp = 0;
 421	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
 422	tmp |= (0xff << 24);			/* delay seq. id */
 423	buf32[dw + 0] = cpu_to_le32(tmp);
 424	buf32[dw + 1] = cpu_to_le32(host_sg);
 425	buf32[dw + 2] = cpu_to_le32(dimm_sg);
 426	buf32[dw + 3] = 0;
 427
 428	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
 429		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
 430			PDC_DIMM_HOST_PKT,
 431		buf32[dw + 0],
 432		buf32[dw + 1],
 433		buf32[dw + 2],
 434		buf32[dw + 3]);
 435}
 436
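/*
 * DMA commands are staged entirely in pp->dimm_buf first: the
 * host-side PRD table built from the scatterlist (starting at
 * PDC_DIMM_HEADER_SZ), the two single-entry DIMM S/G tables, and the
 * ATA and Host DMA packets.  The buffer is then copied into this
 * port's window of DIMM memory through the DIMM BAR, and
 * PDC_20621_GENERAL_CTL is written to force a host FIFO dump.
 */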
 437static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 438{
 439	struct scatterlist *sg;
 440	struct ata_port *ap = qc->ap;
 441	struct pdc_port_priv *pp = ap->private_data;
 442	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
 443	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 444	unsigned int portno = ap->port_no;
 445	unsigned int i, si, idx, total_len = 0, sgt_len;
 446	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 447
 448	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
 449
 450	VPRINTK("ata%u: ENTER\n", ap->print_id);
 451
 452	/* hard-code chip #0 */
 453	mmio += PDC_CHIP0_OFS;
 454
 455	/*
 456	 * Build S/G table
 457	 */
 458	idx = 0;
 459	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 460		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
 461		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
 462		total_len += sg_dma_len(sg);
 463	}
 464	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
 465	sgt_len = idx * 4;
 466
 467	/*
 468	 * Build ATA, host DMA packets
 469	 */
 470	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
 471	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
 472
 473	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
 474	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
 475
 476	if (qc->tf.flags & ATA_TFLAG_LBA48)
 477		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
 478	else
 479		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
 480
 481	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
 482
 483	/* copy three S/G tables and two packets to DIMM MMIO window */
 484	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
 485		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
 486	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
 487		    PDC_DIMM_HOST_PRD,
 488		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
 489
 490	/* force host FIFO dump */
 491	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
 492
 493	readl(dimm_mmio);	/* MMIO PCI posting flush */
 494
 495	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
 496}
 497
 498static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
 499{
 500	struct ata_port *ap = qc->ap;
 501	struct pdc_port_priv *pp = ap->private_data;
 502	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
 503	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 504	unsigned int portno = ap->port_no;
 505	unsigned int i;
 506
 507	VPRINTK("ata%u: ENTER\n", ap->print_id);
 508
 509	/* hard-code chip #0 */
 510	mmio += PDC_CHIP0_OFS;
 511
 512	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
 513
 514	if (qc->tf.flags & ATA_TFLAG_LBA48)
 515		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
 516	else
 517		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
 518
 519	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
 520
 521	/* copy three S/G tables and two packets to DIMM MMIO window */
 522	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
 523		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
 524
 525	/* force host FIFO dump */
 526	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
 527
 528	readl(dimm_mmio);	/* MMIO PCI posting flush */
 529
 530	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
 531}
 532
 533static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
 534{
 535	switch (qc->tf.protocol) {
 536	case ATA_PROT_DMA:
 537		pdc20621_dma_prep(qc);
 538		break;
 539	case ATA_PROT_NODATA:
 540		pdc20621_nodata_prep(qc);
 541		break;
 542	default:
 543		break;
 544	}
 545
 546	return AC_ERR_OK;
 547}
 548
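/*
 * There is only one Host DMA engine for all four ports, so HDMA
 * submissions are serialized in software.  __pdc20621_push_hdma() arms
 * the sequence interrupt and writes the packet offset to
 * PDC_HDMA_PKT_SUBMIT.  pdc20621_push_hdma() submits directly if the
 * engine is idle, otherwise parks the request in a small ring buffer
 * (indexed via PDC_HDMA_Q_MASK); pdc20621_pop_hdma(), called from the
 * completion path, launches the next queued packet or marks the engine
 * idle when the queue is empty.
 */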
 549static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
 550				 unsigned int seq,
 551				 u32 pkt_ofs)
 552{
 553	struct ata_port *ap = qc->ap;
 554	struct ata_host *host = ap->host;
 555	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 556
 557	/* hard-code chip #0 */
 558	mmio += PDC_CHIP0_OFS;
 559
 560	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
 561	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
 562
 563	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
 564	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
 565}
 566
 567static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
 568				unsigned int seq,
 569				u32 pkt_ofs)
 570{
 571	struct ata_port *ap = qc->ap;
 572	struct pdc_host_priv *pp = ap->host->private_data;
 573	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
 574
 575	if (!pp->doing_hdma) {
 576		__pdc20621_push_hdma(qc, seq, pkt_ofs);
 577		pp->doing_hdma = 1;
 578		return;
 579	}
 580
 581	pp->hdma[idx].qc = qc;
 582	pp->hdma[idx].seq = seq;
 583	pp->hdma[idx].pkt_ofs = pkt_ofs;
 584	pp->hdma_prod++;
 585}
 586
 587static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
 588{
 589	struct ata_port *ap = qc->ap;
 590	struct pdc_host_priv *pp = ap->host->private_data;
 591	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
 592
 593	/* if nothing on queue, we're done */
 594	if (pp->hdma_prod == pp->hdma_cons) {
 595		pp->doing_hdma = 0;
 596		return;
 597	}
 598
 599	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
 600			     pp->hdma[idx].pkt_ofs);
 601	pp->hdma_cons++;
 602}
 603
 604#ifdef ATA_VERBOSE_DEBUG
 605static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
 606{
 607	struct ata_port *ap = qc->ap;
 608	unsigned int port_no = ap->port_no;
 609	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 610
 611	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
 612	dimm_mmio += PDC_DIMM_HOST_PKT;
 613
 614	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
 615	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
 616	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
 617	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
 618}
 619#else
 620static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
 621#endif /* ATA_VERBOSE_DEBUG */
 622
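/*
 * Kick off a queued command.  Sequence ids 1-4 belong to the per-port
 * ATA engines, 5-8 to their HDMA counterparts.  A DMA write begins
 * with the HDMA copy from host memory into DIMM, so its Host DMA
 * packet is pushed first; reads and non-data commands start by
 * submitting the ATA packet already sitting in this port's DIMM
 * window.
 */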
 623static void pdc20621_packet_start(struct ata_queued_cmd *qc)
 624{
 625	struct ata_port *ap = qc->ap;
 626	struct ata_host *host = ap->host;
 627	unsigned int port_no = ap->port_no;
 628	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 629	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 630	u8 seq = (u8) (port_no + 1);
 631	unsigned int port_ofs;
 632
 633	/* hard-code chip #0 */
 634	mmio += PDC_CHIP0_OFS;
 635
 636	VPRINTK("ata%u: ENTER\n", ap->print_id);
 637
 638	wmb();			/* flush PRD, pkt writes */
 639
 640	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
 641
 642	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
 643	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
 644		seq += 4;
 645
 646		pdc20621_dump_hdma(qc);
 647		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
 648		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
 649			port_ofs + PDC_DIMM_HOST_PKT,
 650			port_ofs + PDC_DIMM_HOST_PKT,
 651			seq);
 652	} else {
 653		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
 654		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
 655
 656		writel(port_ofs + PDC_DIMM_ATA_PKT,
 657		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 658		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 659		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
 660			port_ofs + PDC_DIMM_ATA_PKT,
 661			port_ofs + PDC_DIMM_ATA_PKT,
 662			seq);
 663	}
 664}
 665
 666static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
 667{
 668	switch (qc->tf.protocol) {
 669	case ATA_PROT_NODATA:
 670		if (qc->tf.flags & ATA_TFLAG_POLLING)
 671			break;
 672		fallthrough;
 673	case ATA_PROT_DMA:
 674		pdc20621_packet_start(qc);
 675		return 0;
 676
 677	case ATAPI_PROT_DMA:
 678		BUG();
 679		break;
 680
 681	default:
 682		break;
 683	}
 684
 685	return ata_sff_qc_issue(qc);
 686}
 687
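/*
 * Per-port completion handling for the two-interrupt command sequence.
 * For a DMA read, the ATA engine interrupt pushes the HDMA packet
 * (DIMM to host copy) and the subsequent HDMA interrupt completes the
 * command; for a DMA write, the HDMA interrupt submits the ATA packet
 * and the ATA engine interrupt completes the command.  Non-data
 * commands complete on their single ATA interrupt.
 */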
 688static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
 689					  struct ata_queued_cmd *qc,
 690					  unsigned int doing_hdma,
 691					  void __iomem *mmio)
 692{
 693	unsigned int port_no = ap->port_no;
 694	unsigned int port_ofs =
 695		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
 696	u8 status;
 697	unsigned int handled = 0;
 698
 699	VPRINTK("ENTER\n");
 700
 701	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
 702	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
 703
 704		/* step two - DMA from DIMM to host */
 705		if (doing_hdma) {
 706			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
 707				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 708			/* get drive status; clear intr; complete txn */
 709			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
 710			ata_qc_complete(qc);
 711			pdc20621_pop_hdma(qc);
 712		}
 713
 714		/* step one - exec ATA command */
 715		else {
 716			u8 seq = (u8) (port_no + 1 + 4);
 717			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
 718				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 719
 720			/* submit hdma pkt */
 721			pdc20621_dump_hdma(qc);
 722			pdc20621_push_hdma(qc, seq,
 723					   port_ofs + PDC_DIMM_HOST_PKT);
 724		}
 725		handled = 1;
 726
 727	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */
 728
 729		/* step one - DMA from host to DIMM */
 730		if (doing_hdma) {
 731			u8 seq = (u8) (port_no + 1);
 732			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
 733				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 734
 735			/* submit ata pkt */
 736			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
 737			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
 738			writel(port_ofs + PDC_DIMM_ATA_PKT,
 739			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 740			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 741		}
 742
 743		/* step two - execute ATA command */
 744		else {
 745			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
 746				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 747			/* get drive status; clear intr; complete txn */
 748			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
 749			ata_qc_complete(qc);
 750			pdc20621_pop_hdma(qc);
 751		}
 752		handled = 1;
 753
 754	/* command completion, but no data xfer */
 755	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
 756
 757		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 758		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
 759		qc->err_mask |= ac_err_mask(status);
 760		ata_qc_complete(qc);
 761		handled = 1;
 762
 763	} else {
 764		ap->stats.idle_irq++;
 765	}
 766
 767	return handled;
 768}
 769
 770static void pdc20621_irq_clear(struct ata_port *ap)
 771{
 772	ioread8(ap->ioaddr.status_addr);
 773}
 774
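/*
 * Top-level interrupt handler.  PDC_20621_SEQMASK reports which
 * sequence interrupts are pending (reading it should also clear them):
 * bits 1-4 are ATA engine completions for ports 0-3, bits 5-8 are HDMA
 * completions for the same ports.  The loop therefore folds i > 4 back
 * onto port i - 5 and passes doing_hdma = (i > 4) down to
 * pdc20621_host_intr().
 */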
 775static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
 776{
 777	struct ata_host *host = dev_instance;
 778	struct ata_port *ap;
 779	u32 mask = 0;
 780	unsigned int i, tmp, port_no;
 781	unsigned int handled = 0;
 782	void __iomem *mmio_base;
 783
 784	VPRINTK("ENTER\n");
 785
 786	if (!host || !host->iomap[PDC_MMIO_BAR]) {
 787		VPRINTK("QUICK EXIT\n");
 788		return IRQ_NONE;
 789	}
 790
 791	mmio_base = host->iomap[PDC_MMIO_BAR];
 792
 793	/* reading should also clear interrupts */
 794	mmio_base += PDC_CHIP0_OFS;
 795	mask = readl(mmio_base + PDC_20621_SEQMASK);
 796	VPRINTK("mask == 0x%x\n", mask);
 797
 798	if (mask == 0xffffffff) {
 799		VPRINTK("QUICK EXIT 2\n");
 800		return IRQ_NONE;
 801	}
 802	mask &= 0xffff;		/* only 16 tags possible */
 803	if (!mask) {
 804		VPRINTK("QUICK EXIT 3\n");
 805		return IRQ_NONE;
 806	}
 807
 808	spin_lock(&host->lock);
 809
 810	for (i = 1; i < 9; i++) {
 811		port_no = i - 1;
 812		if (port_no > 3)
 813			port_no -= 4;
 814		if (port_no >= host->n_ports)
 815			ap = NULL;
 816		else
 817			ap = host->ports[port_no];
 818		tmp = mask & (1 << i);
 819		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 820		if (tmp && ap) {
 821			struct ata_queued_cmd *qc;
 822
 823			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 824			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 825				handled += pdc20621_host_intr(ap, qc, (i > 4),
 826							      mmio_base);
 827		}
 828	}
 829
 830	spin_unlock(&host->lock);
 831
 832	VPRINTK("mask == 0x%x\n", mask);
 833
 834	VPRINTK("EXIT\n");
 835
 836	return IRQ_RETVAL(handled);
 837}
 838
 839static void pdc_freeze(struct ata_port *ap)
 840{
 841	void __iomem *mmio = ap->ioaddr.cmd_addr;
 842	u32 tmp;
 843
 844	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
 845
 846	tmp = readl(mmio + PDC_CTLSTAT);
 847	tmp |= PDC_MASK_INT;
 848	tmp &= ~PDC_DMA_ENABLE;
 849	writel(tmp, mmio + PDC_CTLSTAT);
 850	readl(mmio + PDC_CTLSTAT); /* flush */
 851}
 852
 853static void pdc_thaw(struct ata_port *ap)
 854{
 855	void __iomem *mmio = ap->ioaddr.cmd_addr;
 856	u32 tmp;
 857
 858	/* FIXME: start HDMA engine, if zero ATA engines running */
 859
 860	/* clear IRQ */
 861	ioread8(ap->ioaddr.status_addr);
 862
 863	/* turn IRQ back on */
 864	tmp = readl(mmio + PDC_CTLSTAT);
 865	tmp &= ~PDC_MASK_INT;
 866	writel(tmp, mmio + PDC_CTLSTAT);
 867	readl(mmio + PDC_CTLSTAT); /* flush */
 868}
 869
 870static void pdc_reset_port(struct ata_port *ap)
 871{
 872	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
 873	unsigned int i;
 874	u32 tmp;
 875
 876	/* FIXME: handle HDMA copy engine */
 877
 878	for (i = 11; i > 0; i--) {
 879		tmp = readl(mmio);
 880		if (tmp & PDC_RESET)
 881			break;
 882
 883		udelay(100);
 884
 885		tmp |= PDC_RESET;
 886		writel(tmp, mmio);
 887	}
 888
 889	tmp &= ~PDC_RESET;
 890	writel(tmp, mmio);
 891	readl(mmio);	/* flush */
 892}
 893
 894static int pdc_softreset(struct ata_link *link, unsigned int *class,
 895			 unsigned long deadline)
 896{
 897	pdc_reset_port(link->ap);
 898	return ata_sff_softreset(link, class, deadline);
 899}
 900
 901static void pdc_error_handler(struct ata_port *ap)
 902{
 903	if (!(ap->pflags & ATA_PFLAG_FROZEN))
 904		pdc_reset_port(ap);
 905
 906	ata_sff_error_handler(ap);
 907}
 908
 909static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
 910{
 911	struct ata_port *ap = qc->ap;
 912
 913	/* make DMA engine forget about the failed command */
 914	if (qc->flags & ATA_QCFLAG_FAILED)
 915		pdc_reset_port(ap);
 916}
 917
 918static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
 919{
 920	u8 *scsicmd = qc->scsicmd->cmnd;
 921	int pio = 1; /* atapi dma off by default */
 922
 923	/* Whitelist commands that may use DMA. */
 924	switch (scsicmd[0]) {
 925	case WRITE_12:
 926	case WRITE_10:
 927	case WRITE_6:
 928	case READ_12:
 929	case READ_10:
 930	case READ_6:
 931	case 0xad: /* READ_DVD_STRUCTURE */
 932	case 0xbe: /* READ_CD */
 933		pio = 0;
 934	}
 935	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
 936	if (scsicmd[0] == WRITE_10) {
 937		unsigned int lba =
 938			(scsicmd[2] << 24) |
 939			(scsicmd[3] << 16) |
 940			(scsicmd[4] << 8) |
 941			scsicmd[5];
 942		if (lba >= 0xFFFF4FA2)
 943			pio = 1;
 944	}
 945	return pio;
 946}
 947
 948static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 949{
 950	WARN_ON(tf->protocol == ATA_PROT_DMA ||
 951		tf->protocol == ATAPI_PROT_DMA);
 952	ata_sff_tf_load(ap, tf);
 953}
 954
 955
 956static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 957{
 958	WARN_ON(tf->protocol == ATA_PROT_DMA ||
 959		tf->protocol == ATAPI_PROT_DMA);
 960	ata_sff_exec_command(ap, tf);
 961}
 962
 963
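/*
 * The taskfile registers of each port sit at four-byte intervals from
 * the port base: data at +0, error/feature at +0x4, and so on up to
 * command/status at +0x1c, with control/altstatus at +0x38.
 */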
 964static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
 965{
 966	port->cmd_addr		= base;
 967	port->data_addr		= base;
 968	port->feature_addr	=
 969	port->error_addr	= base + 0x4;
 970	port->nsect_addr	= base + 0x8;
 971	port->lbal_addr		= base + 0xc;
 972	port->lbam_addr		= base + 0x10;
 973	port->lbah_addr		= base + 0x14;
 974	port->device_addr	= base + 0x18;
 975	port->command_addr	=
 976	port->status_addr	= base + 0x1c;
 977	port->altstatus_addr	=
 978	port->ctl_addr		= base + 0x38;
 979}
 980
 981
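/*
 * The on-board DIMM is not mapped linearly; it is reached through a
 * 32 Kbyte window in the DIMM BAR, with PDC_DIMM_WINDOW_CTLR selecting
 * which page of DIMM memory the window exposes.  The two helpers below
 * therefore walk a transfer page by page, re-pointing the window
 * before each partial copy.
 */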
 982#ifdef ATA_VERBOSE_DEBUG
 983static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
 984				   u32 offset, u32 size)
 985{
 986	u32 window_size;
 987	u16 idx;
 988	u8 page_mask;
 989	long dist;
 990	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 991	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
 992
 993	/* hard-code chip #0 */
 994	mmio += PDC_CHIP0_OFS;
 995
 996	page_mask = 0x00;
 997	window_size = 0x2000 * 4; /* 32K byte uchar size */
 998	idx = (u16) (offset / window_size);
 999
1000	writel(0x01, mmio + PDC_GENERAL_CTLR);
1001	readl(mmio + PDC_GENERAL_CTLR);
1002	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1003	readl(mmio + PDC_DIMM_WINDOW_CTLR);
1004
1005	offset -= (idx * window_size);
1006	idx++;
1007	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
1008		(long) (window_size - offset);
1009	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
1010
1011	psource += dist;
1012	size -= dist;
1013	for (; (long) size >= (long) window_size ;) {
1014		writel(0x01, mmio + PDC_GENERAL_CTLR);
1015		readl(mmio + PDC_GENERAL_CTLR);
1016		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1017		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1018		memcpy_fromio(psource, dimm_mmio, window_size / 4);
1019		psource += window_size;
1020		size -= window_size;
1021		idx++;
1022	}
1023
1024	if (size) {
1025		writel(0x01, mmio + PDC_GENERAL_CTLR);
1026		readl(mmio + PDC_GENERAL_CTLR);
1027		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1028		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1029		memcpy_fromio(psource, dimm_mmio, size / 4);
1030	}
1031}
1032#endif
1033
1034
1035static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1036				 u32 offset, u32 size)
1037{
1038	u32 window_size;
1039	u16 idx;
1040	u8 page_mask;
1041	long dist;
1042	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1043	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1044
1045	/* hard-code chip #0 */
1046	mmio += PDC_CHIP0_OFS;
1047
1048	page_mask = 0x00;
1049	window_size = 0x2000 * 4;       /* 32K byte uchar size */
1050	idx = (u16) (offset / window_size);
1051
1052	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1053	readl(mmio + PDC_DIMM_WINDOW_CTLR);
1054	offset -= (idx * window_size);
1055	idx++;
1056	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1057		(long) (window_size - offset);
1058	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1059	writel(0x01, mmio + PDC_GENERAL_CTLR);
1060	readl(mmio + PDC_GENERAL_CTLR);
1061
1062	psource += dist;
1063	size -= dist;
1064	for (; (long) size >= (long) window_size ;) {
1065		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1066		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1067		memcpy_toio(dimm_mmio, psource, window_size / 4);
1068		writel(0x01, mmio + PDC_GENERAL_CTLR);
1069		readl(mmio + PDC_GENERAL_CTLR);
1070		psource += window_size;
1071		size -= window_size;
1072		idx++;
1073	}
1074
1075	if (size) {
1076		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1077		readl(mmio + PDC_DIMM_WINDOW_CTLR);
1078		memcpy_toio(dimm_mmio, psource, size / 4);
1079		writel(0x01, mmio + PDC_GENERAL_CTLR);
1080		readl(mmio + PDC_GENERAL_CTLR);
1081	}
1082}
1083
1084
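/*
 * Read one byte from the DIMM's SPD EEPROM via the chip's I2C engine:
 * program device and subaddress into PDC_I2C_ADDR_DATA, start a read
 * with the interrupt masked, then poll PDC_I2C_CONTROL for
 * PDC_I2C_COMPLETE.  Returns 1 and stores the byte in *pdata on
 * success, 0 if the transaction never completes.
 */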
1085static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1086				      u32 subaddr, u32 *pdata)
1087{
1088	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1089	u32 i2creg  = 0;
1090	u32 status;
1091	u32 count = 0;
1092
1093	/* hard-code chip #0 */
1094	mmio += PDC_CHIP0_OFS;
1095
1096	i2creg |= device << 24;
1097	i2creg |= subaddr << 16;
1098
1099	/* Set the device and subaddress */
1100	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1101	readl(mmio + PDC_I2C_ADDR_DATA);
1102
1103	/* Write Control to perform read operation, mask int */
1104	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1105	       mmio + PDC_I2C_CONTROL);
1106
1107	for (count = 0; count <= 1000; count ++) {
1108		status = readl(mmio + PDC_I2C_CONTROL);
1109		if (status & PDC_I2C_COMPLETE) {
1110			status = readl(mmio + PDC_I2C_ADDR_DATA);
1111			break;
1112		} else if (count == 1000)
1113			return 0;
1114	}
1115
1116	*pdata = (status >> 8) & 0x000000ff;
1117	return 1;
1118}
1119
1120
1121static int pdc20621_detect_dimm(struct ata_host *host)
1122{
1123	u32 data = 0;
1124	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1125			     PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1126		if (data == 100)
1127			return 100;
1128	} else
1129		return 0;
1130
1131	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1132		if (data <= 0x75)
1133			return 133;
1134	} else
1135		return 0;
1136
1137	return 0;
1138}
1139
1140
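/*
 * Program the DIMM0 module control register from SPD data: row, column
 * and bank counts, module rows, refresh and precharge timings and CAS
 * latency are read over I2C and packed into a single register value,
 * together with the module size derived from that geometry.  Returns
 * the module size in megabytes.
 */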
1141static int pdc20621_prog_dimm0(struct ata_host *host)
1142{
1143	u32 spd0[50];
1144	u32 data = 0;
1145	int size, i;
1146	u8 bdimmsize;
1147	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1148	static const struct {
1149		unsigned int reg;
1150		unsigned int ofs;
1151	} pdc_i2c_read_data [] = {
1152		{ PDC_DIMM_SPD_TYPE, 11 },
1153		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
1154		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
1155		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
1156		{ PDC_DIMM_SPD_ROW_NUM, 3 },
1157		{ PDC_DIMM_SPD_BANK_NUM, 17 },
1158		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
1159		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1160		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1161		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1162		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1163		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
1164	};
1165
1166	/* hard-code chip #0 */
1167	mmio += PDC_CHIP0_OFS;
1168
1169	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1170		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1171				  pdc_i2c_read_data[i].reg,
1172				  &spd0[pdc_i2c_read_data[i].ofs]);
1173
1174	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1175	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1176		((((spd0[27] + 9) / 10) - 1) << 8) ;
1177	data |= (((((spd0[29] > spd0[28])
1178		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1179	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1180
1181	if (spd0[18] & 0x08)
1182		data |= ((0x03) << 14);
1183	else if (spd0[18] & 0x04)
1184		data |= ((0x02) << 14);
1185	else if (spd0[18] & 0x01)
1186		data |= ((0x01) << 14);
1187	else
1188		data |= (0 << 14);
1189
 1190	/*
 1191	   Calculate bDIMMSize (the module size as a power of two) and
 1192	   merge the size, in 16 MB units, into the start/end address field.
 1193	*/
1194
1195	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1196	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
1197	data |= (((size / 16) - 1) << 16);
1198	data |= (0 << 23);
1199	data |= 8;
1200	writel(data, mmio + PDC_DIMM0_CONTROL);
1201	readl(mmio + PDC_DIMM0_CONTROL);
1202	return size;
1203}
1204
1205
1206static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1207{
1208	u32 data, spd0;
1209	int error, i;
1210	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1211
1212	/* hard-code chip #0 */
1213	mmio += PDC_CHIP0_OFS;
1214
1215	/*
1216	  Set To Default : DIMM Module Global Control Register (0x022259F1)
1217	  DIMM Arbitration Disable (bit 20)
1218	  DIMM Data/Control Output Driving Selection (bit12 - bit15)
1219	  Refresh Enable (bit 17)
1220	*/
1221
1222	data = 0x022259F1;
1223	writel(data, mmio + PDC_SDRAM_CONTROL);
1224	readl(mmio + PDC_SDRAM_CONTROL);
1225
 1226	/* Turn on ECC if the SPD type indicates an ECC module */
1227	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1228			       PDC_DIMM_SPD_TYPE, &spd0)) {
1229		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1230		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1231		return 1;
1232	}
1233	if (spd0 == 0x02) {
1234		data |= (0x01 << 16);
1235		writel(data, mmio + PDC_SDRAM_CONTROL);
1236		readl(mmio + PDC_SDRAM_CONTROL);
1237		printk(KERN_ERR "Local DIMM ECC Enabled\n");
1238	}
1239
1240	/* DIMM Initialization Select/Enable (bit 18/19) */
1241	data &= (~(1<<18));
1242	data |= (1<<19);
1243	writel(data, mmio + PDC_SDRAM_CONTROL);
1244
1245	error = 1;
1246	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
1247		data = readl(mmio + PDC_SDRAM_CONTROL);
1248		if (!(data & (1<<19))) {
1249			error = 0;
1250			break;
1251		}
1252		msleep(i*100);
1253	}
1254	return error;
1255}
1256
1257
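/*
 * One-time DIMM bring-up.  The countdown timer runs for three seconds
 * to estimate the bus clock; if the remaining count indicates a PCI-X
 * bus, the PLL F parameter is computed from the measured ticks,
 * otherwise the default PCI value (PCI_PLL_INIT) is used.  Once the
 * PLL is programmed, the DIMM is detected via SPD, the module and
 * global control registers are set up (with an optional read/write
 * test under ATA_VERBOSE_DEBUG), and if the SPD type indicates an ECC
 * module, the whole DIMM is zero-filled to initialize the ECC bits.
 */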
1258static unsigned int pdc20621_dimm_init(struct ata_host *host)
1259{
1260	int speed, size, length;
1261	u32 addr, spd0, pci_status;
1262	u32 time_period = 0;
1263	u32 tcount = 0;
1264	u32 ticks = 0;
1265	u32 clock = 0;
1266	u32 fparam = 0;
1267	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1268
1269	/* hard-code chip #0 */
1270	mmio += PDC_CHIP0_OFS;
1271
1272	/* Initialize PLL based upon PCI Bus Frequency */
1273
1274	/* Initialize Time Period Register */
1275	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1276	time_period = readl(mmio + PDC_TIME_PERIOD);
1277	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1278
1279	/* Enable timer */
1280	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1281	readl(mmio + PDC_TIME_CONTROL);
1282
1283	/* Wait 3 seconds */
1284	msleep(3000);
1285
1286	/*
1287	   When timer is enabled, counter is decreased every internal
1288	   clock cycle.
1289	*/
1290
1291	tcount = readl(mmio + PDC_TIME_COUNTER);
1292	VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1293
1294	/*
1295	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1296	   register should be >= (0xffffffff - 3x10^8).
1297	*/
1298	if (tcount >= PCI_X_TCOUNT) {
1299		ticks = (time_period - tcount);
1300		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1301
1302		clock = (ticks / 300000);
1303		VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1304
1305		clock = (clock * 33);
1306		VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1307
1308		/* PLL F Param (bit 22:16) */
1309		fparam = (1400000 / clock) - 2;
1310		VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1311
1312		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1313		pci_status = (0x8a001824 | (fparam << 16));
1314	} else
1315		pci_status = PCI_PLL_INIT;
1316
1317	/* Initialize PLL. */
1318	VPRINTK("pci_status: 0x%x\n", pci_status);
1319	writel(pci_status, mmio + PDC_CTL_STATUS);
1320	readl(mmio + PDC_CTL_STATUS);
1321
1322	/*
1323	   Read SPD of DIMM by I2C interface,
1324	   and program the DIMM Module Controller.
1325	*/
1326	if (!(speed = pdc20621_detect_dimm(host))) {
1327		printk(KERN_ERR "Detect Local DIMM Fail\n");
1328		return 1;	/* DIMM error */
1329	}
1330	VPRINTK("Local DIMM Speed = %d\n", speed);
1331
1332	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
1333	size = pdc20621_prog_dimm0(host);
1334	VPRINTK("Local DIMM Size = %dMB\n", size);
1335
1336	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
1337	if (pdc20621_prog_dimm_global(host)) {
1338		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1339		return 1;
1340	}
1341
1342#ifdef ATA_VERBOSE_DEBUG
1343	{
1344		u8 test_parttern1[40] =
1345			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
1346			'N','o','t',' ','Y','e','t',' ',
1347			'D','e','f','i','n','e','d',' ',
1348			'1','.','1','0',
1349			'9','8','0','3','1','6','1','2',0,0};
1350		u8 test_parttern2[40] = {0};
1351
1352		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1353		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1354
1355		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1356		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1357		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1358		       test_parttern2[1], &(test_parttern2[2]));
1359		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1360				       40);
1361		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1362		       test_parttern2[1], &(test_parttern2[2]));
1363
1364		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1365		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1366		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1367		       test_parttern2[1], &(test_parttern2[2]));
1368	}
1369#endif
1370
 1371	/* ECC initialization. */
1372
1373	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1374			       PDC_DIMM_SPD_TYPE, &spd0)) {
1375		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1376		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1377		return 1;
1378	}
1379	if (spd0 == 0x02) {
1380		void *buf;
1381		VPRINTK("Start ECC initialization\n");
1382		addr = 0;
1383		length = size * 1024 * 1024;
1384		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1385		if (!buf)
1386			return 1;
1387		while (addr < length) {
1388			pdc20621_put_to_dimm(host, buf, addr,
1389					     ECC_ERASE_BUF_SZ);
1390			addr += ECC_ERASE_BUF_SZ;
1391		}
1392		kfree(buf);
1393		VPRINTK("Finish ECC initialization\n");
1394	}
1395	return 0;
1396}
1397
1398
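/*
 * Final chip setup: point the 32k DIMM window at page 40h and pulse
 * PDC_RESET in the HDMA control/status register so the Host DMA engine
 * starts from a known state.
 */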
1399static void pdc_20621_init(struct ata_host *host)
1400{
1401	u32 tmp;
1402	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1403
1404	/* hard-code chip #0 */
1405	mmio += PDC_CHIP0_OFS;
1406
1407	/*
1408	 * Select page 0x40 for our 32k DIMM window
1409	 */
1410	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1411	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
1412	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1413
1414	/*
1415	 * Reset Host DMA
1416	 */
1417	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1418	tmp |= PDC_RESET;
1419	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1420	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
1421
1422	udelay(10);
1423
1424	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1425	tmp &= ~PDC_RESET;
1426	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1427	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
1428}
1429
1430static int pdc_sata_init_one(struct pci_dev *pdev,
1431			     const struct pci_device_id *ent)
1432{
1433	const struct ata_port_info *ppi[] =
1434		{ &pdc_port_info[ent->driver_data], NULL };
1435	struct ata_host *host;
1436	struct pdc_host_priv *hpriv;
1437	int i, rc;
1438
1439	ata_print_version_once(&pdev->dev, DRV_VERSION);
1440
1441	/* allocate host */
1442	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1443	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1444	if (!host || !hpriv)
1445		return -ENOMEM;
1446
1447	host->private_data = hpriv;
1448
1449	/* acquire resources and fill host */
1450	rc = pcim_enable_device(pdev);
1451	if (rc)
1452		return rc;
1453
1454	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1455				DRV_NAME);
1456	if (rc == -EBUSY)
1457		pcim_pin_device(pdev);
1458	if (rc)
1459		return rc;
1460	host->iomap = pcim_iomap_table(pdev);
1461
1462	for (i = 0; i < 4; i++) {
1463		struct ata_port *ap = host->ports[i];
1464		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1465		unsigned int offset = 0x200 + i * 0x80;
1466
1467		pdc_sata_setup_port(&ap->ioaddr, base + offset);
1468
1469		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1470		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1471		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1472	}
1473
1474	/* configure and activate */
1475	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
1476	if (rc)
1477		return rc;
1478
1479	if (pdc20621_dimm_init(host))
1480		return -ENOMEM;
1481	pdc_20621_init(host);
1482
1483	pci_set_master(pdev);
1484	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1485				 IRQF_SHARED, &pdc_sata_sht);
1486}
1487
1488module_pci_driver(pdc_sata_pci_driver);
1489
1490MODULE_AUTHOR("Jeff Garzik");
1491MODULE_DESCRIPTION("Promise SATA low-level driver");
1492MODULE_LICENSE("GPL");
1493MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1494MODULE_VERSION(DRV_VERSION);