1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * sata_sx4.c - Promise SATA
4 *
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003-2004 Red Hat, Inc.
10 *
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
13 *
14 * Hardware documentation available under NDA.
15 */
16
17/*
18 Theory of operation
19 -------------------
20
21 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
22 engine, DIMM memory, and four ATA engines (one per SATA port).
23 Data is copied to/from DIMM memory by the HDMA engine, before
24 handing off to one (or more) of the ATA engines. The ATA
25 engines operate solely on DIMM memory.
26
27 The SX4 behaves like a PATA chip, with no SATA controls or
28 knowledge whatsoever, leading to the presumption that
29 PATA<->SATA bridges exist on SX4 boards, external to the
30 PDC20621 chip itself.
31
32 The chip is quite capable, supporting an XOR engine and linked
  33  hardware commands (permits a string of transactions to be
34 submitted and waited-on as a single unit), and an optional
35 microprocessor.
36
37 The limiting factor is largely software. This Linux driver was
38 written to multiplex the single HDMA engine to copy disk
39 transactions into a fixed DIMM memory space, from where an ATA
40 engine takes over. As a result, each WRITE looks like this:
41
42 submit HDMA packet to hardware
43 hardware copies data from system memory to DIMM
44 hardware raises interrupt
45
46 submit ATA packet to hardware
47 hardware executes ATA WRITE command, w/ data in DIMM
48 hardware raises interrupt
49
50 and each READ looks like this:
51
52 submit ATA packet to hardware
53 hardware executes ATA READ command, w/ data in DIMM
54 hardware raises interrupt
55
56 submit HDMA packet to hardware
57 hardware copies data from DIMM to system memory
58 hardware raises interrupt
59
60 This is a very slow, lock-step way of doing things that can
61 certainly be improved by motivated kernel hackers.
62
63 */
64
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/slab.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
72#include <linux/device.h>
73#include <scsi/scsi_host.h>
74#include <scsi/scsi_cmnd.h>
75#include <linux/libata.h>
76#include "sata_promise.h"
77
78#define DRV_NAME "sata_sx4"
79#define DRV_VERSION "0.12"
80
81static int dimm_test;
82module_param(dimm_test, int, 0644);
83MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
84
85enum {
86 PDC_MMIO_BAR = 3,
87 PDC_DIMM_BAR = 4,
88
89 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
90
91 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
92 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
93 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
94 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
95
96 PDC_CTLSTAT = 0x60, /* IDEn control / status */
97
98 PDC_20621_SEQCTL = 0x400,
99 PDC_20621_SEQMASK = 0x480,
100 PDC_20621_GENERAL_CTL = 0x484,
101 PDC_20621_PAGE_SIZE = (32 * 1024),
102
103 /* chosen, not constant, values; we design our own DIMM mem map */
104 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
105 PDC_20621_DIMM_BASE = 0x00200000,
106 PDC_20621_DIMM_DATA = (64 * 1024),
107 PDC_DIMM_DATA_STEP = (256 * 1024),
108 PDC_DIMM_WINDOW_STEP = (8 * 1024),
109 PDC_DIMM_HOST_PRD = (6 * 1024),
110 PDC_DIMM_HOST_PKT = (128 * 0),
111 PDC_DIMM_HPKT_PRD = (128 * 1),
112 PDC_DIMM_ATA_PKT = (128 * 2),
113 PDC_DIMM_APKT_PRD = (128 * 3),
114 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
115 PDC_PAGE_WINDOW = 0x40,
116 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
117 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
118 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
119
120 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
121
122 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
123 (1<<23),
124
125 board_20621 = 0, /* FastTrak S150 SX4 */
126
127 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
128 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
129 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
130
131 PDC_MAX_HDMA = 32,
132 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
133
134 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
135 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
136 PDC_I2C_CONTROL = 0x48,
137 PDC_I2C_ADDR_DATA = 0x4C,
138 PDC_DIMM0_CONTROL = 0x80,
139 PDC_DIMM1_CONTROL = 0x84,
140 PDC_SDRAM_CONTROL = 0x88,
141 PDC_I2C_WRITE = 0, /* master -> slave */
142 PDC_I2C_READ = (1 << 6), /* master <- slave */
143 PDC_I2C_START = (1 << 7), /* start I2C proto */
144 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
145 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
146 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
147 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
148 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
149 PDC_DIMM_SPD_ROW_NUM = 3,
150 PDC_DIMM_SPD_COLUMN_NUM = 4,
151 PDC_DIMM_SPD_MODULE_ROW = 5,
152 PDC_DIMM_SPD_TYPE = 11,
153 PDC_DIMM_SPD_FRESH_RATE = 12,
154 PDC_DIMM_SPD_BANK_NUM = 17,
155 PDC_DIMM_SPD_CAS_LATENCY = 18,
156 PDC_DIMM_SPD_ATTRIBUTE = 21,
157 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
158 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
159 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
160 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
161 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
162 PDC_CTL_STATUS = 0x08,
163 PDC_DIMM_WINDOW_CTLR = 0x0C,
164 PDC_TIME_CONTROL = 0x3C,
165 PDC_TIME_PERIOD = 0x40,
166 PDC_TIME_COUNTER = 0x44,
167 PDC_GENERAL_CTLR = 0x484,
168 PCI_PLL_INIT = 0x8A531824,
169 PCI_X_TCOUNT = 0xEE1E5CFF,
170
171 /* PDC_TIME_CONTROL bits */
172 PDC_TIMER_BUZZER = (1 << 10),
173 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
174 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
175 PDC_TIMER_ENABLE = (1 << 7),
176 PDC_TIMER_MASK_INT = (1 << 5),
177 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
178 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
179 PDC_TIMER_ENABLE |
180 PDC_TIMER_MASK_INT,
181};
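
/*
 * Rough sketch of the DIMM memory map implied by the constants above and
 * their use below (per port, chip #0):
 *
 *   PDC_20621_DIMM_BASE + portno * PDC_DIMM_WINDOW_STEP:
 *       +0      Host DMA packet        (PDC_DIMM_HOST_PKT)
 *       +128    Host DMA S/G table     (PDC_DIMM_HPKT_PRD)
 *       +256    ATA packet             (PDC_DIMM_ATA_PKT)
 *       +384    ATA S/G table          (PDC_DIMM_APKT_PRD)
 *       +6K     host PRD table         (PDC_DIMM_HOST_PRD)
 *
 *   PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + portno * PDC_DIMM_DATA_STEP:
 *       per-port data staging buffer
 */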
182
183#define ECC_ERASE_BUF_SZ (128 * 1024)
184
185struct pdc_port_priv {
186 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
187 u8 *pkt;
188 dma_addr_t pkt_dma;
189};
190
191struct pdc_host_priv {
192 unsigned int doing_hdma;
193 unsigned int hdma_prod;
194 unsigned int hdma_cons;
195 struct {
196 struct ata_queued_cmd *qc;
197 unsigned int seq;
198 unsigned long pkt_ofs;
199 } hdma[32];
200};
201
202
203static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
204static void pdc_error_handler(struct ata_port *ap);
205static void pdc_freeze(struct ata_port *ap);
206static void pdc_thaw(struct ata_port *ap);
207static int pdc_port_start(struct ata_port *ap);
208static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
209static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
210static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
211static unsigned int pdc20621_dimm_init(struct ata_host *host);
212static int pdc20621_detect_dimm(struct ata_host *host);
213static unsigned int pdc20621_i2c_read(struct ata_host *host,
214 u32 device, u32 subaddr, u32 *pdata);
215static int pdc20621_prog_dimm0(struct ata_host *host);
216static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
217static void pdc20621_get_from_dimm(struct ata_host *host,
218 void *psource, u32 offset, u32 size);
219static void pdc20621_put_to_dimm(struct ata_host *host,
220 void *psource, u32 offset, u32 size);
221static void pdc20621_irq_clear(struct ata_port *ap);
222static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
223static int pdc_softreset(struct ata_link *link, unsigned int *class,
224 unsigned long deadline);
225static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
226static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
227
228
229static const struct scsi_host_template pdc_sata_sht = {
230 ATA_BASE_SHT(DRV_NAME),
231 .sg_tablesize = LIBATA_MAX_PRD,
232 .dma_boundary = ATA_DMA_BOUNDARY,
233};
234
235static struct ata_port_operations pdc_20621_ops = {
236 .inherits = &ata_sff_port_ops,
237
238 .check_atapi_dma = pdc_check_atapi_dma,
239 .qc_prep = pdc20621_qc_prep,
240 .qc_issue = pdc20621_qc_issue,
241
242 .freeze = pdc_freeze,
243 .thaw = pdc_thaw,
244 .softreset = pdc_softreset,
245 .error_handler = pdc_error_handler,
246 .lost_interrupt = ATA_OP_NULL,
247 .post_internal_cmd = pdc_post_internal_cmd,
248
249 .port_start = pdc_port_start,
250
251 .sff_tf_load = pdc_tf_load_mmio,
252 .sff_exec_command = pdc_exec_command_mmio,
253 .sff_irq_clear = pdc20621_irq_clear,
254};
255
256static const struct ata_port_info pdc_port_info[] = {
257 /* board_20621 */
258 {
259 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
260 ATA_FLAG_PIO_POLLING,
261 .pio_mask = ATA_PIO4,
262 .mwdma_mask = ATA_MWDMA2,
263 .udma_mask = ATA_UDMA6,
264 .port_ops = &pdc_20621_ops,
265 },
266
267};
268
269static const struct pci_device_id pdc_sata_pci_tbl[] = {
270 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
271
272 { } /* terminate list */
273};
274
275static struct pci_driver pdc_sata_pci_driver = {
276 .name = DRV_NAME,
277 .id_table = pdc_sata_pci_tbl,
278 .probe = pdc_sata_init_one,
279 .remove = ata_pci_remove_one,
280};
281
282
283static int pdc_port_start(struct ata_port *ap)
284{
285 struct device *dev = ap->host->dev;
286 struct pdc_port_priv *pp;
287
288 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
289 if (!pp)
290 return -ENOMEM;
291
292 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
293 if (!pp->pkt)
294 return -ENOMEM;
295
296 ap->private_data = pp;
297
298 return 0;
299}
300
301static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
302 unsigned int total_len)
303{
304 u32 addr;
305 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
306 __le32 *buf32 = (__le32 *) buf;
307
308 /* output ATA packet S/G table */
309 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
310 (PDC_DIMM_DATA_STEP * portno);
311
312 buf32[dw] = cpu_to_le32(addr);
313 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
314}
315
316static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
317 unsigned int total_len)
318{
319 u32 addr;
320 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
321 __le32 *buf32 = (__le32 *) buf;
322
323 /* output Host DMA packet S/G table */
324 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
325 (PDC_DIMM_DATA_STEP * portno);
326
327 buf32[dw] = cpu_to_le32(addr);
328 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
329}
330
331static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
332 unsigned int devno, u8 *buf,
333 unsigned int portno)
334{
335 unsigned int i, dw;
336 __le32 *buf32 = (__le32 *) buf;
337 u8 dev_reg;
338
339 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
340 (PDC_DIMM_WINDOW_STEP * portno) +
341 PDC_DIMM_APKT_PRD;
342
343 i = PDC_DIMM_ATA_PKT;
344
345 /*
346 * Set up ATA packet
347 */
348 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
349 buf[i++] = PDC_PKT_READ;
350 else if (tf->protocol == ATA_PROT_NODATA)
351 buf[i++] = PDC_PKT_NODATA;
352 else
353 buf[i++] = 0;
354 buf[i++] = 0; /* reserved */
355 buf[i++] = portno + 1; /* seq. id */
356 buf[i++] = 0xff; /* delay seq. id */
357
358 /* dimm dma S/G, and next-pkt */
359 dw = i >> 2;
360 if (tf->protocol == ATA_PROT_NODATA)
361 buf32[dw] = 0;
362 else
363 buf32[dw] = cpu_to_le32(dimm_sg);
364 buf32[dw + 1] = 0;
365 i += 8;
366
367 if (devno == 0)
368 dev_reg = ATA_DEVICE_OBS;
369 else
370 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
371
372 /* select device */
373 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
374 buf[i++] = dev_reg;
375
376 /* device control register */
377 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
378 buf[i++] = tf->ctl;
379
380 return i;
381}
382
383static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
384 unsigned int portno)
385{
386 unsigned int dw;
387 u32 tmp;
388 __le32 *buf32 = (__le32 *) buf;
389
390 unsigned int host_sg = PDC_20621_DIMM_BASE +
391 (PDC_DIMM_WINDOW_STEP * portno) +
392 PDC_DIMM_HOST_PRD;
393 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
394 (PDC_DIMM_WINDOW_STEP * portno) +
395 PDC_DIMM_HPKT_PRD;
396
397 dw = PDC_DIMM_HOST_PKT >> 2;
398
399 /*
400 * Set up Host DMA packet
401 */
402 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
403 tmp = PDC_PKT_READ;
404 else
405 tmp = 0;
406 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
407 tmp |= (0xff << 24); /* delay seq. id */
408 buf32[dw + 0] = cpu_to_le32(tmp);
409 buf32[dw + 1] = cpu_to_le32(host_sg);
410 buf32[dw + 2] = cpu_to_le32(dimm_sg);
411 buf32[dw + 3] = 0;
412}
413
414static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
415{
416 struct scatterlist *sg;
417 struct ata_port *ap = qc->ap;
418 struct pdc_port_priv *pp = ap->private_data;
419 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
420 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
421 unsigned int portno = ap->port_no;
422 unsigned int i, si, idx, total_len = 0, sgt_len;
423 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
424
425 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
426
427 /* hard-code chip #0 */
428 mmio += PDC_CHIP0_OFS;
429
430 /*
431 * Build S/G table
432 */
433 idx = 0;
434 for_each_sg(qc->sg, sg, qc->n_elem, si) {
435 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
436 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
437 total_len += sg_dma_len(sg);
438 }
439 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
440 sgt_len = idx * 4;
441
442 /*
443 * Build ATA, host DMA packets
444 */
445 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
446 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
447
448 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
449 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
450
451 if (qc->tf.flags & ATA_TFLAG_LBA48)
452 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
453 else
454 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
455
456 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
457
458 /* copy three S/G tables and two packets to DIMM MMIO window */
459 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
460 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
461 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
462 PDC_DIMM_HOST_PRD,
463 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
464
465 /* force host FIFO dump */
466 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
467
468 readl(dimm_mmio); /* MMIO PCI posting flush */
469
470 ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
471 i, sgt_len);
472}
473
474static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
475{
476 struct ata_port *ap = qc->ap;
477 struct pdc_port_priv *pp = ap->private_data;
478 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
479 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
480 unsigned int portno = ap->port_no;
481 unsigned int i;
482
483 /* hard-code chip #0 */
484 mmio += PDC_CHIP0_OFS;
485
486 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
487
488 if (qc->tf.flags & ATA_TFLAG_LBA48)
489 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
490 else
491 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
492
493 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
494
495 /* copy three S/G tables and two packets to DIMM MMIO window */
496 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
497 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
498
499 /* force host FIFO dump */
500 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
501
502 readl(dimm_mmio); /* MMIO PCI posting flush */
503
504 ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
505}
506
507static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
508{
509 switch (qc->tf.protocol) {
510 case ATA_PROT_DMA:
511 pdc20621_dma_prep(qc);
512 break;
513 case ATA_PROT_NODATA:
514 pdc20621_nodata_prep(qc);
515 break;
516 default:
517 break;
518 }
519
520 return AC_ERR_OK;
521}
522
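/*
 * There is only one HDMA copy engine for all four ports, so HDMA packet
 * submissions are serialized in software: if the engine is busy, the
 * request is parked in the hdma[] ring (indexed by hdma_prod/hdma_cons
 * modulo PDC_MAX_HDMA) and submitted later from pdc20621_pop_hdma().
 */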
523static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
524 unsigned int seq,
525 u32 pkt_ofs)
526{
527 struct ata_port *ap = qc->ap;
528 struct ata_host *host = ap->host;
529 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
530
531 /* hard-code chip #0 */
532 mmio += PDC_CHIP0_OFS;
533
534 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
535 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
536
537 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
538 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
539}
540
541static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
542 unsigned int seq,
543 u32 pkt_ofs)
544{
545 struct ata_port *ap = qc->ap;
546 struct pdc_host_priv *pp = ap->host->private_data;
547 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
548
549 if (!pp->doing_hdma) {
550 __pdc20621_push_hdma(qc, seq, pkt_ofs);
551 pp->doing_hdma = 1;
552 return;
553 }
554
555 pp->hdma[idx].qc = qc;
556 pp->hdma[idx].seq = seq;
557 pp->hdma[idx].pkt_ofs = pkt_ofs;
558 pp->hdma_prod++;
559}
560
561static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
562{
563 struct ata_port *ap = qc->ap;
564 struct pdc_host_priv *pp = ap->host->private_data;
565 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
566
567 /* if nothing on queue, we're done */
568 if (pp->hdma_prod == pp->hdma_cons) {
569 pp->doing_hdma = 0;
570 return;
571 }
572
573 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
574 pp->hdma[idx].pkt_ofs);
575 pp->hdma_cons++;
576}
577
578static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
579{
580 struct ata_port *ap = qc->ap;
581 unsigned int port_no = ap->port_no;
582 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
583
584 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
585 dimm_mmio += PDC_DIMM_HOST_PKT;
586
587 ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
588 readl(dimm_mmio), readl(dimm_mmio + 4),
589 readl(dimm_mmio + 8), readl(dimm_mmio + 12));
590}
591
592static void pdc20621_packet_start(struct ata_queued_cmd *qc)
593{
594 struct ata_port *ap = qc->ap;
595 struct ata_host *host = ap->host;
596 unsigned int port_no = ap->port_no;
597 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
598 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
599 u8 seq = (u8) (port_no + 1);
600 unsigned int port_ofs;
601
602 /* hard-code chip #0 */
603 mmio += PDC_CHIP0_OFS;
604
605 wmb(); /* flush PRD, pkt writes */
606
607 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
608
609 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
610 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
611 seq += 4;
612
613 pdc20621_dump_hdma(qc);
614 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
615 ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
616 port_ofs + PDC_DIMM_HOST_PKT,
617 port_ofs + PDC_DIMM_HOST_PKT,
618 seq);
619 } else {
620 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
621 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
622
623 writel(port_ofs + PDC_DIMM_ATA_PKT,
624 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
625 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
626 ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
627 port_ofs + PDC_DIMM_ATA_PKT,
628 port_ofs + PDC_DIMM_ATA_PKT,
629 seq);
630 }
631}
632
633static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
634{
635 switch (qc->tf.protocol) {
636 case ATA_PROT_NODATA:
637 if (qc->tf.flags & ATA_TFLAG_POLLING)
638 break;
639 fallthrough;
640 case ATA_PROT_DMA:
641 pdc20621_packet_start(qc);
642 return 0;
643
644 case ATAPI_PROT_DMA:
645 BUG();
646 break;
647
648 default:
649 break;
650 }
651
652 return ata_sff_qc_issue(qc);
653}
654
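/*
 * Per-command interrupt handling: advance the two-phase protocol described
 * in the "Theory of operation" comment above.  For a READ, the first
 * interrupt (ATA engine done) triggers the HDMA copy to host memory and
 * the second completes the command; for a WRITE, the first interrupt
 * (HDMA copy done) triggers the ATA packet and the second completes the
 * command.
 */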
655static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
656 struct ata_queued_cmd *qc,
657 unsigned int doing_hdma,
658 void __iomem *mmio)
659{
660 unsigned int port_no = ap->port_no;
661 unsigned int port_ofs =
662 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
663 u8 status;
664 unsigned int handled = 0;
665
666 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
667 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
668
669 /* step two - DMA from DIMM to host */
670 if (doing_hdma) {
671 ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
672 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
673 /* get drive status; clear intr; complete txn */
674 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
675 ata_qc_complete(qc);
676 pdc20621_pop_hdma(qc);
677 }
678
679 /* step one - exec ATA command */
680 else {
681 u8 seq = (u8) (port_no + 1 + 4);
682 ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
683 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
684
685 /* submit hdma pkt */
686 pdc20621_dump_hdma(qc);
687 pdc20621_push_hdma(qc, seq,
688 port_ofs + PDC_DIMM_HOST_PKT);
689 }
690 handled = 1;
691
692 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
693
694 /* step one - DMA from host to DIMM */
695 if (doing_hdma) {
696 u8 seq = (u8) (port_no + 1);
697 ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
698 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
699
700 /* submit ata pkt */
701 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
702 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
703 writel(port_ofs + PDC_DIMM_ATA_PKT,
704 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
705 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
706 }
707
708 /* step two - execute ATA command */
709 else {
710 ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
711 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
712 /* get drive status; clear intr; complete txn */
713 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
714 ata_qc_complete(qc);
715 pdc20621_pop_hdma(qc);
716 }
717 handled = 1;
718
719 /* command completion, but no data xfer */
720 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
721
722 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
723 ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
724 qc->err_mask |= ac_err_mask(status);
725 ata_qc_complete(qc);
726 handled = 1;
727
728 } else {
729 ap->stats.idle_irq++;
730 }
731
732 return handled;
733}
734
735static void pdc20621_irq_clear(struct ata_port *ap)
736{
737 ioread8(ap->ioaddr.status_addr);
738}
739
740static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
741{
742 struct ata_host *host = dev_instance;
743 struct ata_port *ap;
744 u32 mask = 0;
745 unsigned int i, tmp, port_no;
746 unsigned int handled = 0;
747 void __iomem *mmio_base;
748
749 if (!host || !host->iomap[PDC_MMIO_BAR])
750 return IRQ_NONE;
751
752 mmio_base = host->iomap[PDC_MMIO_BAR];
753
754 /* reading should also clear interrupts */
755 mmio_base += PDC_CHIP0_OFS;
756 mask = readl(mmio_base + PDC_20621_SEQMASK);
757
758 if (mask == 0xffffffff)
759 return IRQ_NONE;
760
761 mask &= 0xffff; /* only 16 tags possible */
762 if (!mask)
763 return IRQ_NONE;
764
765 spin_lock(&host->lock);
766
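	/*
	 * SEQ IDs 1-4 carry ATA engine completions (port + 1) and 5-8 the
	 * corresponding HDMA completions (port + 5), so i > 4 means an
	 * HDMA completion for port (i - 5).
	 */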
767 for (i = 1; i < 9; i++) {
768 port_no = i - 1;
769 if (port_no > 3)
770 port_no -= 4;
771 if (port_no >= host->n_ports)
772 ap = NULL;
773 else
774 ap = host->ports[port_no];
775 tmp = mask & (1 << i);
776 if (ap)
777 ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
778 if (tmp && ap) {
779 struct ata_queued_cmd *qc;
780
781 qc = ata_qc_from_tag(ap, ap->link.active_tag);
782 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
783 handled += pdc20621_host_intr(ap, qc, (i > 4),
784 mmio_base);
785 }
786 }
787
788 spin_unlock(&host->lock);
789
790 return IRQ_RETVAL(handled);
791}
792
793static void pdc_freeze(struct ata_port *ap)
794{
795 void __iomem *mmio = ap->ioaddr.cmd_addr;
796 u32 tmp;
797
798 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
799
800 tmp = readl(mmio + PDC_CTLSTAT);
801 tmp |= PDC_MASK_INT;
802 tmp &= ~PDC_DMA_ENABLE;
803 writel(tmp, mmio + PDC_CTLSTAT);
804 readl(mmio + PDC_CTLSTAT); /* flush */
805}
806
807static void pdc_thaw(struct ata_port *ap)
808{
809 void __iomem *mmio = ap->ioaddr.cmd_addr;
810 u32 tmp;
811
812 /* FIXME: start HDMA engine, if zero ATA engines running */
813
814 /* clear IRQ */
815 ioread8(ap->ioaddr.status_addr);
816
817 /* turn IRQ back on */
818 tmp = readl(mmio + PDC_CTLSTAT);
819 tmp &= ~PDC_MASK_INT;
820 writel(tmp, mmio + PDC_CTLSTAT);
821 readl(mmio + PDC_CTLSTAT); /* flush */
822}
823
824static void pdc_reset_port(struct ata_port *ap)
825{
826 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
827 unsigned int i;
828 u32 tmp;
829
830 /* FIXME: handle HDMA copy engine */
831
832 for (i = 11; i > 0; i--) {
833 tmp = readl(mmio);
834 if (tmp & PDC_RESET)
835 break;
836
837 udelay(100);
838
839 tmp |= PDC_RESET;
840 writel(tmp, mmio);
841 }
842
843 tmp &= ~PDC_RESET;
844 writel(tmp, mmio);
845 readl(mmio); /* flush */
846}
847
848static int pdc_softreset(struct ata_link *link, unsigned int *class,
849 unsigned long deadline)
850{
851 pdc_reset_port(link->ap);
852 return ata_sff_softreset(link, class, deadline);
853}
854
855static void pdc_error_handler(struct ata_port *ap)
856{
857 if (!ata_port_is_frozen(ap))
858 pdc_reset_port(ap);
859
860 ata_sff_error_handler(ap);
861}
862
863static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
864{
865 struct ata_port *ap = qc->ap;
866
867 /* make DMA engine forget about the failed command */
868 if (qc->flags & ATA_QCFLAG_EH)
869 pdc_reset_port(ap);
870}
871
872static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
873{
874 u8 *scsicmd = qc->scsicmd->cmnd;
875 int pio = 1; /* atapi dma off by default */
876
877 /* Whitelist commands that may use DMA. */
878 switch (scsicmd[0]) {
879 case WRITE_12:
880 case WRITE_10:
881 case WRITE_6:
882 case READ_12:
883 case READ_10:
884 case READ_6:
885 case 0xad: /* READ_DVD_STRUCTURE */
886 case 0xbe: /* READ_CD */
887 pio = 0;
888 }
889 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
890 if (scsicmd[0] == WRITE_10) {
891 unsigned int lba =
892 (scsicmd[2] << 24) |
893 (scsicmd[3] << 16) |
894 (scsicmd[4] << 8) |
895 scsicmd[5];
896 if (lba >= 0xFFFF4FA2)
897 pio = 1;
898 }
899 return pio;
900}
901
902static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
903{
904 WARN_ON(tf->protocol == ATA_PROT_DMA ||
905 tf->protocol == ATAPI_PROT_DMA);
906 ata_sff_tf_load(ap, tf);
907}
908
909
910static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
911{
912 WARN_ON(tf->protocol == ATA_PROT_DMA ||
913 tf->protocol == ATAPI_PROT_DMA);
914 ata_sff_exec_command(ap, tf);
915}
916
917
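/*
 * Taskfile registers are exposed through MMIO, 4 bytes apart, starting at
 * the per-port base; the control/altstatus register sits at +0x38.
 */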
918static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
919{
920 port->cmd_addr = base;
921 port->data_addr = base;
922 port->feature_addr =
923 port->error_addr = base + 0x4;
924 port->nsect_addr = base + 0x8;
925 port->lbal_addr = base + 0xc;
926 port->lbam_addr = base + 0x10;
927 port->lbah_addr = base + 0x14;
928 port->device_addr = base + 0x18;
929 port->command_addr =
930 port->status_addr = base + 0x1c;
931 port->altstatus_addr =
932 port->ctl_addr = base + 0x38;
933}
934
935
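/*
 * DIMM memory is not mapped linearly; it is reached through a single 32K
 * window in the DIMM BAR, selected by writing a page number to
 * PDC_DIMM_WINDOW_CTLR.  The two helpers below therefore copy data in
 * window-sized chunks, re-pointing the window as they go.
 */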
936static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
937 u32 offset, u32 size)
938{
939 u32 window_size;
940 u16 idx;
941 u8 page_mask;
942 long dist;
943 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
944 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
945
946 /* hard-code chip #0 */
947 mmio += PDC_CHIP0_OFS;
948
949 page_mask = 0x00;
950 window_size = 0x2000 * 4; /* 32K byte uchar size */
951 idx = (u16) (offset / window_size);
952
953 writel(0x01, mmio + PDC_GENERAL_CTLR);
954 readl(mmio + PDC_GENERAL_CTLR);
955 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
956 readl(mmio + PDC_DIMM_WINDOW_CTLR);
957
958 offset -= (idx * window_size);
959 idx++;
960 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
961 (long) (window_size - offset);
962 memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
963
964 psource += dist;
965 size -= dist;
966 for (; (long) size >= (long) window_size ;) {
967 writel(0x01, mmio + PDC_GENERAL_CTLR);
968 readl(mmio + PDC_GENERAL_CTLR);
969 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
970 readl(mmio + PDC_DIMM_WINDOW_CTLR);
971 memcpy_fromio(psource, dimm_mmio, window_size / 4);
972 psource += window_size;
973 size -= window_size;
974 idx++;
975 }
976
977 if (size) {
978 writel(0x01, mmio + PDC_GENERAL_CTLR);
979 readl(mmio + PDC_GENERAL_CTLR);
980 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
981 readl(mmio + PDC_DIMM_WINDOW_CTLR);
982 memcpy_fromio(psource, dimm_mmio, size / 4);
983 }
984}
985
986
987static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
988 u32 offset, u32 size)
989{
990 u32 window_size;
991 u16 idx;
992 u8 page_mask;
993 long dist;
994 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
995 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
996
997 /* hard-code chip #0 */
998 mmio += PDC_CHIP0_OFS;
999
1000 page_mask = 0x00;
1001 window_size = 0x2000 * 4; /* 32K byte uchar size */
1002 idx = (u16) (offset / window_size);
1003
1004 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1005 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1006 offset -= (idx * window_size);
1007 idx++;
1008 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1009 (long) (window_size - offset);
1010 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1011 writel(0x01, mmio + PDC_GENERAL_CTLR);
1012 readl(mmio + PDC_GENERAL_CTLR);
1013
1014 psource += dist;
1015 size -= dist;
1016 for (; (long) size >= (long) window_size ;) {
1017 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1018 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1019 memcpy_toio(dimm_mmio, psource, window_size / 4);
1020 writel(0x01, mmio + PDC_GENERAL_CTLR);
1021 readl(mmio + PDC_GENERAL_CTLR);
1022 psource += window_size;
1023 size -= window_size;
1024 idx++;
1025 }
1026
1027 if (size) {
1028 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1029 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1030 memcpy_toio(dimm_mmio, psource, size / 4);
1031 writel(0x01, mmio + PDC_GENERAL_CTLR);
1032 readl(mmio + PDC_GENERAL_CTLR);
1033 }
1034}
1035
1036
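/*
 * Polled read of a single byte from a DIMM SPD EEPROM via the chip's I2C
 * master.  Returns 1 on success with the byte in *pdata, or 0 if the
 * transfer does not complete within the polling loop.
 */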
1037static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1038 u32 subaddr, u32 *pdata)
1039{
1040 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1041 u32 i2creg = 0;
1042 u32 status;
1043 u32 count = 0;
1044
1045 /* hard-code chip #0 */
1046 mmio += PDC_CHIP0_OFS;
1047
1048 i2creg |= device << 24;
1049 i2creg |= subaddr << 16;
1050
1051 /* Set the device and subaddress */
1052 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1053 readl(mmio + PDC_I2C_ADDR_DATA);
1054
1055 /* Write Control to perform read operation, mask int */
1056 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1057 mmio + PDC_I2C_CONTROL);
1058
1059 for (count = 0; count <= 1000; count ++) {
1060 status = readl(mmio + PDC_I2C_CONTROL);
1061 if (status & PDC_I2C_COMPLETE) {
1062 status = readl(mmio + PDC_I2C_ADDR_DATA);
1063 break;
1064 } else if (count == 1000)
1065 return 0;
1066 }
1067
1068 *pdata = (status >> 8) & 0x000000ff;
1069 return 1;
1070}
1071
1072
1073static int pdc20621_detect_dimm(struct ata_host *host)
1074{
1075 u32 data = 0;
1076 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1077 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1078 if (data == 100)
1079 return 100;
1080 } else
1081 return 0;
1082
1083 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1084 if (data <= 0x75)
1085 return 133;
1086 } else
1087 return 0;
1088
1089 return 0;
1090}
1091
1092
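/*
 * Read the module's SPD bytes over I2C and pack the geometry and timing
 * parameters into the DIMM0 module control register.  Returns the detected
 * module size in MB.
 */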
1093static int pdc20621_prog_dimm0(struct ata_host *host)
1094{
1095 u32 spd0[50];
1096 u32 data = 0;
1097 int size, i;
1098 u8 bdimmsize;
1099 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1100 static const struct {
1101 unsigned int reg;
1102 unsigned int ofs;
1103 } pdc_i2c_read_data [] = {
1104 { PDC_DIMM_SPD_TYPE, 11 },
1105 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1106 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1107 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1108 { PDC_DIMM_SPD_ROW_NUM, 3 },
1109 { PDC_DIMM_SPD_BANK_NUM, 17 },
1110 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1111 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1112 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1113 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1114 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1115 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1116 };
1117
1118 /* hard-code chip #0 */
1119 mmio += PDC_CHIP0_OFS;
1120
1121 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1122 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1123 pdc_i2c_read_data[i].reg,
1124 &spd0[pdc_i2c_read_data[i].ofs]);
1125
1126 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1127 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1128 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1129 data |= (((((spd0[29] > spd0[28])
1130 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1131 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1132
1133 if (spd0[18] & 0x08)
1134 data |= ((0x03) << 14);
1135 else if (spd0[18] & 0x04)
1136 data |= ((0x02) << 14);
1137 else if (spd0[18] & 0x01)
1138 data |= ((0x01) << 14);
1139 else
1140 data |= (0 << 14);
1141
1142 /*
 1143	   Calculate bDIMMSize (the module size as a power of two) and merge
 1144	   the size into the register value, which programs the module's
	   start/end addresses.
1145 */
1146
1147 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1148 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1149 data |= (((size / 16) - 1) << 16);
1150 data |= (0 << 23);
1151 data |= 8;
1152 writel(data, mmio + PDC_DIMM0_CONTROL);
1153 readl(mmio + PDC_DIMM0_CONTROL);
1154 return size;
1155}
1156
1157
1158static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1159{
1160 u32 data, spd0;
1161 int error, i;
1162 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1163
1164 /* hard-code chip #0 */
1165 mmio += PDC_CHIP0_OFS;
1166
1167 /*
1168 Set To Default : DIMM Module Global Control Register (0x022259F1)
1169 DIMM Arbitration Disable (bit 20)
1170 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1171 Refresh Enable (bit 17)
1172 */
1173
1174 data = 0x022259F1;
1175 writel(data, mmio + PDC_SDRAM_CONTROL);
1176 readl(mmio + PDC_SDRAM_CONTROL);
1177
1178 /* Turn on for ECC */
1179 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1180 PDC_DIMM_SPD_TYPE, &spd0)) {
1181 dev_err(host->dev,
1182 "Failed in i2c read: device=%#x, subaddr=%#x\n",
1183 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1184 return 1;
1185 }
1186 if (spd0 == 0x02) {
1187 data |= (0x01 << 16);
1188 writel(data, mmio + PDC_SDRAM_CONTROL);
1189 readl(mmio + PDC_SDRAM_CONTROL);
1190 dev_err(host->dev, "Local DIMM ECC Enabled\n");
1191 }
1192
1193 /* DIMM Initialization Select/Enable (bit 18/19) */
1194 data &= (~(1<<18));
1195 data |= (1<<19);
1196 writel(data, mmio + PDC_SDRAM_CONTROL);
1197
1198 error = 1;
1199 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1200 data = readl(mmio + PDC_SDRAM_CONTROL);
1201 if (!(data & (1<<19))) {
1202 error = 0;
1203 break;
1204 }
1205 msleep(i*100);
1206 }
1207 return error;
1208}
1209
1210
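/*
 * Bring up the on-board DIMM: measure the PCI bus clock with the chip's
 * timer to program the PLL, detect and program the DIMM from its SPD data,
 * optionally run the dimm_test pattern check, and zero-fill the module if
 * it reports ECC support.  Returns 0 on success, 1 on error.
 */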
1211static unsigned int pdc20621_dimm_init(struct ata_host *host)
1212{
1213 int speed, size, length;
1214 u32 addr, spd0, pci_status;
1215 u32 time_period = 0;
1216 u32 tcount = 0;
1217 u32 ticks = 0;
1218 u32 clock = 0;
1219 u32 fparam = 0;
1220 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1221
1222 /* hard-code chip #0 */
1223 mmio += PDC_CHIP0_OFS;
1224
1225 /* Initialize PLL based upon PCI Bus Frequency */
1226
1227 /* Initialize Time Period Register */
1228 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1229 time_period = readl(mmio + PDC_TIME_PERIOD);
1230 dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);
1231
1232 /* Enable timer */
1233 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1234 readl(mmio + PDC_TIME_CONTROL);
1235
1236 /* Wait 3 seconds */
1237 msleep(3000);
1238
1239 /*
1240 When timer is enabled, counter is decreased every internal
1241 clock cycle.
1242 */
1243
1244 tcount = readl(mmio + PDC_TIME_COUNTER);
1245 dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);
1246
1247 /*
1248 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1249 register should be >= (0xffffffff - 3x10^8).
1250 */
1251 if (tcount >= PCI_X_TCOUNT) {
1252 ticks = (time_period - tcount);
1253 dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);
1254
1255 clock = (ticks / 300000);
1256 dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
1257 clock, clock);
1258
1259 clock = (clock * 33);
1260 dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
1261 clock, clock);
1262
1263 /* PLL F Param (bit 22:16) */
1264 fparam = (1400000 / clock) - 2;
1265 dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);
1266
1267 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1268 pci_status = (0x8a001824 | (fparam << 16));
1269 } else
1270 pci_status = PCI_PLL_INIT;
1271
1272 /* Initialize PLL. */
1273 dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
1274 writel(pci_status, mmio + PDC_CTL_STATUS);
1275 readl(mmio + PDC_CTL_STATUS);
1276
1277 /*
1278 Read SPD of DIMM by I2C interface,
1279 and program the DIMM Module Controller.
1280 */
1281 if (!(speed = pdc20621_detect_dimm(host))) {
1282 dev_err(host->dev, "Detect Local DIMM Fail\n");
1283 return 1; /* DIMM error */
1284 }
1285 dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);
1286
1287 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1288 size = pdc20621_prog_dimm0(host);
1289 dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
1290
1291 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1292 if (pdc20621_prog_dimm_global(host)) {
1293 dev_err(host->dev,
1294 "Programming DIMM Module Global Control Register Fail\n");
1295 return 1;
1296 }
1297
1298 if (dimm_test) {
1299 u8 test_parttern1[40] =
1300 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1301 'N','o','t',' ','Y','e','t',' ',
1302 'D','e','f','i','n','e','d',' ',
1303 '1','.','1','0',
1304 '9','8','0','3','1','6','1','2',0,0};
1305 u8 test_parttern2[40] = {0};
1306
1307 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1308 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1309
1310 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1311 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1312 dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
1313 test_parttern2[1], &(test_parttern2[2]));
1314 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1315 40);
1316 dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
1317 test_parttern2[0],
1318 test_parttern2[1], &(test_parttern2[2]));
1319
1320 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1321 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1322 dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
1323 test_parttern2[0],
1324 test_parttern2[1], &(test_parttern2[2]));
1325 }
1326
 1327	/* ECC initialization. */
1328
1329 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1330 PDC_DIMM_SPD_TYPE, &spd0)) {
1331 dev_err(host->dev,
1332 "Failed in i2c read: device=%#x, subaddr=%#x\n",
1333 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1334 return 1;
1335 }
1336 if (spd0 == 0x02) {
1337 void *buf;
1338 dev_dbg(host->dev, "Start ECC initialization\n");
1339 addr = 0;
1340 length = size * 1024 * 1024;
1341 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1342 if (!buf)
1343 return 1;
1344 while (addr < length) {
1345 pdc20621_put_to_dimm(host, buf, addr,
1346 ECC_ERASE_BUF_SZ);
1347 addr += ECC_ERASE_BUF_SZ;
1348 }
1349 kfree(buf);
1350 dev_dbg(host->dev, "Finish ECC initialization\n");
1351 }
1352 return 0;
1353}
1354
1355
1356static void pdc_20621_init(struct ata_host *host)
1357{
1358 u32 tmp;
1359 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1360
1361 /* hard-code chip #0 */
1362 mmio += PDC_CHIP0_OFS;
1363
1364 /*
1365 * Select page 0x40 for our 32k DIMM window
1366 */
1367 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1368 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1369 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1370
1371 /*
1372 * Reset Host DMA
1373 */
1374 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1375 tmp |= PDC_RESET;
1376 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1377 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1378
1379 udelay(10);
1380
1381 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1382 tmp &= ~PDC_RESET;
1383 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1384 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1385}
1386
1387static int pdc_sata_init_one(struct pci_dev *pdev,
1388 const struct pci_device_id *ent)
1389{
1390 const struct ata_port_info *ppi[] =
1391 { &pdc_port_info[ent->driver_data], NULL };
1392 struct ata_host *host;
1393 struct pdc_host_priv *hpriv;
1394 int i, rc;
1395
1396 ata_print_version_once(&pdev->dev, DRV_VERSION);
1397
1398 /* allocate host */
1399 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1400 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1401 if (!host || !hpriv)
1402 return -ENOMEM;
1403
1404 host->private_data = hpriv;
1405
1406 /* acquire resources and fill host */
1407 rc = pcim_enable_device(pdev);
1408 if (rc)
1409 return rc;
1410
1411 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1412 DRV_NAME);
1413 if (rc == -EBUSY)
1414 pcim_pin_device(pdev);
1415 if (rc)
1416 return rc;
1417 host->iomap = pcim_iomap_table(pdev);
1418
1419 for (i = 0; i < 4; i++) {
1420 struct ata_port *ap = host->ports[i];
1421 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1422 unsigned int offset = 0x200 + i * 0x80;
1423
1424 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1425
1426 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1427 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1428 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1429 }
1430
1431 /* configure and activate */
1432 rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
1433 if (rc)
1434 return rc;
1435
1436 if (pdc20621_dimm_init(host))
1437 return -ENOMEM;
1438 pdc_20621_init(host);
1439
1440 pci_set_master(pdev);
1441 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1442 IRQF_SHARED, &pdc_sata_sht);
1443}
1444
1445module_pci_driver(pdc_sata_pci_driver);
1446
1447MODULE_AUTHOR("Jeff Garzik");
1448MODULE_DESCRIPTION("Promise SATA low-level driver");
1449MODULE_LICENSE("GPL");
1450MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1451MODULE_VERSION(DRV_VERSION);
1/*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33/*
34 Theory of operation
35 -------------------
36
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
42
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
47
48 The chip is quite capable, supporting an XOR engine and linked
49 hardware commands (permits a string to transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
52
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
57
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
61
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
65
66 and each READ looks like this:
67
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
71
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
75
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
78
79 */
80
81#include <linux/kernel.h>
82#include <linux/module.h>
83#include <linux/pci.h>
84#include <linux/slab.h>
85#include <linux/blkdev.h>
86#include <linux/delay.h>
87#include <linux/interrupt.h>
88#include <linux/device.h>
89#include <scsi/scsi_host.h>
90#include <scsi/scsi_cmnd.h>
91#include <linux/libata.h>
92#include "sata_promise.h"
93
94#define DRV_NAME "sata_sx4"
95#define DRV_VERSION "0.12"
96
97
98enum {
99 PDC_MMIO_BAR = 3,
100 PDC_DIMM_BAR = 4,
101
102 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
103
104 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
105 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
106 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
107 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
108
109 PDC_CTLSTAT = 0x60, /* IDEn control / status */
110
111 PDC_20621_SEQCTL = 0x400,
112 PDC_20621_SEQMASK = 0x480,
113 PDC_20621_GENERAL_CTL = 0x484,
114 PDC_20621_PAGE_SIZE = (32 * 1024),
115
116 /* chosen, not constant, values; we design our own DIMM mem map */
117 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
118 PDC_20621_DIMM_BASE = 0x00200000,
119 PDC_20621_DIMM_DATA = (64 * 1024),
120 PDC_DIMM_DATA_STEP = (256 * 1024),
121 PDC_DIMM_WINDOW_STEP = (8 * 1024),
122 PDC_DIMM_HOST_PRD = (6 * 1024),
123 PDC_DIMM_HOST_PKT = (128 * 0),
124 PDC_DIMM_HPKT_PRD = (128 * 1),
125 PDC_DIMM_ATA_PKT = (128 * 2),
126 PDC_DIMM_APKT_PRD = (128 * 3),
127 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
128 PDC_PAGE_WINDOW = 0x40,
129 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
130 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
131 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
132
133 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
134
135 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
136 (1<<23),
137
138 board_20621 = 0, /* FastTrak S150 SX4 */
139
140 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
141 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
142 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
143
144 PDC_MAX_HDMA = 32,
145 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
146
147 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
148 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
149 PDC_I2C_CONTROL = 0x48,
150 PDC_I2C_ADDR_DATA = 0x4C,
151 PDC_DIMM0_CONTROL = 0x80,
152 PDC_DIMM1_CONTROL = 0x84,
153 PDC_SDRAM_CONTROL = 0x88,
154 PDC_I2C_WRITE = 0, /* master -> slave */
155 PDC_I2C_READ = (1 << 6), /* master <- slave */
156 PDC_I2C_START = (1 << 7), /* start I2C proto */
157 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
158 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
159 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
160 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
161 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
162 PDC_DIMM_SPD_ROW_NUM = 3,
163 PDC_DIMM_SPD_COLUMN_NUM = 4,
164 PDC_DIMM_SPD_MODULE_ROW = 5,
165 PDC_DIMM_SPD_TYPE = 11,
166 PDC_DIMM_SPD_FRESH_RATE = 12,
167 PDC_DIMM_SPD_BANK_NUM = 17,
168 PDC_DIMM_SPD_CAS_LATENCY = 18,
169 PDC_DIMM_SPD_ATTRIBUTE = 21,
170 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
171 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
172 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
173 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
174 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
175 PDC_CTL_STATUS = 0x08,
176 PDC_DIMM_WINDOW_CTLR = 0x0C,
177 PDC_TIME_CONTROL = 0x3C,
178 PDC_TIME_PERIOD = 0x40,
179 PDC_TIME_COUNTER = 0x44,
180 PDC_GENERAL_CTLR = 0x484,
181 PCI_PLL_INIT = 0x8A531824,
182 PCI_X_TCOUNT = 0xEE1E5CFF,
183
184 /* PDC_TIME_CONTROL bits */
185 PDC_TIMER_BUZZER = (1 << 10),
186 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
187 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
188 PDC_TIMER_ENABLE = (1 << 7),
189 PDC_TIMER_MASK_INT = (1 << 5),
190 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
191 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
192 PDC_TIMER_ENABLE |
193 PDC_TIMER_MASK_INT,
194};
195
196#define ECC_ERASE_BUF_SZ (128 * 1024)
197
198struct pdc_port_priv {
199 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
200 u8 *pkt;
201 dma_addr_t pkt_dma;
202};
203
204struct pdc_host_priv {
205 unsigned int doing_hdma;
206 unsigned int hdma_prod;
207 unsigned int hdma_cons;
208 struct {
209 struct ata_queued_cmd *qc;
210 unsigned int seq;
211 unsigned long pkt_ofs;
212 } hdma[32];
213};
214
215
216static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
217static void pdc_error_handler(struct ata_port *ap);
218static void pdc_freeze(struct ata_port *ap);
219static void pdc_thaw(struct ata_port *ap);
220static int pdc_port_start(struct ata_port *ap);
221static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
222static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
223static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
224static unsigned int pdc20621_dimm_init(struct ata_host *host);
225static int pdc20621_detect_dimm(struct ata_host *host);
226static unsigned int pdc20621_i2c_read(struct ata_host *host,
227 u32 device, u32 subaddr, u32 *pdata);
228static int pdc20621_prog_dimm0(struct ata_host *host);
229static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
230#ifdef ATA_VERBOSE_DEBUG
231static void pdc20621_get_from_dimm(struct ata_host *host,
232 void *psource, u32 offset, u32 size);
233#endif
234static void pdc20621_put_to_dimm(struct ata_host *host,
235 void *psource, u32 offset, u32 size);
236static void pdc20621_irq_clear(struct ata_port *ap);
237static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
238static int pdc_softreset(struct ata_link *link, unsigned int *class,
239 unsigned long deadline);
240static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
241static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
242
243
244static struct scsi_host_template pdc_sata_sht = {
245 ATA_BASE_SHT(DRV_NAME),
246 .sg_tablesize = LIBATA_MAX_PRD,
247 .dma_boundary = ATA_DMA_BOUNDARY,
248};
249
250/* TODO: inherit from base port_ops after converting to new EH */
251static struct ata_port_operations pdc_20621_ops = {
252 .inherits = &ata_sff_port_ops,
253
254 .check_atapi_dma = pdc_check_atapi_dma,
255 .qc_prep = pdc20621_qc_prep,
256 .qc_issue = pdc20621_qc_issue,
257
258 .freeze = pdc_freeze,
259 .thaw = pdc_thaw,
260 .softreset = pdc_softreset,
261 .error_handler = pdc_error_handler,
262 .lost_interrupt = ATA_OP_NULL,
263 .post_internal_cmd = pdc_post_internal_cmd,
264
265 .port_start = pdc_port_start,
266
267 .sff_tf_load = pdc_tf_load_mmio,
268 .sff_exec_command = pdc_exec_command_mmio,
269 .sff_irq_clear = pdc20621_irq_clear,
270};
271
272static const struct ata_port_info pdc_port_info[] = {
273 /* board_20621 */
274 {
275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
276 ATA_FLAG_PIO_POLLING,
277 .pio_mask = ATA_PIO4,
278 .mwdma_mask = ATA_MWDMA2,
279 .udma_mask = ATA_UDMA6,
280 .port_ops = &pdc_20621_ops,
281 },
282
283};
284
285static const struct pci_device_id pdc_sata_pci_tbl[] = {
286 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
287
288 { } /* terminate list */
289};
290
291static struct pci_driver pdc_sata_pci_driver = {
292 .name = DRV_NAME,
293 .id_table = pdc_sata_pci_tbl,
294 .probe = pdc_sata_init_one,
295 .remove = ata_pci_remove_one,
296};
297
298
299static int pdc_port_start(struct ata_port *ap)
300{
301 struct device *dev = ap->host->dev;
302 struct pdc_port_priv *pp;
303
304 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
305 if (!pp)
306 return -ENOMEM;
307
308 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
309 if (!pp->pkt)
310 return -ENOMEM;
311
312 ap->private_data = pp;
313
314 return 0;
315}
316
317static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
318 unsigned int total_len)
319{
320 u32 addr;
321 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
322 __le32 *buf32 = (__le32 *) buf;
323
324 /* output ATA packet S/G table */
325 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
326 (PDC_DIMM_DATA_STEP * portno);
327 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
328 buf32[dw] = cpu_to_le32(addr);
329 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
330
331 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
332 PDC_20621_DIMM_BASE +
333 (PDC_DIMM_WINDOW_STEP * portno) +
334 PDC_DIMM_APKT_PRD,
335 buf32[dw], buf32[dw + 1]);
336}
337
338static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
339 unsigned int total_len)
340{
341 u32 addr;
342 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
343 __le32 *buf32 = (__le32 *) buf;
344
345 /* output Host DMA packet S/G table */
346 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
347 (PDC_DIMM_DATA_STEP * portno);
348
349 buf32[dw] = cpu_to_le32(addr);
350 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
351
352 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
353 PDC_20621_DIMM_BASE +
354 (PDC_DIMM_WINDOW_STEP * portno) +
355 PDC_DIMM_HPKT_PRD,
356 buf32[dw], buf32[dw + 1]);
357}
358
359static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
360 unsigned int devno, u8 *buf,
361 unsigned int portno)
362{
363 unsigned int i, dw;
364 __le32 *buf32 = (__le32 *) buf;
365 u8 dev_reg;
366
367 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
368 (PDC_DIMM_WINDOW_STEP * portno) +
369 PDC_DIMM_APKT_PRD;
370 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
371
372 i = PDC_DIMM_ATA_PKT;
373
374 /*
375 * Set up ATA packet
376 */
377 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
378 buf[i++] = PDC_PKT_READ;
379 else if (tf->protocol == ATA_PROT_NODATA)
380 buf[i++] = PDC_PKT_NODATA;
381 else
382 buf[i++] = 0;
383 buf[i++] = 0; /* reserved */
384 buf[i++] = portno + 1; /* seq. id */
385 buf[i++] = 0xff; /* delay seq. id */
386
387 /* dimm dma S/G, and next-pkt */
388 dw = i >> 2;
389 if (tf->protocol == ATA_PROT_NODATA)
390 buf32[dw] = 0;
391 else
392 buf32[dw] = cpu_to_le32(dimm_sg);
393 buf32[dw + 1] = 0;
394 i += 8;
395
396 if (devno == 0)
397 dev_reg = ATA_DEVICE_OBS;
398 else
399 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
400
401 /* select device */
402 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
403 buf[i++] = dev_reg;
404
405 /* device control register */
406 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
407 buf[i++] = tf->ctl;
408
409 return i;
410}
411
412static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
413 unsigned int portno)
414{
415 unsigned int dw;
416 u32 tmp;
417 __le32 *buf32 = (__le32 *) buf;
418
419 unsigned int host_sg = PDC_20621_DIMM_BASE +
420 (PDC_DIMM_WINDOW_STEP * portno) +
421 PDC_DIMM_HOST_PRD;
422 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
423 (PDC_DIMM_WINDOW_STEP * portno) +
424 PDC_DIMM_HPKT_PRD;
425 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
426 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
427
428 dw = PDC_DIMM_HOST_PKT >> 2;
429
430 /*
431 * Set up Host DMA packet
432 */
433 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
434 tmp = PDC_PKT_READ;
435 else
436 tmp = 0;
437 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
438 tmp |= (0xff << 24); /* delay seq. id */
439 buf32[dw + 0] = cpu_to_le32(tmp);
440 buf32[dw + 1] = cpu_to_le32(host_sg);
441 buf32[dw + 2] = cpu_to_le32(dimm_sg);
442 buf32[dw + 3] = 0;
443
444 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
445 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
446 PDC_DIMM_HOST_PKT,
447 buf32[dw + 0],
448 buf32[dw + 1],
449 buf32[dw + 2],
450 buf32[dw + 3]);
451}
452
453static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
454{
455 struct scatterlist *sg;
456 struct ata_port *ap = qc->ap;
457 struct pdc_port_priv *pp = ap->private_data;
458 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
459 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
460 unsigned int portno = ap->port_no;
461 unsigned int i, si, idx, total_len = 0, sgt_len;
462 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
463
464 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
465
466 VPRINTK("ata%u: ENTER\n", ap->print_id);
467
468 /* hard-code chip #0 */
469 mmio += PDC_CHIP0_OFS;
470
471 /*
472 * Build S/G table
473 */
474 idx = 0;
475 for_each_sg(qc->sg, sg, qc->n_elem, si) {
476 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
477 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
478 total_len += sg_dma_len(sg);
479 }
480 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
481 sgt_len = idx * 4;
482
483 /*
484 * Build ATA, host DMA packets
485 */
486 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
487 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
488
489 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
490 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
491
492 if (qc->tf.flags & ATA_TFLAG_LBA48)
493 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
494 else
495 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
496
497 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
498
499 /* copy three S/G tables and two packets to DIMM MMIO window */
500 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
501 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
502 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
503 PDC_DIMM_HOST_PRD,
504 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
505
506 /* force host FIFO dump */
507 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
508
509 readl(dimm_mmio); /* MMIO PCI posting flush */
510
511 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
512}
513
514static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
515{
516 struct ata_port *ap = qc->ap;
517 struct pdc_port_priv *pp = ap->private_data;
518 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
519 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
520 unsigned int portno = ap->port_no;
521 unsigned int i;
522
523 VPRINTK("ata%u: ENTER\n", ap->print_id);
524
525 /* hard-code chip #0 */
526 mmio += PDC_CHIP0_OFS;
527
528 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
529
530 if (qc->tf.flags & ATA_TFLAG_LBA48)
531 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
532 else
533 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
534
535 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
536
 537	/* copy ATA packet to DIMM MMIO window */
538 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
539 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
540
541 /* force host FIFO dump */
542 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
543
544 readl(dimm_mmio); /* MMIO PCI posting flush */
545
546 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
547}
548
549static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
550{
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561}
562
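/*
 * __pdc20621_push_hdma() - hand one HDMA packet to the hardware: arm the
 * per-sequence control register, then write the packet's DIMM offset to
 * PDC_HDMA_PKT_SUBMIT.  Both writes are followed by reads that flush
 * PCI posting.
 */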
563static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
564 unsigned int seq,
565 u32 pkt_ofs)
566{
567 struct ata_port *ap = qc->ap;
568 struct ata_host *host = ap->host;
569 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
570
571 /* hard-code chip #0 */
572 mmio += PDC_CHIP0_OFS;
573
574 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
575 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
576
577 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
578 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
579}
580
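/*
 * All four ports share one HDMA submit register, so host DMA requests
 * are serialized through a small software FIFO in pdc_host_priv:
 * pdc20621_push_hdma() submits immediately when the engine is idle and
 * queues otherwise, while pdc20621_pop_hdma() (called from the
 * interrupt path when a transfer finishes) either starts the next
 * queued request or marks the engine idle again.
 */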
581static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
582 unsigned int seq,
583 u32 pkt_ofs)
584{
585 struct ata_port *ap = qc->ap;
586 struct pdc_host_priv *pp = ap->host->private_data;
587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
588
589 if (!pp->doing_hdma) {
590 __pdc20621_push_hdma(qc, seq, pkt_ofs);
591 pp->doing_hdma = 1;
592 return;
593 }
594
595 pp->hdma[idx].qc = qc;
596 pp->hdma[idx].seq = seq;
597 pp->hdma[idx].pkt_ofs = pkt_ofs;
598 pp->hdma_prod++;
599}
600
601static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
602{
603 struct ata_port *ap = qc->ap;
604 struct pdc_host_priv *pp = ap->host->private_data;
605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
606
607 /* if nothing on queue, we're done */
608 if (pp->hdma_prod == pp->hdma_cons) {
609 pp->doing_hdma = 0;
610 return;
611 }
612
613 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
614 pp->hdma[idx].pkt_ofs);
615 pp->hdma_cons++;
616}
617
618#ifdef ATA_VERBOSE_DEBUG
619static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
620{
621 struct ata_port *ap = qc->ap;
622 unsigned int port_no = ap->port_no;
623 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
624
625 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
626 dimm_mmio += PDC_DIMM_HOST_PKT;
627
628 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
629 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
630 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
631 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
632}
633#else
634static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
635#endif /* ATA_VERBOSE_DEBUG */
636
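/*
 * pdc20621_packet_start() - kick off a prepared command.
 *
 * Sequence IDs 1-4 are used when submitting a port's ATA packet and
 * IDs 5-8 when submitting its HDMA packet.  DMA writes are routed
 * through the HDMA queue first; everything else arms the ATA sequence
 * and writes the ATA packet's DIMM offset to the port's PDC_PKT_SUBMIT
 * register.
 */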
637static void pdc20621_packet_start(struct ata_queued_cmd *qc)
638{
639 struct ata_port *ap = qc->ap;
640 struct ata_host *host = ap->host;
641 unsigned int port_no = ap->port_no;
642 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
643 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
644 u8 seq = (u8) (port_no + 1);
645 unsigned int port_ofs;
646
647 /* hard-code chip #0 */
648 mmio += PDC_CHIP0_OFS;
649
650 VPRINTK("ata%u: ENTER\n", ap->print_id);
651
652 wmb(); /* flush PRD, pkt writes */
653
654 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
655
656 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
657 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
658 seq += 4;
659
660 pdc20621_dump_hdma(qc);
661 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
662 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
663 port_ofs + PDC_DIMM_HOST_PKT,
664 port_ofs + PDC_DIMM_HOST_PKT,
665 seq);
666 } else {
667 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
668 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
669
670 writel(port_ofs + PDC_DIMM_ATA_PKT,
671 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
672 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
674 port_ofs + PDC_DIMM_ATA_PKT,
675 port_ofs + PDC_DIMM_ATA_PKT,
676 seq);
677 }
678}
679
680static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
681{
682 switch (qc->tf.protocol) {
683 case ATA_PROT_NODATA:
684 if (qc->tf.flags & ATA_TFLAG_POLLING)
685 break;
686 /*FALLTHROUGH*/
687 case ATA_PROT_DMA:
688 pdc20621_packet_start(qc);
689 return 0;
690
691 case ATAPI_PROT_DMA:
692 BUG();
693 break;
694
695 default:
696 break;
697 }
698
699 return ata_sff_qc_issue(qc);
700}
701
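/*
 * pdc20621_host_intr() - advance a command through its two phases.
 *
 * Each DMA command raises two interrupts, one from the HDMA engine and
 * one from the ATA engine; @doing_hdma says which of the two just
 * fired.  A DMA read is finished once its HDMA copy completes and a
 * DMA write once its ATA phase completes, so only those paths call
 * ata_qc_complete() and pull the next request off the HDMA queue.
 * Non-data commands complete on a single interrupt.
 */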
702static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
703 struct ata_queued_cmd *qc,
704 unsigned int doing_hdma,
705 void __iomem *mmio)
706{
707 unsigned int port_no = ap->port_no;
708 unsigned int port_ofs =
709 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
710 u8 status;
711 unsigned int handled = 0;
712
713 VPRINTK("ENTER\n");
714
715 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
716 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
717
718 /* step two - DMA from DIMM to host */
719 if (doing_hdma) {
720 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
721 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
722 /* get drive status; clear intr; complete txn */
723 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
724 ata_qc_complete(qc);
725 pdc20621_pop_hdma(qc);
726 }
727
728 /* step one - exec ATA command */
729 else {
730 u8 seq = (u8) (port_no + 1 + 4);
731 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
732 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
733
734 /* submit hdma pkt */
735 pdc20621_dump_hdma(qc);
736 pdc20621_push_hdma(qc, seq,
737 port_ofs + PDC_DIMM_HOST_PKT);
738 }
739 handled = 1;
740
741 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
742
743 /* step one - DMA from host to DIMM */
744 if (doing_hdma) {
745 u8 seq = (u8) (port_no + 1);
746 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
747 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
748
749 /* submit ata pkt */
750 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
751 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
752 writel(port_ofs + PDC_DIMM_ATA_PKT,
753 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
754 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
755 }
756
757 /* step two - execute ATA command */
758 else {
759 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
760 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
761 /* get drive status; clear intr; complete txn */
762 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
763 ata_qc_complete(qc);
764 pdc20621_pop_hdma(qc);
765 }
766 handled = 1;
767
768 /* command completion, but no data xfer */
769 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
770
771 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
772 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
773 qc->err_mask |= ac_err_mask(status);
774 ata_qc_complete(qc);
775 handled = 1;
776
777 } else {
778 ap->stats.idle_irq++;
779 }
780
781 return handled;
782}
783
784static void pdc20621_irq_clear(struct ata_port *ap)
785{
786 ioread8(ap->ioaddr.status_addr);
787}
788
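/*
 * pdc20621_interrupt() - decode PDC_20621_SEQMASK and dispatch.
 *
 * Bit n of the mask indicates that sequence ID n has completed:
 *
 *	seq 1-4		ATA engine interrupt for ports 0-3
 *	seq 5-8		HDMA interrupt on behalf of ports 0-3
 *
 * hence the i -> port_no mapping and the (i > 4) "doing_hdma" argument
 * passed to pdc20621_host_intr() below.
 */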
789static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
790{
791 struct ata_host *host = dev_instance;
792 struct ata_port *ap;
793 u32 mask = 0;
794 unsigned int i, tmp, port_no;
795 unsigned int handled = 0;
796 void __iomem *mmio_base;
797
798 VPRINTK("ENTER\n");
799
800 if (!host || !host->iomap[PDC_MMIO_BAR]) {
801 VPRINTK("QUICK EXIT\n");
802 return IRQ_NONE;
803 }
804
805 mmio_base = host->iomap[PDC_MMIO_BAR];
806
807 /* reading should also clear interrupts */
808 mmio_base += PDC_CHIP0_OFS;
809 mask = readl(mmio_base + PDC_20621_SEQMASK);
810 VPRINTK("mask == 0x%x\n", mask);
811
812 if (mask == 0xffffffff) {
813 VPRINTK("QUICK EXIT 2\n");
814 return IRQ_NONE;
815 }
816 mask &= 0xffff; /* only 16 tags possible */
817 if (!mask) {
818 VPRINTK("QUICK EXIT 3\n");
819 return IRQ_NONE;
820 }
821
822 spin_lock(&host->lock);
823
824 for (i = 1; i < 9; i++) {
825 port_no = i - 1;
826 if (port_no > 3)
827 port_no -= 4;
828 if (port_no >= host->n_ports)
829 ap = NULL;
830 else
831 ap = host->ports[port_no];
832 tmp = mask & (1 << i);
833 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
834 if (tmp && ap) {
835 struct ata_queued_cmd *qc;
836
837 qc = ata_qc_from_tag(ap, ap->link.active_tag);
838 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
839 handled += pdc20621_host_intr(ap, qc, (i > 4),
840 mmio_base);
841 }
842 }
843
844 spin_unlock(&host->lock);
845
846 VPRINTK("mask == 0x%x\n", mask);
847
848 VPRINTK("EXIT\n");
849
850 return IRQ_RETVAL(handled);
851}
852
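/*
 * Error-handling helpers: pdc_freeze() masks the port interrupt and
 * disables DMA via PDC_CTLSTAT, pdc_thaw() clears any pending interrupt
 * and unmasks it again, and pdc_reset_port() pulses the PDC_RESET bit
 * to return the ATA engine to a known state.
 */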
853static void pdc_freeze(struct ata_port *ap)
854{
855 void __iomem *mmio = ap->ioaddr.cmd_addr;
856 u32 tmp;
857
858 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
859
860 tmp = readl(mmio + PDC_CTLSTAT);
861 tmp |= PDC_MASK_INT;
862 tmp &= ~PDC_DMA_ENABLE;
863 writel(tmp, mmio + PDC_CTLSTAT);
864 readl(mmio + PDC_CTLSTAT); /* flush */
865}
866
867static void pdc_thaw(struct ata_port *ap)
868{
869 void __iomem *mmio = ap->ioaddr.cmd_addr;
870 u32 tmp;
871
872 /* FIXME: start HDMA engine, if zero ATA engines running */
873
874 /* clear IRQ */
875 ioread8(ap->ioaddr.status_addr);
876
877 /* turn IRQ back on */
878 tmp = readl(mmio + PDC_CTLSTAT);
879 tmp &= ~PDC_MASK_INT;
880 writel(tmp, mmio + PDC_CTLSTAT);
881 readl(mmio + PDC_CTLSTAT); /* flush */
882}
883
884static void pdc_reset_port(struct ata_port *ap)
885{
886 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
887 unsigned int i;
888 u32 tmp;
889
890 /* FIXME: handle HDMA copy engine */
891
892 for (i = 11; i > 0; i--) {
893 tmp = readl(mmio);
894 if (tmp & PDC_RESET)
895 break;
896
897 udelay(100);
898
899 tmp |= PDC_RESET;
900 writel(tmp, mmio);
901 }
902
903 tmp &= ~PDC_RESET;
904 writel(tmp, mmio);
905 readl(mmio); /* flush */
906}
907
908static int pdc_softreset(struct ata_link *link, unsigned int *class,
909 unsigned long deadline)
910{
911 pdc_reset_port(link->ap);
912 return ata_sff_softreset(link, class, deadline);
913}
914
915static void pdc_error_handler(struct ata_port *ap)
916{
917 if (!(ap->pflags & ATA_PFLAG_FROZEN))
918 pdc_reset_port(ap);
919
920 ata_sff_error_handler(ap);
921}
922
923static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
924{
925 struct ata_port *ap = qc->ap;
926
927 /* make DMA engine forget about the failed command */
928 if (qc->flags & ATA_QCFLAG_FAILED)
929 pdc_reset_port(ap);
930}
931
932static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
933{
934 u8 *scsicmd = qc->scsicmd->cmnd;
935 int pio = 1; /* atapi dma off by default */
936
937 /* Whitelist commands that may use DMA. */
938 switch (scsicmd[0]) {
939 case WRITE_12:
940 case WRITE_10:
941 case WRITE_6:
942 case READ_12:
943 case READ_10:
944 case READ_6:
945 case 0xad: /* READ_DVD_STRUCTURE */
946 case 0xbe: /* READ_CD */
947 pio = 0;
948 }
949 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
950 if (scsicmd[0] == WRITE_10) {
951 unsigned int lba =
952 (scsicmd[2] << 24) |
953 (scsicmd[3] << 16) |
954 (scsicmd[4] << 8) |
955 scsicmd[5];
956 if (lba >= 0xFFFF4FA2)
957 pio = 1;
958 }
959 return pio;
960}
961
962static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
963{
964 WARN_ON(tf->protocol == ATA_PROT_DMA ||
965 tf->protocol == ATAPI_PROT_DMA);
966 ata_sff_tf_load(ap, tf);
967}
968
969
970static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
971{
972 WARN_ON(tf->protocol == ATA_PROT_DMA ||
973 tf->protocol == ATAPI_PROT_DMA);
974 ata_sff_exec_command(ap, tf);
975}
976
977
978static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
979{
980 port->cmd_addr = base;
981 port->data_addr = base;
982 port->feature_addr =
983 port->error_addr = base + 0x4;
984 port->nsect_addr = base + 0x8;
985 port->lbal_addr = base + 0xc;
986 port->lbam_addr = base + 0x10;
987 port->lbah_addr = base + 0x14;
988 port->device_addr = base + 0x18;
989 port->command_addr =
990 port->status_addr = base + 0x1c;
991 port->altstatus_addr =
992 port->ctl_addr = base + 0x38;
993}
994
995
996#ifdef ATA_VERBOSE_DEBUG
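/*
 * pdc20621_get_from_dimm() - copy data out of DIMM memory (debug helper).
 *
 * DIMM memory is not mapped linearly: PDC_DIMM_BAR exposes one 32K
 * window at a time, selected through PDC_DIMM_WINDOW_CTLR, so the copy
 * walks the region chunk by chunk and re-selects the window index
 * between chunks.  The 0x01 writes to PDC_GENERAL_CTLR appear to serve
 * the same "host FIFO dump" purpose as in pdc20621_dma_prep().
 */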
997static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
998 u32 offset, u32 size)
999{
1000 u32 window_size;
1001 u16 idx;
1002 u8 page_mask;
1003 long dist;
1004 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1005 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1006
1007 /* hard-code chip #0 */
1008 mmio += PDC_CHIP0_OFS;
1009
1010 page_mask = 0x00;
 1011	window_size = 0x2000 * 4; /* 0x2000 dwords == 32K byte window */
1012 idx = (u16) (offset / window_size);
1013
1014 writel(0x01, mmio + PDC_GENERAL_CTLR);
1015 readl(mmio + PDC_GENERAL_CTLR);
1016 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1017 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1018
1019 offset -= (idx * window_size);
1020 idx++;
1021 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
1022 (long) (window_size - offset);
1023 memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
1024
1025 psource += dist;
1026 size -= dist;
1027 for (; (long) size >= (long) window_size ;) {
1028 writel(0x01, mmio + PDC_GENERAL_CTLR);
1029 readl(mmio + PDC_GENERAL_CTLR);
1030 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1031 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1032 memcpy_fromio(psource, dimm_mmio, window_size / 4);
1033 psource += window_size;
1034 size -= window_size;
1035 idx++;
1036 }
1037
1038 if (size) {
1039 writel(0x01, mmio + PDC_GENERAL_CTLR);
1040 readl(mmio + PDC_GENERAL_CTLR);
1041 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1042 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1043 memcpy_fromio(psource, dimm_mmio, size / 4);
1044 }
1045}
1046#endif
1047
1048
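/*
 * pdc20621_put_to_dimm() - mirror of pdc20621_get_from_dimm(): copy a
 * buffer into DIMM memory through the same sliding 32K window, one
 * window-sized chunk at a time.
 */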
1049static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1050 u32 offset, u32 size)
1051{
1052 u32 window_size;
1053 u16 idx;
1054 u8 page_mask;
1055 long dist;
1056 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1057 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1058
1059 /* hard-code chip #0 */
1060 mmio += PDC_CHIP0_OFS;
1061
1062 page_mask = 0x00;
 1063	window_size = 0x2000 * 4; /* 0x2000 dwords == 32K byte window */
1064 idx = (u16) (offset / window_size);
1065
1066 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1067 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1068 offset -= (idx * window_size);
1069 idx++;
1070 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1071 (long) (window_size - offset);
1072 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1073 writel(0x01, mmio + PDC_GENERAL_CTLR);
1074 readl(mmio + PDC_GENERAL_CTLR);
1075
1076 psource += dist;
1077 size -= dist;
1078 for (; (long) size >= (long) window_size ;) {
1079 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1080 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1081 memcpy_toio(dimm_mmio, psource, window_size / 4);
1082 writel(0x01, mmio + PDC_GENERAL_CTLR);
1083 readl(mmio + PDC_GENERAL_CTLR);
1084 psource += window_size;
1085 size -= window_size;
1086 idx++;
1087 }
1088
1089 if (size) {
1090 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1091 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1092 memcpy_toio(dimm_mmio, psource, size / 4);
1093 writel(0x01, mmio + PDC_GENERAL_CTLR);
1094 readl(mmio + PDC_GENERAL_CTLR);
1095 }
1096}
1097
1098
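/*
 * pdc20621_i2c_read() - read one byte from the DIMM's SPD EEPROM.
 *
 * The I2C device address (bits 31:24) and subaddress (bits 23:16) are
 * loaded into PDC_I2C_ADDR_DATA, a masked-interrupt read is started via
 * PDC_I2C_CONTROL, and the completion bit is polled for up to ~1000
 * iterations with no delay between polls.  Returns 1 with the byte in
 * *pdata on success, 0 on timeout.
 */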
1099static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1100 u32 subaddr, u32 *pdata)
1101{
1102 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1103 u32 i2creg = 0;
1104 u32 status;
1105 u32 count = 0;
1106
1107 /* hard-code chip #0 */
1108 mmio += PDC_CHIP0_OFS;
1109
1110 i2creg |= device << 24;
1111 i2creg |= subaddr << 16;
1112
1113 /* Set the device and subaddress */
1114 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1115 readl(mmio + PDC_I2C_ADDR_DATA);
1116
1117 /* Write Control to perform read operation, mask int */
1118 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1119 mmio + PDC_I2C_CONTROL);
1120
1121 for (count = 0; count <= 1000; count ++) {
1122 status = readl(mmio + PDC_I2C_CONTROL);
1123 if (status & PDC_I2C_COMPLETE) {
1124 status = readl(mmio + PDC_I2C_ADDR_DATA);
1125 break;
1126 } else if (count == 1000)
1127 return 0;
1128 }
1129
1130 *pdata = (status >> 8) & 0x000000ff;
1131 return 1;
1132}
1133
1134
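/*
 * pdc20621_detect_dimm() - determine the installed DIMM's speed grade.
 *
 * Returns 100 or 133 (MHz) based on the SPD "system frequency" byte
 * and, failing that, on SPD byte 9 (nominally the minimum SDRAM cycle
 * time, where <= 0x75 is taken to mean a 133 MHz part).  A return of 0
 * means no usable DIMM was detected.
 */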
1135static int pdc20621_detect_dimm(struct ata_host *host)
1136{
1137 u32 data = 0;
1138 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1139 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1140 if (data == 100)
1141 return 100;
1142 } else
1143 return 0;
1144
1145 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1146 if (data <= 0x75)
1147 return 133;
1148 } else
1149 return 0;
1150
1151 return 0;
1152}
1153
1154
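/*
 * pdc20621_prog_dimm0() - program the DIMM 0 module control register.
 *
 * A dozen SPD bytes (geometry, refresh rate, CAS latency and the other
 * timing parameters) are read over I2C and packed into the
 * PDC_DIMM0_CONTROL word, together with the module size derived from
 * the row/column/bank/rank counts.  Returns the module size in MB.
 */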
1155static int pdc20621_prog_dimm0(struct ata_host *host)
1156{
1157 u32 spd0[50];
1158 u32 data = 0;
1159 int size, i;
1160 u8 bdimmsize;
1161 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1162 static const struct {
1163 unsigned int reg;
1164 unsigned int ofs;
1165 } pdc_i2c_read_data [] = {
1166 { PDC_DIMM_SPD_TYPE, 11 },
1167 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1168 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1169 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1170 { PDC_DIMM_SPD_ROW_NUM, 3 },
1171 { PDC_DIMM_SPD_BANK_NUM, 17 },
1172 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1173 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1174 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1175 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1176 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1177 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1178 };
1179
1180 /* hard-code chip #0 */
1181 mmio += PDC_CHIP0_OFS;
1182
1183 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1184 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1185 pdc_i2c_read_data[i].reg,
1186 &spd0[pdc_i2c_read_data[i].ofs]);
1187
1188 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1189 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1190 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1191 data |= (((((spd0[29] > spd0[28])
1192 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1193 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1194
1195 if (spd0[18] & 0x08)
1196 data |= ((0x03) << 14);
1197 else if (spd0[18] & 0x04)
1198 data |= ((0x02) << 14);
1199 else if (spd0[18] & 0x01)
1200 data |= ((0x01) << 14);
1201 else
1202 data |= (0 << 14);
1203
 1204	/*
 1205	 * Compute the DIMM size (2^bdimmsize bytes, reported in MB) and
 1206	 * merge it into the DIMM0 module control word.
 1207	 */
1208
1209 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1210 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1211 data |= (((size / 16) - 1) << 16);
1212 data |= (0 << 23);
1213 data |= 8;
1214 writel(data, mmio + PDC_DIMM0_CONTROL);
1215 readl(mmio + PDC_DIMM0_CONTROL);
1216 return size;
1217}
1218
1219
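/*
 * pdc20621_prog_dimm_global() - program the global SDRAM controller.
 *
 * Writes the default control word, enables ECC when the SPD type byte
 * reads 0x02 (an ECC module), then sets the DIMM initialization enable
 * bit and polls for roughly five seconds for the hardware to clear it.
 * Returns nonzero if initialization never completes.
 */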
1220static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1221{
1222 u32 data, spd0;
1223 int error, i;
1224 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1225
1226 /* hard-code chip #0 */
1227 mmio += PDC_CHIP0_OFS;
1228
 1229	/*
 1230	 * Set the DIMM Module Global Control Register to its default
 1231	 * value (0x022259F1): DIMM arbitration disabled (bit 20),
 1232	 * DIMM data/control output drive selection (bits 12-15),
 1233	 * refresh enabled (bit 17).
 1234	 */
1235
1236 data = 0x022259F1;
1237 writel(data, mmio + PDC_SDRAM_CONTROL);
1238 readl(mmio + PDC_SDRAM_CONTROL);
1239
1240 /* Turn on for ECC */
1241 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1242 PDC_DIMM_SPD_TYPE, &spd0);
1243 if (spd0 == 0x02) {
1244 data |= (0x01 << 16);
1245 writel(data, mmio + PDC_SDRAM_CONTROL);
1246 readl(mmio + PDC_SDRAM_CONTROL);
 1247			printk(KERN_INFO "Local DIMM ECC Enabled\n");
1248 }
1249
1250 /* DIMM Initialization Select/Enable (bit 18/19) */
1251 data &= (~(1<<18));
1252 data |= (1<<19);
1253 writel(data, mmio + PDC_SDRAM_CONTROL);
1254
1255 error = 1;
1256 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1257 data = readl(mmio + PDC_SDRAM_CONTROL);
1258 if (!(data & (1<<19))) {
1259 error = 0;
1260 break;
1261 }
1262 msleep(i*100);
1263 }
1264 return error;
1265}
1266
1267
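/*
 * pdc20621_dimm_init() - bring up the board-local DIMM at probe time.
 *
 * The chip's timer is sampled over a three second window; when the
 * count indicates a PCI-X bus the PLL F parameter is computed from the
 * measured clock, otherwise a default PLL setting is used.  The DIMM is
 * then detected and programmed from its SPD data, and if the module has
 * ECC the whole DIMM is zero-filled through pdc20621_put_to_dimm() so
 * every location starts out with valid ECC.  Returns nonzero on any
 * DIMM error, which aborts the probe.
 */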
1268static unsigned int pdc20621_dimm_init(struct ata_host *host)
1269{
1270 int speed, size, length;
1271 u32 addr, spd0, pci_status;
1272 u32 time_period = 0;
1273 u32 tcount = 0;
1274 u32 ticks = 0;
1275 u32 clock = 0;
1276 u32 fparam = 0;
1277 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1278
1279 /* hard-code chip #0 */
1280 mmio += PDC_CHIP0_OFS;
1281
1282 /* Initialize PLL based upon PCI Bus Frequency */
1283
1284 /* Initialize Time Period Register */
1285 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1286 time_period = readl(mmio + PDC_TIME_PERIOD);
1287 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1288
1289 /* Enable timer */
1290 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1291 readl(mmio + PDC_TIME_CONTROL);
1292
1293 /* Wait 3 seconds */
1294 msleep(3000);
1295
 1296	/*
 1297	 * While the timer is enabled, the counter is decremented on every
 1298	 * internal clock cycle.
 1299	 */
1300
1301 tcount = readl(mmio + PDC_TIME_COUNTER);
1302 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1303
 1304	/*
 1305	 * If the SX4 sits on a PCI-X bus, the timer counter register should
 1306	 * still be >= (0xffffffff - 3x10^8) after 3 seconds.
 1307	 */
1308 if (tcount >= PCI_X_TCOUNT) {
1309 ticks = (time_period - tcount);
1310 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1311
1312 clock = (ticks / 300000);
1313 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1314
1315 clock = (clock * 33);
1316 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1317
1318 /* PLL F Param (bit 22:16) */
1319 fparam = (1400000 / clock) - 2;
1320 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1321
1322 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1323 pci_status = (0x8a001824 | (fparam << 16));
1324 } else
1325 pci_status = PCI_PLL_INIT;
1326
1327 /* Initialize PLL. */
1328 VPRINTK("pci_status: 0x%x\n", pci_status);
1329 writel(pci_status, mmio + PDC_CTL_STATUS);
1330 readl(mmio + PDC_CTL_STATUS);
1331
 1332	/*
 1333	 * Read the DIMM SPD over the I2C interface and program the DIMM
 1334	 * module controller accordingly.
 1335	 */
 1336	speed = pdc20621_detect_dimm(host);
	if (!speed) {
 1337		printk(KERN_ERR "Local DIMM detection failed\n");
1338 return 1; /* DIMM error */
1339 }
1340 VPRINTK("Local DIMM Speed = %d\n", speed);
1341
1342 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1343 size = pdc20621_prog_dimm0(host);
1344 VPRINTK("Local DIMM Size = %dMB\n", size);
1345
1346 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1347 if (pdc20621_prog_dimm_global(host)) {
 1348		printk(KERN_ERR "Programming DIMM Module Global Control Register failed\n");
1349 return 1;
1350 }
1351
1352#ifdef ATA_VERBOSE_DEBUG
1353 {
 1354		u8 test_pattern1[40] =
 1355			{0x55, 0xAA, 'P', 'r', 'o', 'm', 'i', 's', 'e', ' ',
 1356			 'N', 'o', 't', ' ', 'Y', 'e', 't', ' ',
 1357			 'D', 'e', 'f', 'i', 'n', 'e', 'd', ' ',
 1358			 '1', '.', '1', '0',
 1359			 '9', '8', '0', '3', '1', '6', '1', '2', 0, 0};
 1360		u8 test_pattern2[40] = {0};
 1361
 1362		pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
 1363		pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
 1364
 1365		pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
 1366		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
 1367		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
 1368		       test_pattern2[1], &test_pattern2[2]);
 1369		pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
 1370				       40);
 1371		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
 1372		       test_pattern2[1], &test_pattern2[2]);
 1373
 1374		pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
 1375		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
 1376		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
 1377		       test_pattern2[1], &test_pattern2[2]);
1378 }
1379#endif
1380
 1381	/* ECC initialization. */
1382
1383 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1384 PDC_DIMM_SPD_TYPE, &spd0);
1385 if (spd0 == 0x02) {
1386 void *buf;
1387 VPRINTK("Start ECC initialization\n");
1388 addr = 0;
1389 length = size * 1024 * 1024;
 1390		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
		if (!buf)
			return 1;
1391 while (addr < length) {
1392 pdc20621_put_to_dimm(host, buf, addr,
1393 ECC_ERASE_BUF_SZ);
1394 addr += ECC_ERASE_BUF_SZ;
1395 }
1396 kfree(buf);
1397 VPRINTK("Finish ECC initialization\n");
1398 }
1399 return 0;
1400}
1401
1402
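/*
 * pdc_20621_init() - one-time chip setup: select the page exposed by
 * the 32k DIMM window, then pulse the reset bit in PDC_HDMA_CTLSTAT to
 * put the Host DMA engine into a known state.
 */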
1403static void pdc_20621_init(struct ata_host *host)
1404{
1405 u32 tmp;
1406 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1407
1408 /* hard-code chip #0 */
1409 mmio += PDC_CHIP0_OFS;
1410
1411 /*
1412 * Select page 0x40 for our 32k DIMM window
1413 */
1414 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1415 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1416 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1417
1418 /*
1419 * Reset Host DMA
1420 */
1421 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1422 tmp |= PDC_RESET;
1423 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1424 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1425
1426 udelay(10);
1427
1428 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1429 tmp &= ~PDC_RESET;
1430 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1431 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1432}
1433
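/*
 * pdc_sata_init_one() - PCI probe.  Allocates a four-port libata host,
 * maps the MMIO and DIMM BARs, points each port's taskfile registers at
 * chip 0's register block (offset 0x200 + port * 0x80), sets 32-bit DMA
 * masks, initializes the DIMM and finally activates the host with a
 * shared interrupt handler.
 */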
1434static int pdc_sata_init_one(struct pci_dev *pdev,
1435 const struct pci_device_id *ent)
1436{
1437 const struct ata_port_info *ppi[] =
1438 { &pdc_port_info[ent->driver_data], NULL };
1439 struct ata_host *host;
1440 struct pdc_host_priv *hpriv;
1441 int i, rc;
1442
1443 ata_print_version_once(&pdev->dev, DRV_VERSION);
1444
1445 /* allocate host */
1446 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1447 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1448 if (!host || !hpriv)
1449 return -ENOMEM;
1450
1451 host->private_data = hpriv;
1452
1453 /* acquire resources and fill host */
1454 rc = pcim_enable_device(pdev);
1455 if (rc)
1456 return rc;
1457
1458 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1459 DRV_NAME);
1460 if (rc == -EBUSY)
1461 pcim_pin_device(pdev);
1462 if (rc)
1463 return rc;
1464 host->iomap = pcim_iomap_table(pdev);
1465
1466 for (i = 0; i < 4; i++) {
1467 struct ata_port *ap = host->ports[i];
1468 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1469 unsigned int offset = 0x200 + i * 0x80;
1470
1471 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1472
1473 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1474 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1475 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1476 }
1477
1478 /* configure and activate */
1479 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1480 if (rc)
1481 return rc;
1482 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1483 if (rc)
1484 return rc;
1485
1486 if (pdc20621_dimm_init(host))
1487 return -ENOMEM;
1488 pdc_20621_init(host);
1489
1490 pci_set_master(pdev);
1491 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1492 IRQF_SHARED, &pdc_sata_sht);
1493}
1494
1495module_pci_driver(pdc_sata_pci_driver);
1496
1497MODULE_AUTHOR("Jeff Garzik");
1498MODULE_DESCRIPTION("Promise SATA low-level driver");
1499MODULE_LICENSE("GPL");
1500MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1501MODULE_VERSION(DRV_VERSION);