1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * sata_sx4.c - Promise SATA
4 *
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003-2004 Red Hat, Inc.
10 *
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
13 *
14 * Hardware documentation available under NDA.
15 */
16
17/*
18 Theory of operation
19 -------------------
20
21 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
22 engine, DIMM memory, and four ATA engines (one per SATA port).
23 Data is copied to/from DIMM memory by the HDMA engine, before
24 handing off to one (or more) of the ATA engines. The ATA
25 engines operate solely on DIMM memory.
26
27 The SX4 behaves like a PATA chip, with no SATA controls or
28 knowledge whatsoever, leading to the presumption that
29 PATA<->SATA bridges exist on SX4 boards, external to the
30 PDC20621 chip itself.
31
32 The chip is quite capable, supporting an XOR engine and linked
33	hardware commands (permits a string of transactions to be
34 submitted and waited-on as a single unit), and an optional
35 microprocessor.
36
37 The limiting factor is largely software. This Linux driver was
38 written to multiplex the single HDMA engine to copy disk
39 transactions into a fixed DIMM memory space, from where an ATA
40 engine takes over. As a result, each WRITE looks like this:
41
42 submit HDMA packet to hardware
43 hardware copies data from system memory to DIMM
44 hardware raises interrupt
45
46 submit ATA packet to hardware
47 hardware executes ATA WRITE command, w/ data in DIMM
48 hardware raises interrupt
49
50 and each READ looks like this:
51
52 submit ATA packet to hardware
53 hardware executes ATA READ command, w/ data in DIMM
54 hardware raises interrupt
55
56 submit HDMA packet to hardware
57 hardware copies data from DIMM to system memory
58 hardware raises interrupt
59
60 This is a very slow, lock-step way of doing things that can
61 certainly be improved by motivated kernel hackers.
62
63 */
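/*
   Roughly, that flow maps onto the code below as follows (a sketch, not a
   contract): pdc20621_qc_prep()/pdc20621_dma_prep() build the ATA and Host
   DMA packets plus their S/G tables in a per-port DIMM window;
   pdc20621_packet_start() submits the first packet (HDMA first for writes,
   ATA first for reads); and pdc20621_host_intr() picks up each completion
   interrupt and submits the second half of the transaction.  Because there
   is only one HDMA engine, pdc20621_push_hdma() and pdc20621_pop_hdma()
   serialize HDMA submissions through a small software queue in
   struct pdc_host_priv.
 */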
64
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/slab.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
72#include <linux/device.h>
73#include <scsi/scsi_host.h>
74#include <scsi/scsi_cmnd.h>
75#include <linux/libata.h>
76#include "sata_promise.h"
77
78#define DRV_NAME "sata_sx4"
79#define DRV_VERSION "0.12"
80
81
82enum {
83 PDC_MMIO_BAR = 3,
84 PDC_DIMM_BAR = 4,
85
86 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
87
88 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
89 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
90 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
91 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
92
93 PDC_CTLSTAT = 0x60, /* IDEn control / status */
94
95 PDC_20621_SEQCTL = 0x400,
96 PDC_20621_SEQMASK = 0x480,
97 PDC_20621_GENERAL_CTL = 0x484,
98 PDC_20621_PAGE_SIZE = (32 * 1024),
99
100 /* chosen, not constant, values; we design our own DIMM mem map */
101 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
102 PDC_20621_DIMM_BASE = 0x00200000,
103 PDC_20621_DIMM_DATA = (64 * 1024),
104 PDC_DIMM_DATA_STEP = (256 * 1024),
105 PDC_DIMM_WINDOW_STEP = (8 * 1024),
106 PDC_DIMM_HOST_PRD = (6 * 1024),
107 PDC_DIMM_HOST_PKT = (128 * 0),
108 PDC_DIMM_HPKT_PRD = (128 * 1),
109 PDC_DIMM_ATA_PKT = (128 * 2),
110 PDC_DIMM_APKT_PRD = (128 * 3),
111 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
112 PDC_PAGE_WINDOW = 0x40,
113 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
114 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
115 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
116
117 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
118
119 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
120 (1<<23),
121
122 board_20621 = 0, /* FastTrak S150 SX4 */
123
124 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
125 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
126 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
127
128 PDC_MAX_HDMA = 32,
129 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
130
131 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
132 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
133 PDC_I2C_CONTROL = 0x48,
134 PDC_I2C_ADDR_DATA = 0x4C,
135 PDC_DIMM0_CONTROL = 0x80,
136 PDC_DIMM1_CONTROL = 0x84,
137 PDC_SDRAM_CONTROL = 0x88,
138 PDC_I2C_WRITE = 0, /* master -> slave */
139 PDC_I2C_READ = (1 << 6), /* master <- slave */
140 PDC_I2C_START = (1 << 7), /* start I2C proto */
141 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
142 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
143 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
144 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
145 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
146 PDC_DIMM_SPD_ROW_NUM = 3,
147 PDC_DIMM_SPD_COLUMN_NUM = 4,
148 PDC_DIMM_SPD_MODULE_ROW = 5,
149 PDC_DIMM_SPD_TYPE = 11,
150 PDC_DIMM_SPD_FRESH_RATE = 12,
151 PDC_DIMM_SPD_BANK_NUM = 17,
152 PDC_DIMM_SPD_CAS_LATENCY = 18,
153 PDC_DIMM_SPD_ATTRIBUTE = 21,
154 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
155 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
156 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
157 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
158 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
159 PDC_CTL_STATUS = 0x08,
160 PDC_DIMM_WINDOW_CTLR = 0x0C,
161 PDC_TIME_CONTROL = 0x3C,
162 PDC_TIME_PERIOD = 0x40,
163 PDC_TIME_COUNTER = 0x44,
164 PDC_GENERAL_CTLR = 0x484,
165 PCI_PLL_INIT = 0x8A531824,
166 PCI_X_TCOUNT = 0xEE1E5CFF,
167
168 /* PDC_TIME_CONTROL bits */
169 PDC_TIMER_BUZZER = (1 << 10),
170 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
171 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
172 PDC_TIMER_ENABLE = (1 << 7),
173 PDC_TIMER_MASK_INT = (1 << 5),
174 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
175 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
176 PDC_TIMER_ENABLE |
177 PDC_TIMER_MASK_INT,
178};
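/*
 * A sketch of the DIMM layout implied by the constants above (derived from
 * how the packet builders below use them, not from vendor documentation):
 * each port owns an 8K control window at
 * PDC_20621_DIMM_BASE + port * PDC_DIMM_WINDOW_STEP, holding the Host DMA
 * packet (+0), its PRD entry (+128), the ATA packet (+256), its PRD entry
 * (+384) and the host-side PRD table (+6K).  The data buffer for a port
 * lives separately at
 * PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + port * PDC_DIMM_DATA_STEP.
 */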
179
180#define ECC_ERASE_BUF_SZ (128 * 1024)
181
182struct pdc_port_priv {
183 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
184 u8 *pkt;
185 dma_addr_t pkt_dma;
186};
187
188struct pdc_host_priv {
189 unsigned int doing_hdma;
190 unsigned int hdma_prod;
191 unsigned int hdma_cons;
192 struct {
193 struct ata_queued_cmd *qc;
194 unsigned int seq;
195 unsigned long pkt_ofs;
196 } hdma[32];
197};
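/*
 * hdma[] is a 32-entry software ring for Host DMA requests: the chip has a
 * single HDMA copy engine shared by all four ports, so while it is busy
 * (doing_hdma) pdc20621_push_hdma() parks the request at hdma_prod, and
 * pdc20621_pop_hdma() submits the next queued entry (hdma_cons) once the
 * current transfer's completion interrupt arrives.
 */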
198
199
200static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
201static void pdc_error_handler(struct ata_port *ap);
202static void pdc_freeze(struct ata_port *ap);
203static void pdc_thaw(struct ata_port *ap);
204static int pdc_port_start(struct ata_port *ap);
205static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
206static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
207static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
208static unsigned int pdc20621_dimm_init(struct ata_host *host);
209static int pdc20621_detect_dimm(struct ata_host *host);
210static unsigned int pdc20621_i2c_read(struct ata_host *host,
211 u32 device, u32 subaddr, u32 *pdata);
212static int pdc20621_prog_dimm0(struct ata_host *host);
213static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
214#ifdef ATA_VERBOSE_DEBUG
215static void pdc20621_get_from_dimm(struct ata_host *host,
216 void *psource, u32 offset, u32 size);
217#endif
218static void pdc20621_put_to_dimm(struct ata_host *host,
219 void *psource, u32 offset, u32 size);
220static void pdc20621_irq_clear(struct ata_port *ap);
221static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
222static int pdc_softreset(struct ata_link *link, unsigned int *class,
223 unsigned long deadline);
224static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
225static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
226
227
228static struct scsi_host_template pdc_sata_sht = {
229 ATA_BASE_SHT(DRV_NAME),
230 .sg_tablesize = LIBATA_MAX_PRD,
231 .dma_boundary = ATA_DMA_BOUNDARY,
232};
233
234/* TODO: inherit from base port_ops after converting to new EH */
235static struct ata_port_operations pdc_20621_ops = {
236 .inherits = &ata_sff_port_ops,
237
238 .check_atapi_dma = pdc_check_atapi_dma,
239 .qc_prep = pdc20621_qc_prep,
240 .qc_issue = pdc20621_qc_issue,
241
242 .freeze = pdc_freeze,
243 .thaw = pdc_thaw,
244 .softreset = pdc_softreset,
245 .error_handler = pdc_error_handler,
246 .lost_interrupt = ATA_OP_NULL,
247 .post_internal_cmd = pdc_post_internal_cmd,
248
249 .port_start = pdc_port_start,
250
251 .sff_tf_load = pdc_tf_load_mmio,
252 .sff_exec_command = pdc_exec_command_mmio,
253 .sff_irq_clear = pdc20621_irq_clear,
254};
255
256static const struct ata_port_info pdc_port_info[] = {
257 /* board_20621 */
258 {
259 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
260 ATA_FLAG_PIO_POLLING,
261 .pio_mask = ATA_PIO4,
262 .mwdma_mask = ATA_MWDMA2,
263 .udma_mask = ATA_UDMA6,
264 .port_ops = &pdc_20621_ops,
265 },
266
267};
268
269static const struct pci_device_id pdc_sata_pci_tbl[] = {
270 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
271
272 { } /* terminate list */
273};
274
275static struct pci_driver pdc_sata_pci_driver = {
276 .name = DRV_NAME,
277 .id_table = pdc_sata_pci_tbl,
278 .probe = pdc_sata_init_one,
279 .remove = ata_pci_remove_one,
280};
281
282
283static int pdc_port_start(struct ata_port *ap)
284{
285 struct device *dev = ap->host->dev;
286 struct pdc_port_priv *pp;
287
288 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
289 if (!pp)
290 return -ENOMEM;
291
292 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
293 if (!pp->pkt)
294 return -ENOMEM;
295
296 ap->private_data = pp;
297
298 return 0;
299}
300
301static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
302 unsigned int total_len)
303{
304 u32 addr;
305 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
306 __le32 *buf32 = (__le32 *) buf;
307
308 /* output ATA packet S/G table */
309 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
310 (PDC_DIMM_DATA_STEP * portno);
311 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
312 buf32[dw] = cpu_to_le32(addr);
313 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
314
315 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
316 PDC_20621_DIMM_BASE +
317 (PDC_DIMM_WINDOW_STEP * portno) +
318 PDC_DIMM_APKT_PRD,
319 buf32[dw], buf32[dw + 1]);
320}
321
322static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
323 unsigned int total_len)
324{
325 u32 addr;
326 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
327 __le32 *buf32 = (__le32 *) buf;
328
329 /* output Host DMA packet S/G table */
330 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
331 (PDC_DIMM_DATA_STEP * portno);
332
333 buf32[dw] = cpu_to_le32(addr);
334 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
335
336 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
337 PDC_20621_DIMM_BASE +
338 (PDC_DIMM_WINDOW_STEP * portno) +
339 PDC_DIMM_HPKT_PRD,
340 buf32[dw], buf32[dw + 1]);
341}
342
343static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
344 unsigned int devno, u8 *buf,
345 unsigned int portno)
346{
347 unsigned int i, dw;
348 __le32 *buf32 = (__le32 *) buf;
349 u8 dev_reg;
350
351 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
352 (PDC_DIMM_WINDOW_STEP * portno) +
353 PDC_DIMM_APKT_PRD;
354 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
355
356 i = PDC_DIMM_ATA_PKT;
357
358 /*
359 * Set up ATA packet
360 */
361 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
362 buf[i++] = PDC_PKT_READ;
363 else if (tf->protocol == ATA_PROT_NODATA)
364 buf[i++] = PDC_PKT_NODATA;
365 else
366 buf[i++] = 0;
367 buf[i++] = 0; /* reserved */
368 buf[i++] = portno + 1; /* seq. id */
369 buf[i++] = 0xff; /* delay seq. id */
370
371 /* dimm dma S/G, and next-pkt */
372 dw = i >> 2;
373 if (tf->protocol == ATA_PROT_NODATA)
374 buf32[dw] = 0;
375 else
376 buf32[dw] = cpu_to_le32(dimm_sg);
377 buf32[dw + 1] = 0;
378 i += 8;
379
380 if (devno == 0)
381 dev_reg = ATA_DEVICE_OBS;
382 else
383 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
384
385 /* select device */
386 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
387 buf[i++] = dev_reg;
388
389 /* device control register */
390 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
391 buf[i++] = tf->ctl;
392
393 return i;
394}
395
396static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
397 unsigned int portno)
398{
399 unsigned int dw;
400 u32 tmp;
401 __le32 *buf32 = (__le32 *) buf;
402
403 unsigned int host_sg = PDC_20621_DIMM_BASE +
404 (PDC_DIMM_WINDOW_STEP * portno) +
405 PDC_DIMM_HOST_PRD;
406 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
407 (PDC_DIMM_WINDOW_STEP * portno) +
408 PDC_DIMM_HPKT_PRD;
409 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
410 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
411
412 dw = PDC_DIMM_HOST_PKT >> 2;
413
414 /*
415 * Set up Host DMA packet
416 */
417 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
418 tmp = PDC_PKT_READ;
419 else
420 tmp = 0;
421 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
422 tmp |= (0xff << 24); /* delay seq. id */
423 buf32[dw + 0] = cpu_to_le32(tmp);
424 buf32[dw + 1] = cpu_to_le32(host_sg);
425 buf32[dw + 2] = cpu_to_le32(dimm_sg);
426 buf32[dw + 3] = 0;
427
428 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
429 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
430 PDC_DIMM_HOST_PKT,
431 buf32[dw + 0],
432 buf32[dw + 1],
433 buf32[dw + 2],
434 buf32[dw + 3]);
435}
436
437static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
438{
439 struct scatterlist *sg;
440 struct ata_port *ap = qc->ap;
441 struct pdc_port_priv *pp = ap->private_data;
442 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
443 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
444 unsigned int portno = ap->port_no;
445 unsigned int i, si, idx, total_len = 0, sgt_len;
446 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
447
448 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
449
450 VPRINTK("ata%u: ENTER\n", ap->print_id);
451
452 /* hard-code chip #0 */
453 mmio += PDC_CHIP0_OFS;
454
455 /*
456 * Build S/G table
457 */
458 idx = 0;
459 for_each_sg(qc->sg, sg, qc->n_elem, si) {
460 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
461 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
462 total_len += sg_dma_len(sg);
463 }
464 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
465 sgt_len = idx * 4;
466
467 /*
468 * Build ATA, host DMA packets
469 */
470 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
471 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
472
473 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
474 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
475
476 if (qc->tf.flags & ATA_TFLAG_LBA48)
477 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
478 else
479 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
480
481 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
482
483 /* copy three S/G tables and two packets to DIMM MMIO window */
484 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
485 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
486 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
487 PDC_DIMM_HOST_PRD,
488 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
489
490 /* force host FIFO dump */
491 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
492
493 readl(dimm_mmio); /* MMIO PCI posting flush */
494
495 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
496}
497
498static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
499{
500 struct ata_port *ap = qc->ap;
501 struct pdc_port_priv *pp = ap->private_data;
502 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
503 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
504 unsigned int portno = ap->port_no;
505 unsigned int i;
506
507 VPRINTK("ata%u: ENTER\n", ap->print_id);
508
509 /* hard-code chip #0 */
510 mmio += PDC_CHIP0_OFS;
511
512 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
513
514 if (qc->tf.flags & ATA_TFLAG_LBA48)
515 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
516 else
517 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
518
519 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
520
521 /* copy three S/G tables and two packets to DIMM MMIO window */
522 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
523 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
524
525 /* force host FIFO dump */
526 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
527
528 readl(dimm_mmio); /* MMIO PCI posting flush */
529
530 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
531}
532
533static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
534{
535 switch (qc->tf.protocol) {
536 case ATA_PROT_DMA:
537 pdc20621_dma_prep(qc);
538 break;
539 case ATA_PROT_NODATA:
540 pdc20621_nodata_prep(qc);
541 break;
542 default:
543 break;
544 }
545
546 return AC_ERR_OK;
547}
548
549static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
550 unsigned int seq,
551 u32 pkt_ofs)
552{
553 struct ata_port *ap = qc->ap;
554 struct ata_host *host = ap->host;
555 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
556
557 /* hard-code chip #0 */
558 mmio += PDC_CHIP0_OFS;
559
560 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
561 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
562
563 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
564 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
565}
566
567static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
568 unsigned int seq,
569 u32 pkt_ofs)
570{
571 struct ata_port *ap = qc->ap;
572 struct pdc_host_priv *pp = ap->host->private_data;
573 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
574
575 if (!pp->doing_hdma) {
576 __pdc20621_push_hdma(qc, seq, pkt_ofs);
577 pp->doing_hdma = 1;
578 return;
579 }
580
581 pp->hdma[idx].qc = qc;
582 pp->hdma[idx].seq = seq;
583 pp->hdma[idx].pkt_ofs = pkt_ofs;
584 pp->hdma_prod++;
585}
586
587static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
588{
589 struct ata_port *ap = qc->ap;
590 struct pdc_host_priv *pp = ap->host->private_data;
591 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
592
593 /* if nothing on queue, we're done */
594 if (pp->hdma_prod == pp->hdma_cons) {
595 pp->doing_hdma = 0;
596 return;
597 }
598
599 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
600 pp->hdma[idx].pkt_ofs);
601 pp->hdma_cons++;
602}
603
604#ifdef ATA_VERBOSE_DEBUG
605static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
606{
607 struct ata_port *ap = qc->ap;
608 unsigned int port_no = ap->port_no;
609 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
610
611 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
612 dimm_mmio += PDC_DIMM_HOST_PKT;
613
614 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
615 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
616 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
617 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
618}
619#else
620static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
621#endif /* ATA_VERBOSE_DEBUG */
622
623static void pdc20621_packet_start(struct ata_queued_cmd *qc)
624{
625 struct ata_port *ap = qc->ap;
626 struct ata_host *host = ap->host;
627 unsigned int port_no = ap->port_no;
628 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
629 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
630 u8 seq = (u8) (port_no + 1);
631 unsigned int port_ofs;
632
633 /* hard-code chip #0 */
634 mmio += PDC_CHIP0_OFS;
635
636 VPRINTK("ata%u: ENTER\n", ap->print_id);
637
638 wmb(); /* flush PRD, pkt writes */
639
640 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
641
642 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
643 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
644 seq += 4;
645
646 pdc20621_dump_hdma(qc);
647 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
648 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
649 port_ofs + PDC_DIMM_HOST_PKT,
650 port_ofs + PDC_DIMM_HOST_PKT,
651 seq);
652 } else {
653 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
654 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
655
656 writel(port_ofs + PDC_DIMM_ATA_PKT,
657 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
658 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
659 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
660 port_ofs + PDC_DIMM_ATA_PKT,
661 port_ofs + PDC_DIMM_ATA_PKT,
662 seq);
663 }
664}
665
666static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
667{
668 switch (qc->tf.protocol) {
669 case ATA_PROT_NODATA:
670 if (qc->tf.flags & ATA_TFLAG_POLLING)
671 break;
672 fallthrough;
673 case ATA_PROT_DMA:
674 pdc20621_packet_start(qc);
675 return 0;
676
677 case ATAPI_PROT_DMA:
678 BUG();
679 break;
680
681 default:
682 break;
683 }
684
685 return ata_sff_qc_issue(qc);
686}
687
688static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
689 struct ata_queued_cmd *qc,
690 unsigned int doing_hdma,
691 void __iomem *mmio)
692{
693 unsigned int port_no = ap->port_no;
694 unsigned int port_ofs =
695 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
696 u8 status;
697 unsigned int handled = 0;
698
699 VPRINTK("ENTER\n");
700
701 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
702 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
703
704 /* step two - DMA from DIMM to host */
705 if (doing_hdma) {
706 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
707 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
708 /* get drive status; clear intr; complete txn */
709 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
710 ata_qc_complete(qc);
711 pdc20621_pop_hdma(qc);
712 }
713
714 /* step one - exec ATA command */
715 else {
716 u8 seq = (u8) (port_no + 1 + 4);
717 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
718 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
719
720 /* submit hdma pkt */
721 pdc20621_dump_hdma(qc);
722 pdc20621_push_hdma(qc, seq,
723 port_ofs + PDC_DIMM_HOST_PKT);
724 }
725 handled = 1;
726
727 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
728
729 /* step one - DMA from host to DIMM */
730 if (doing_hdma) {
731 u8 seq = (u8) (port_no + 1);
732 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
733 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
734
735 /* submit ata pkt */
736 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
737 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
738 writel(port_ofs + PDC_DIMM_ATA_PKT,
739 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
740 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
741 }
742
743 /* step two - execute ATA command */
744 else {
745 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
746 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
747 /* get drive status; clear intr; complete txn */
748 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
749 ata_qc_complete(qc);
750 pdc20621_pop_hdma(qc);
751 }
752 handled = 1;
753
754 /* command completion, but no data xfer */
755 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
756
757 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
758 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
759 qc->err_mask |= ac_err_mask(status);
760 ata_qc_complete(qc);
761 handled = 1;
762
763 } else {
764 ap->stats.idle_irq++;
765 }
766
767 return handled;
768}
769
770static void pdc20621_irq_clear(struct ata_port *ap)
771{
772 ioread8(ap->ioaddr.status_addr);
773}
774
775static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
776{
777 struct ata_host *host = dev_instance;
778 struct ata_port *ap;
779 u32 mask = 0;
780 unsigned int i, tmp, port_no;
781 unsigned int handled = 0;
782 void __iomem *mmio_base;
783
784 VPRINTK("ENTER\n");
785
786 if (!host || !host->iomap[PDC_MMIO_BAR]) {
787 VPRINTK("QUICK EXIT\n");
788 return IRQ_NONE;
789 }
790
791 mmio_base = host->iomap[PDC_MMIO_BAR];
792
793 /* reading should also clear interrupts */
794 mmio_base += PDC_CHIP0_OFS;
795 mask = readl(mmio_base + PDC_20621_SEQMASK);
796 VPRINTK("mask == 0x%x\n", mask);
797
798 if (mask == 0xffffffff) {
799 VPRINTK("QUICK EXIT 2\n");
800 return IRQ_NONE;
801 }
802 mask &= 0xffff; /* only 16 tags possible */
803 if (!mask) {
804 VPRINTK("QUICK EXIT 3\n");
805 return IRQ_NONE;
806 }
807
808 spin_lock(&host->lock);
809
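	/*
	 * SEQ IDs 1-4 are the per-port ATA engine completions and 5-8 are
	 * the corresponding HDMA completions (pdc20621_packet_start() uses
	 * port_no + 1, plus 4 for HDMA), hence the (i > 4) test below that
	 * tells pdc20621_host_intr() which half of the transaction finished.
	 */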
810 for (i = 1; i < 9; i++) {
811 port_no = i - 1;
812 if (port_no > 3)
813 port_no -= 4;
814 if (port_no >= host->n_ports)
815 ap = NULL;
816 else
817 ap = host->ports[port_no];
818 tmp = mask & (1 << i);
819 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
820 if (tmp && ap) {
821 struct ata_queued_cmd *qc;
822
823 qc = ata_qc_from_tag(ap, ap->link.active_tag);
824 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
825 handled += pdc20621_host_intr(ap, qc, (i > 4),
826 mmio_base);
827 }
828 }
829
830 spin_unlock(&host->lock);
831
832 VPRINTK("mask == 0x%x\n", mask);
833
834 VPRINTK("EXIT\n");
835
836 return IRQ_RETVAL(handled);
837}
838
839static void pdc_freeze(struct ata_port *ap)
840{
841 void __iomem *mmio = ap->ioaddr.cmd_addr;
842 u32 tmp;
843
844 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
845
846 tmp = readl(mmio + PDC_CTLSTAT);
847 tmp |= PDC_MASK_INT;
848 tmp &= ~PDC_DMA_ENABLE;
849 writel(tmp, mmio + PDC_CTLSTAT);
850 readl(mmio + PDC_CTLSTAT); /* flush */
851}
852
853static void pdc_thaw(struct ata_port *ap)
854{
855 void __iomem *mmio = ap->ioaddr.cmd_addr;
856 u32 tmp;
857
858 /* FIXME: start HDMA engine, if zero ATA engines running */
859
860 /* clear IRQ */
861 ioread8(ap->ioaddr.status_addr);
862
863 /* turn IRQ back on */
864 tmp = readl(mmio + PDC_CTLSTAT);
865 tmp &= ~PDC_MASK_INT;
866 writel(tmp, mmio + PDC_CTLSTAT);
867 readl(mmio + PDC_CTLSTAT); /* flush */
868}
869
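/*
 * Pulse the per-port reset bit: assert PDC_RESET and re-read (up to 11
 * tries) until the bit reads back as set, then clear it again to complete
 * the reset.  The shared HDMA copy engine is deliberately left alone here
 * (see the FIXME below).
 */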
870static void pdc_reset_port(struct ata_port *ap)
871{
872 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
873 unsigned int i;
874 u32 tmp;
875
876 /* FIXME: handle HDMA copy engine */
877
878 for (i = 11; i > 0; i--) {
879 tmp = readl(mmio);
880 if (tmp & PDC_RESET)
881 break;
882
883 udelay(100);
884
885 tmp |= PDC_RESET;
886 writel(tmp, mmio);
887 }
888
889 tmp &= ~PDC_RESET;
890 writel(tmp, mmio);
891 readl(mmio); /* flush */
892}
893
894static int pdc_softreset(struct ata_link *link, unsigned int *class,
895 unsigned long deadline)
896{
897 pdc_reset_port(link->ap);
898 return ata_sff_softreset(link, class, deadline);
899}
900
901static void pdc_error_handler(struct ata_port *ap)
902{
903 if (!(ap->pflags & ATA_PFLAG_FROZEN))
904 pdc_reset_port(ap);
905
906 ata_sff_error_handler(ap);
907}
908
909static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
910{
911 struct ata_port *ap = qc->ap;
912
913 /* make DMA engine forget about the failed command */
914 if (qc->flags & ATA_QCFLAG_FAILED)
915 pdc_reset_port(ap);
916}
917
918static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
919{
920 u8 *scsicmd = qc->scsicmd->cmnd;
921 int pio = 1; /* atapi dma off by default */
922
923 /* Whitelist commands that may use DMA. */
924 switch (scsicmd[0]) {
925 case WRITE_12:
926 case WRITE_10:
927 case WRITE_6:
928 case READ_12:
929 case READ_10:
930 case READ_6:
931 case 0xad: /* READ_DVD_STRUCTURE */
932 case 0xbe: /* READ_CD */
933 pio = 0;
934 }
935 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
936 if (scsicmd[0] == WRITE_10) {
937 unsigned int lba =
938 (scsicmd[2] << 24) |
939 (scsicmd[3] << 16) |
940 (scsicmd[4] << 8) |
941 scsicmd[5];
942 if (lba >= 0xFFFF4FA2)
943 pio = 1;
944 }
945 return pio;
946}
947
948static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
949{
950 WARN_ON(tf->protocol == ATA_PROT_DMA ||
951 tf->protocol == ATAPI_PROT_DMA);
952 ata_sff_tf_load(ap, tf);
953}
954
955
956static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
957{
958 WARN_ON(tf->protocol == ATA_PROT_DMA ||
959 tf->protocol == ATAPI_PROT_DMA);
960 ata_sff_exec_command(ap, tf);
961}
962
963
964static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
965{
966 port->cmd_addr = base;
967 port->data_addr = base;
968 port->feature_addr =
969 port->error_addr = base + 0x4;
970 port->nsect_addr = base + 0x8;
971 port->lbal_addr = base + 0xc;
972 port->lbam_addr = base + 0x10;
973 port->lbah_addr = base + 0x14;
974 port->device_addr = base + 0x18;
975 port->command_addr =
976 port->status_addr = base + 0x1c;
977 port->altstatus_addr =
978 port->ctl_addr = base + 0x38;
979}
980
981
982#ifdef ATA_VERBOSE_DEBUG
983static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
984 u32 offset, u32 size)
985{
986 u32 window_size;
987 u16 idx;
988 u8 page_mask;
989 long dist;
990 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
991 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
992
993 /* hard-code chip #0 */
994 mmio += PDC_CHIP0_OFS;
995
996 page_mask = 0x00;
997 window_size = 0x2000 * 4; /* 32K byte uchar size */
998 idx = (u16) (offset / window_size);
999
1000 writel(0x01, mmio + PDC_GENERAL_CTLR);
1001 readl(mmio + PDC_GENERAL_CTLR);
1002 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1003 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1004
1005 offset -= (idx * window_size);
1006 idx++;
1007 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
1008 (long) (window_size - offset);
1009 memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
1010
1011 psource += dist;
1012 size -= dist;
1013 for (; (long) size >= (long) window_size ;) {
1014 writel(0x01, mmio + PDC_GENERAL_CTLR);
1015 readl(mmio + PDC_GENERAL_CTLR);
1016 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1017 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1018 memcpy_fromio(psource, dimm_mmio, window_size / 4);
1019 psource += window_size;
1020 size -= window_size;
1021 idx++;
1022 }
1023
1024 if (size) {
1025 writel(0x01, mmio + PDC_GENERAL_CTLR);
1026 readl(mmio + PDC_GENERAL_CTLR);
1027 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1028 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1029 memcpy_fromio(psource, dimm_mmio, size / 4);
1030 }
1031}
1032#endif
1033
1034
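/*
 * Copy a buffer into DIMM memory through the 32K BAR window: the window is
 * paged via PDC_DIMM_WINDOW_CTLR, so the copy is split into an initial
 * partial chunk, whole 32K pages, and a final remainder, re-pointing the
 * window before each piece.  The debug-only pdc20621_get_from_dimm() above
 * is the mirror image for reads.
 */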
1035static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1036 u32 offset, u32 size)
1037{
1038 u32 window_size;
1039 u16 idx;
1040 u8 page_mask;
1041 long dist;
1042 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1043 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1044
1045 /* hard-code chip #0 */
1046 mmio += PDC_CHIP0_OFS;
1047
1048 page_mask = 0x00;
1049 window_size = 0x2000 * 4; /* 32K byte uchar size */
1050 idx = (u16) (offset / window_size);
1051
1052 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1053 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1054 offset -= (idx * window_size);
1055 idx++;
1056 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1057 (long) (window_size - offset);
1058 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1059 writel(0x01, mmio + PDC_GENERAL_CTLR);
1060 readl(mmio + PDC_GENERAL_CTLR);
1061
1062 psource += dist;
1063 size -= dist;
1064 for (; (long) size >= (long) window_size ;) {
1065 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1066 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1067 memcpy_toio(dimm_mmio, psource, window_size / 4);
1068 writel(0x01, mmio + PDC_GENERAL_CTLR);
1069 readl(mmio + PDC_GENERAL_CTLR);
1070 psource += window_size;
1071 size -= window_size;
1072 idx++;
1073 }
1074
1075 if (size) {
1076 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1077 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1078 memcpy_toio(dimm_mmio, psource, size / 4);
1079 writel(0x01, mmio + PDC_GENERAL_CTLR);
1080 readl(mmio + PDC_GENERAL_CTLR);
1081 }
1082}
1083
1084
1085static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1086 u32 subaddr, u32 *pdata)
1087{
1088 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1089 u32 i2creg = 0;
1090 u32 status;
1091 u32 count = 0;
1092
1093 /* hard-code chip #0 */
1094 mmio += PDC_CHIP0_OFS;
1095
1096 i2creg |= device << 24;
1097 i2creg |= subaddr << 16;
1098
1099 /* Set the device and subaddress */
1100 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1101 readl(mmio + PDC_I2C_ADDR_DATA);
1102
1103 /* Write Control to perform read operation, mask int */
1104 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1105 mmio + PDC_I2C_CONTROL);
1106
1107 for (count = 0; count <= 1000; count ++) {
1108 status = readl(mmio + PDC_I2C_CONTROL);
1109 if (status & PDC_I2C_COMPLETE) {
1110 status = readl(mmio + PDC_I2C_ADDR_DATA);
1111 break;
1112 } else if (count == 1000)
1113 return 0;
1114 }
1115
1116 *pdata = (status >> 8) & 0x000000ff;
1117 return 1;
1118}
1119
1120
1121static int pdc20621_detect_dimm(struct ata_host *host)
1122{
1123 u32 data = 0;
1124 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1125 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1126 if (data == 100)
1127 return 100;
1128 } else
1129 return 0;
1130
1131 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1132 if (data <= 0x75)
1133 return 133;
1134 } else
1135 return 0;
1136
1137 return 0;
1138}
1139
1140
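/*
 * Program the DIMM0 module control register from the SDRAM SPD contents:
 * the I2C reads below fetch the row/column/bank geometry and timing bytes
 * (CAS latency, RAS/CAS and precharge delays), which are packed into the
 * PDC_DIMM0_CONTROL fields along with the computed module size.  The exact
 * field encoding follows the existing bit-shifting code, not a public
 * datasheet.
 */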
1141static int pdc20621_prog_dimm0(struct ata_host *host)
1142{
1143 u32 spd0[50];
1144 u32 data = 0;
1145 int size, i;
1146 u8 bdimmsize;
1147 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1148 static const struct {
1149 unsigned int reg;
1150 unsigned int ofs;
1151 } pdc_i2c_read_data [] = {
1152 { PDC_DIMM_SPD_TYPE, 11 },
1153 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1154 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1155 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1156 { PDC_DIMM_SPD_ROW_NUM, 3 },
1157 { PDC_DIMM_SPD_BANK_NUM, 17 },
1158 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1159 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1160 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1161 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1162 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1163 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1164 };
1165
1166 /* hard-code chip #0 */
1167 mmio += PDC_CHIP0_OFS;
1168
1169 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1170 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1171 pdc_i2c_read_data[i].reg,
1172 &spd0[pdc_i2c_read_data[i].ofs]);
1173
1174 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1175 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1176 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1177 data |= (((((spd0[29] > spd0[28])
1178 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1179 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1180
1181 if (spd0[18] & 0x08)
1182 data |= ((0x03) << 14);
1183 else if (spd0[18] & 0x04)
1184 data |= ((0x02) << 14);
1185 else if (spd0[18] & 0x01)
1186 data |= ((0x01) << 14);
1187 else
1188 data |= (0 << 14);
1189
1190 /*
1191	   Compute bDIMMSize (a power-of-two exponent) from the SPD geometry
1192	   bytes, then program the resulting module size into the start/end
1193	   address field.
1193 */
1194
1195 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1196 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1197 data |= (((size / 16) - 1) << 16);
1198 data |= (0 << 23);
1199 data |= 8;
1200 writel(data, mmio + PDC_DIMM0_CONTROL);
1201 readl(mmio + PDC_DIMM0_CONTROL);
1202 return size;
1203}
1204
1205
1206static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1207{
1208 u32 data, spd0;
1209 int error, i;
1210 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1211
1212 /* hard-code chip #0 */
1213 mmio += PDC_CHIP0_OFS;
1214
1215 /*
1216 Set To Default : DIMM Module Global Control Register (0x022259F1)
1217 DIMM Arbitration Disable (bit 20)
1218 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1219 Refresh Enable (bit 17)
1220 */
1221
1222 data = 0x022259F1;
1223 writel(data, mmio + PDC_SDRAM_CONTROL);
1224 readl(mmio + PDC_SDRAM_CONTROL);
1225
1226 /* Turn on for ECC */
1227 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1228 PDC_DIMM_SPD_TYPE, &spd0)) {
1229 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1230 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1231 return 1;
1232 }
1233 if (spd0 == 0x02) {
1234 data |= (0x01 << 16);
1235 writel(data, mmio + PDC_SDRAM_CONTROL);
1236 readl(mmio + PDC_SDRAM_CONTROL);
1237 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1238 }
1239
1240 /* DIMM Initialization Select/Enable (bit 18/19) */
1241 data &= (~(1<<18));
1242 data |= (1<<19);
1243 writel(data, mmio + PDC_SDRAM_CONTROL);
1244
1245 error = 1;
1246 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1247 data = readl(mmio + PDC_SDRAM_CONTROL);
1248 if (!(data & (1<<19))) {
1249 error = 0;
1250 break;
1251 }
1252 msleep(i*100);
1253 }
1254 return error;
1255}
1256
1257
1258static unsigned int pdc20621_dimm_init(struct ata_host *host)
1259{
1260 int speed, size, length;
1261 u32 addr, spd0, pci_status;
1262 u32 time_period = 0;
1263 u32 tcount = 0;
1264 u32 ticks = 0;
1265 u32 clock = 0;
1266 u32 fparam = 0;
1267 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1268
1269 /* hard-code chip #0 */
1270 mmio += PDC_CHIP0_OFS;
1271
1272 /* Initialize PLL based upon PCI Bus Frequency */
1273
1274 /* Initialize Time Period Register */
1275 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1276 time_period = readl(mmio + PDC_TIME_PERIOD);
1277 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1278
1279 /* Enable timer */
1280 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1281 readl(mmio + PDC_TIME_CONTROL);
1282
1283 /* Wait 3 seconds */
1284 msleep(3000);
1285
1286 /*
1287 When timer is enabled, counter is decreased every internal
1288 clock cycle.
1289 */
1290
1291 tcount = readl(mmio + PDC_TIME_COUNTER);
1292 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1293
1294 /*
1295 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1296 register should be >= (0xffffffff - 3x10^8).
1297 */
1298 if (tcount >= PCI_X_TCOUNT) {
1299 ticks = (time_period - tcount);
1300 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1301
1302 clock = (ticks / 300000);
1303 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1304
1305 clock = (clock * 33);
1306 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1307
1308 /* PLL F Param (bit 22:16) */
1309 fparam = (1400000 / clock) - 2;
1310 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1311
1312 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1313 pci_status = (0x8a001824 | (fparam << 16));
1314 } else
1315 pci_status = PCI_PLL_INIT;
1316
1317 /* Initialize PLL. */
1318 VPRINTK("pci_status: 0x%x\n", pci_status);
1319 writel(pci_status, mmio + PDC_CTL_STATUS);
1320 readl(mmio + PDC_CTL_STATUS);
1321
1322 /*
1323 Read SPD of DIMM by I2C interface,
1324 and program the DIMM Module Controller.
1325 */
1326 if (!(speed = pdc20621_detect_dimm(host))) {
1327 printk(KERN_ERR "Detect Local DIMM Fail\n");
1328 return 1; /* DIMM error */
1329 }
1330 VPRINTK("Local DIMM Speed = %d\n", speed);
1331
1332 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1333 size = pdc20621_prog_dimm0(host);
1334 VPRINTK("Local DIMM Size = %dMB\n", size);
1335
1336 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1337 if (pdc20621_prog_dimm_global(host)) {
1338 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1339 return 1;
1340 }
1341
1342#ifdef ATA_VERBOSE_DEBUG
1343 {
1344 u8 test_parttern1[40] =
1345 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1346 'N','o','t',' ','Y','e','t',' ',
1347 'D','e','f','i','n','e','d',' ',
1348 '1','.','1','0',
1349 '9','8','0','3','1','6','1','2',0,0};
1350 u8 test_parttern2[40] = {0};
1351
1352 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1353 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1354
1355 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1356 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1357 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1358 test_parttern2[1], &(test_parttern2[2]));
1359 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1360 40);
1361 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1362 test_parttern2[1], &(test_parttern2[2]));
1363
1364 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1365 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1366 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1367 test_parttern2[1], &(test_parttern2[2]));
1368 }
1369#endif
1370
1371	/* ECC initialization. */
1372
1373 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1374 PDC_DIMM_SPD_TYPE, &spd0)) {
1375 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1376 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1377 return 1;
1378 }
1379 if (spd0 == 0x02) {
1380 void *buf;
1381 VPRINTK("Start ECC initialization\n");
1382 addr = 0;
1383 length = size * 1024 * 1024;
1384 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1385 if (!buf)
1386 return 1;
1387 while (addr < length) {
1388 pdc20621_put_to_dimm(host, buf, addr,
1389 ECC_ERASE_BUF_SZ);
1390 addr += ECC_ERASE_BUF_SZ;
1391 }
1392 kfree(buf);
1393 VPRINTK("Finish ECC initialization\n");
1394 }
1395 return 0;
1396}
1397
1398
1399static void pdc_20621_init(struct ata_host *host)
1400{
1401 u32 tmp;
1402 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1403
1404 /* hard-code chip #0 */
1405 mmio += PDC_CHIP0_OFS;
1406
1407 /*
1408 * Select page 0x40 for our 32k DIMM window
1409 */
1410 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1411 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1412 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1413
1414 /*
1415 * Reset Host DMA
1416 */
1417 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1418 tmp |= PDC_RESET;
1419 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1420 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1421
1422 udelay(10);
1423
1424 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1425 tmp &= ~PDC_RESET;
1426 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1427 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1428}
1429
1430static int pdc_sata_init_one(struct pci_dev *pdev,
1431 const struct pci_device_id *ent)
1432{
1433 const struct ata_port_info *ppi[] =
1434 { &pdc_port_info[ent->driver_data], NULL };
1435 struct ata_host *host;
1436 struct pdc_host_priv *hpriv;
1437 int i, rc;
1438
1439 ata_print_version_once(&pdev->dev, DRV_VERSION);
1440
1441 /* allocate host */
1442 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1443 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1444 if (!host || !hpriv)
1445 return -ENOMEM;
1446
1447 host->private_data = hpriv;
1448
1449 /* acquire resources and fill host */
1450 rc = pcim_enable_device(pdev);
1451 if (rc)
1452 return rc;
1453
1454 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1455 DRV_NAME);
1456 if (rc == -EBUSY)
1457 pcim_pin_device(pdev);
1458 if (rc)
1459 return rc;
1460 host->iomap = pcim_iomap_table(pdev);
1461
1462 for (i = 0; i < 4; i++) {
1463 struct ata_port *ap = host->ports[i];
1464 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1465 unsigned int offset = 0x200 + i * 0x80;
1466
1467 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1468
1469 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1470 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1471 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1472 }
1473
1474 /* configure and activate */
1475 rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
1476 if (rc)
1477 return rc;
1478
1479 if (pdc20621_dimm_init(host))
1480 return -ENOMEM;
1481 pdc_20621_init(host);
1482
1483 pci_set_master(pdev);
1484 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1485 IRQF_SHARED, &pdc_sata_sht);
1486}
1487
1488module_pci_driver(pdc_sata_pci_driver);
1489
1490MODULE_AUTHOR("Jeff Garzik");
1491MODULE_DESCRIPTION("Promise SATA low-level driver");
1492MODULE_LICENSE("GPL");
1493MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1494MODULE_VERSION(DRV_VERSION);
1/*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33/*
34 Theory of operation
35 -------------------
36
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
42
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
47
48 The chip is quite capable, supporting an XOR engine and linked
49 hardware commands (permits a string to transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
52
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
57
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
61
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
65
66 and each READ looks like this:
67
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
71
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
75
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
78
79 */
80
81#include <linux/kernel.h>
82#include <linux/module.h>
83#include <linux/pci.h>
84#include <linux/slab.h>
85#include <linux/init.h>
86#include <linux/blkdev.h>
87#include <linux/delay.h>
88#include <linux/interrupt.h>
89#include <linux/device.h>
90#include <scsi/scsi_host.h>
91#include <scsi/scsi_cmnd.h>
92#include <linux/libata.h>
93#include "sata_promise.h"
94
95#define DRV_NAME "sata_sx4"
96#define DRV_VERSION "0.12"
97
98
99enum {
100 PDC_MMIO_BAR = 3,
101 PDC_DIMM_BAR = 4,
102
103 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
104
105 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
106 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
107 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
108 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
109
110 PDC_CTLSTAT = 0x60, /* IDEn control / status */
111
112 PDC_20621_SEQCTL = 0x400,
113 PDC_20621_SEQMASK = 0x480,
114 PDC_20621_GENERAL_CTL = 0x484,
115 PDC_20621_PAGE_SIZE = (32 * 1024),
116
117 /* chosen, not constant, values; we design our own DIMM mem map */
118 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
119 PDC_20621_DIMM_BASE = 0x00200000,
120 PDC_20621_DIMM_DATA = (64 * 1024),
121 PDC_DIMM_DATA_STEP = (256 * 1024),
122 PDC_DIMM_WINDOW_STEP = (8 * 1024),
123 PDC_DIMM_HOST_PRD = (6 * 1024),
124 PDC_DIMM_HOST_PKT = (128 * 0),
125 PDC_DIMM_HPKT_PRD = (128 * 1),
126 PDC_DIMM_ATA_PKT = (128 * 2),
127 PDC_DIMM_APKT_PRD = (128 * 3),
128 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
129 PDC_PAGE_WINDOW = 0x40,
130 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
131 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
132 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
133
134 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
135
136 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
137 (1<<23),
138
139 board_20621 = 0, /* FastTrak S150 SX4 */
140
141 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
142 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
143 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
144
145 PDC_MAX_HDMA = 32,
146 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
147
148 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
149 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
150 PDC_I2C_CONTROL = 0x48,
151 PDC_I2C_ADDR_DATA = 0x4C,
152 PDC_DIMM0_CONTROL = 0x80,
153 PDC_DIMM1_CONTROL = 0x84,
154 PDC_SDRAM_CONTROL = 0x88,
155 PDC_I2C_WRITE = 0, /* master -> slave */
156 PDC_I2C_READ = (1 << 6), /* master <- slave */
157 PDC_I2C_START = (1 << 7), /* start I2C proto */
158 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
159 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
160 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
161 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
162 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
163 PDC_DIMM_SPD_ROW_NUM = 3,
164 PDC_DIMM_SPD_COLUMN_NUM = 4,
165 PDC_DIMM_SPD_MODULE_ROW = 5,
166 PDC_DIMM_SPD_TYPE = 11,
167 PDC_DIMM_SPD_FRESH_RATE = 12,
168 PDC_DIMM_SPD_BANK_NUM = 17,
169 PDC_DIMM_SPD_CAS_LATENCY = 18,
170 PDC_DIMM_SPD_ATTRIBUTE = 21,
171 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
172 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
173 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
174 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
175 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
176 PDC_CTL_STATUS = 0x08,
177 PDC_DIMM_WINDOW_CTLR = 0x0C,
178 PDC_TIME_CONTROL = 0x3C,
179 PDC_TIME_PERIOD = 0x40,
180 PDC_TIME_COUNTER = 0x44,
181 PDC_GENERAL_CTLR = 0x484,
182 PCI_PLL_INIT = 0x8A531824,
183 PCI_X_TCOUNT = 0xEE1E5CFF,
184
185 /* PDC_TIME_CONTROL bits */
186 PDC_TIMER_BUZZER = (1 << 10),
187 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
188 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
189 PDC_TIMER_ENABLE = (1 << 7),
190 PDC_TIMER_MASK_INT = (1 << 5),
191 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
192 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
193 PDC_TIMER_ENABLE |
194 PDC_TIMER_MASK_INT,
195};
196
197#define ECC_ERASE_BUF_SZ (128 * 1024)
198
199struct pdc_port_priv {
200 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
201 u8 *pkt;
202 dma_addr_t pkt_dma;
203};
204
205struct pdc_host_priv {
206 unsigned int doing_hdma;
207 unsigned int hdma_prod;
208 unsigned int hdma_cons;
209 struct {
210 struct ata_queued_cmd *qc;
211 unsigned int seq;
212 unsigned long pkt_ofs;
213 } hdma[32];
214};
215
216
217static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
218static void pdc_error_handler(struct ata_port *ap);
219static void pdc_freeze(struct ata_port *ap);
220static void pdc_thaw(struct ata_port *ap);
221static int pdc_port_start(struct ata_port *ap);
222static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
223static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
224static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
225static unsigned int pdc20621_dimm_init(struct ata_host *host);
226static int pdc20621_detect_dimm(struct ata_host *host);
227static unsigned int pdc20621_i2c_read(struct ata_host *host,
228 u32 device, u32 subaddr, u32 *pdata);
229static int pdc20621_prog_dimm0(struct ata_host *host);
230static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
231#ifdef ATA_VERBOSE_DEBUG
232static void pdc20621_get_from_dimm(struct ata_host *host,
233 void *psource, u32 offset, u32 size);
234#endif
235static void pdc20621_put_to_dimm(struct ata_host *host,
236 void *psource, u32 offset, u32 size);
237static void pdc20621_irq_clear(struct ata_port *ap);
238static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
239static int pdc_softreset(struct ata_link *link, unsigned int *class,
240 unsigned long deadline);
241static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
242static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
243
244
245static struct scsi_host_template pdc_sata_sht = {
246 ATA_BASE_SHT(DRV_NAME),
247 .sg_tablesize = LIBATA_MAX_PRD,
248 .dma_boundary = ATA_DMA_BOUNDARY,
249};
250
251/* TODO: inherit from base port_ops after converting to new EH */
252static struct ata_port_operations pdc_20621_ops = {
253 .inherits = &ata_sff_port_ops,
254
255 .check_atapi_dma = pdc_check_atapi_dma,
256 .qc_prep = pdc20621_qc_prep,
257 .qc_issue = pdc20621_qc_issue,
258
259 .freeze = pdc_freeze,
260 .thaw = pdc_thaw,
261 .softreset = pdc_softreset,
262 .error_handler = pdc_error_handler,
263 .lost_interrupt = ATA_OP_NULL,
264 .post_internal_cmd = pdc_post_internal_cmd,
265
266 .port_start = pdc_port_start,
267
268 .sff_tf_load = pdc_tf_load_mmio,
269 .sff_exec_command = pdc_exec_command_mmio,
270 .sff_irq_clear = pdc20621_irq_clear,
271};
272
273static const struct ata_port_info pdc_port_info[] = {
274 /* board_20621 */
275 {
276 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
277 ATA_FLAG_PIO_POLLING,
278 .pio_mask = ATA_PIO4,
279 .mwdma_mask = ATA_MWDMA2,
280 .udma_mask = ATA_UDMA6,
281 .port_ops = &pdc_20621_ops,
282 },
283
284};
285
286static const struct pci_device_id pdc_sata_pci_tbl[] = {
287 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
288
289 { } /* terminate list */
290};
291
292static struct pci_driver pdc_sata_pci_driver = {
293 .name = DRV_NAME,
294 .id_table = pdc_sata_pci_tbl,
295 .probe = pdc_sata_init_one,
296 .remove = ata_pci_remove_one,
297};
298
299
300static int pdc_port_start(struct ata_port *ap)
301{
302 struct device *dev = ap->host->dev;
303 struct pdc_port_priv *pp;
304
305 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
306 if (!pp)
307 return -ENOMEM;
308
309 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
310 if (!pp->pkt)
311 return -ENOMEM;
312
313 ap->private_data = pp;
314
315 return 0;
316}
317
318static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
319 unsigned int portno,
320 unsigned int total_len)
321{
322 u32 addr;
323 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
324 __le32 *buf32 = (__le32 *) buf;
325
326 /* output ATA packet S/G table */
327 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
328 (PDC_DIMM_DATA_STEP * portno);
329 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
330 buf32[dw] = cpu_to_le32(addr);
331 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
332
333 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
334 PDC_20621_DIMM_BASE +
335 (PDC_DIMM_WINDOW_STEP * portno) +
336 PDC_DIMM_APKT_PRD,
337 buf32[dw], buf32[dw + 1]);
338}
339
340static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
341 unsigned int portno,
342 unsigned int total_len)
343{
344 u32 addr;
345 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
346 __le32 *buf32 = (__le32 *) buf;
347
348 /* output Host DMA packet S/G table */
349 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
350 (PDC_DIMM_DATA_STEP * portno);
351
352 buf32[dw] = cpu_to_le32(addr);
353 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
354
355 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
356 PDC_20621_DIMM_BASE +
357 (PDC_DIMM_WINDOW_STEP * portno) +
358 PDC_DIMM_HPKT_PRD,
359 buf32[dw], buf32[dw + 1]);
360}
361
static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}

static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}

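/*
 * Assemble everything needed for one DMA command in pp->dimm_buf: the host
 * S/G table describing system memory (built from the qc scatterlist and
 * placed after the fixed header), plus the host DMA packet, the ATA packet
 * and their single-entry S/G tables inside the header itself.  The result
 * is then copied into this port's window of DIMM memory through the DIMM
 * BAR.
 */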
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}

static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy ATA packet header (no host S/G table needed) to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}

static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc20621_dma_prep(qc);
		break;
	case ATA_PROT_NODATA:
		pdc20621_nodata_prep(qc);
		break;
	default:
		break;
	}
}

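/*
 * The chip has a single HDMA copy engine shared by all four ports, so HDMA
 * packet submission is serialized in software.  __pdc20621_push_hdma()
 * writes a packet pointer to the hardware immediately;
 * pdc20621_push_hdma() either does so right away or parks the request in
 * the small ring buffer in pdc_host_priv, and pdc20621_pop_hdma() submits
 * the next parked request (if any) when the current transfer completes.
 */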
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}

static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
			       unsigned int seq,
			       u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;

	if (!pp->doing_hdma) {
		__pdc20621_push_hdma(qc, seq, pkt_ofs);
		pp->doing_hdma = 1;
		return;
	}

	pp->hdma[idx].qc = qc;
	pp->hdma[idx].seq = seq;
	pp->hdma[idx].pkt_ofs = pkt_ofs;
	pp->hdma_prod++;
}

static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}

#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */

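/*
 * Kick off a prepared command.  DMA writes use sequence id port_no + 1 + 4
 * and begin with the host-to-DIMM copy (HDMA packet); reads and no-data
 * commands use sequence id port_no + 1 and begin with the ATA packet,
 * submitted through the port's PDC_PKT_SUBMIT register.
 */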
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();	/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}

static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}

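/*
 * Per-port completion handling.  Each DMA command completes in two
 * interrupts: for reads the ATA engine finishes first and the DIMM-to-host
 * HDMA copy is pushed; for writes the host-to-DIMM HDMA copy finishes
 * first and the ATA packet is then submitted.  The second interrupt of the
 * pair completes the qc and pops the next queued HDMA request.  The
 * doing_hdma argument (derived from the SEQ number) tells the two apart.
 */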
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}

static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}

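/*
 * Top-level interrupt handler.  PDC_20621_SEQMASK reports which sequence
 * ids have completed: ids 1-4 belong to the four ATA engines, ids 5-8 to
 * the HDMA transfers for the same ports, which is how the (i > 4) argument
 * to pdc20621_host_intr() distinguishes the two phases of a command.
 */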
static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}

static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

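/*
 * Decide whether an ATAPI command may use DMA (return 0) or must fall back
 * to PIO (return 1).  Only a whitelist of READ/WRITE CDBs is allowed to
 * use DMA, and WRITE_10 near the very top of the LBA range is forced to
 * PIO as well (see the in-line comment below).
 */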
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
	}
	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}
	return pio;
}

static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}


static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}


static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr = base;
	port->data_addr = base;
	port->feature_addr =
	port->error_addr = base + 0x4;
	port->nsect_addr = base + 0x8;
	port->lbal_addr = base + 0xc;
	port->lbam_addr = base + 0x10;
	port->lbah_addr = base + 0x14;
	port->device_addr = base + 0x18;
	port->command_addr =
	port->status_addr = base + 0x1c;
	port->altstatus_addr =
	port->ctl_addr = base + 0x38;
}


#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
		      dist);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      size / 4);
	}
}
#endif


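/*
 * Copy a buffer into DIMM memory through the 32K window exposed by the
 * DIMM BAR.  PDC_DIMM_WINDOW_CTLR selects which 32K page of the DIMM is
 * visible; the loop slides the window across the DIMM page by page until
 * the whole buffer has been written, poking PDC_GENERAL_CTLR after each
 * chunk (apparently to flush the chip's write FIFO).
 */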
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}


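/*
 * Read one byte from a device on the chip's I2C bus (used here for the
 * DIMM's SPD EEPROM): program the device/sub-address, start the transfer,
 * then poll PDC_I2C_CONTROL for completion.  Returns 1 and stores the byte
 * in *pdata on success, 0 on timeout.
 */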
static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	u32 i2creg = 0;
	u32 status;
	u32 count = 0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
	readl(mmio + PDC_I2C_ADDR_DATA);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL);

	for (count = 0; count <= 1000; count++) {
		status = readl(mmio + PDC_I2C_CONTROL);
		if (status & PDC_I2C_COMPLETE) {
			status = readl(mmio + PDC_I2C_ADDR_DATA);
			break;
		} else if (count == 1000)
			return 0;
	}

	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}


static int pdc20621_detect_dimm(struct ata_host *host)
{
	u32 data = 0;
	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			      PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
		if (data == 100)
			return 100;
	} else
		return 0;

	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
		if (data <= 0x75)
			return 133;
	} else
		return 0;

	return 0;
}


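/*
 * Program the DIMM 0 module control register from the SPD contents:
 * column/row/bank geometry, refresh rate, module rows, precharge and CAS
 * latency values are read over I2C and packed into a single register
 * value.  Returns the derived module size in MB, which the caller later
 * uses to bound the ECC zero-fill.
 */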
static int pdc20621_prog_dimm0(struct ata_host *host)
{
	u32 spd0[50];
	u32 data = 0;
	int size, i;
	u8 bdimmsize;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	static const struct {
		unsigned int reg;
		unsigned int ofs;
	} pdc_i2c_read_data[] = {
		{ PDC_DIMM_SPD_TYPE, 11 },
		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
		{ PDC_DIMM_SPD_ROW_NUM, 3 },
		{ PDC_DIMM_SPD_BANK_NUM, 17 },
		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
	};

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
				  pdc_i2c_read_data[i].reg,
				  &spd0[pdc_i2c_read_data[i].ofs]);

	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3] - 11) << 4);
	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
		((((spd0[27] + 9) / 10) - 1) << 8);
	data |= (((((spd0[29] > spd0[28])
		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;

	if (spd0[18] & 0x08)
		data |= ((0x03) << 14);
	else if (spd0[18] & 0x04)
		data |= ((0x02) << 14);
	else if (spd0[18] & 0x01)
		data |= ((0x01) << 14);
	else
		data |= (0 << 14);

	/*
	 * Calculate bdimmsize (log2 of the module size in bytes) from the
	 * SPD geometry, then program the size field (in 16 MB units) into
	 * the DIMM0 control register.
	 */
	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
	data |= (((size / 16) - 1) << 16);
	data |= (0 << 23);
	data |= 8;
	writel(data, mmio + PDC_DIMM0_CONTROL);
	readl(mmio + PDC_DIMM0_CONTROL);
	return size;
}


static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
{
	u32 data, spd0;
	int error, i;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Set To Default : DIMM Module Global Control Register (0x022259F1)
	 * DIMM Arbitration Disable (bit 20)
	 * DIMM Data/Control Output Driving Selection (bit12 - bit15)
	 * Refresh Enable (bit 17)
	 */

	data = 0x022259F1;
	writel(data, mmio + PDC_SDRAM_CONTROL);
	readl(mmio + PDC_SDRAM_CONTROL);

	/* Turn on for ECC */
	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			  PDC_DIMM_SPD_TYPE, &spd0);
	if (spd0 == 0x02) {
		data |= (0x01 << 16);
		writel(data, mmio + PDC_SDRAM_CONTROL);
		readl(mmio + PDC_SDRAM_CONTROL);
		printk(KERN_ERR "Local DIMM ECC Enabled\n");
	}

	/* DIMM Initialization Select/Enable (bit 18/19) */
	data &= (~(1<<18));
	data |= (1<<19);
	writel(data, mmio + PDC_SDRAM_CONTROL);

	error = 1;
	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
		data = readl(mmio + PDC_SDRAM_CONTROL);
		if (!(data & (1<<19))) {
			error = 0;
			break;
		}
		msleep(i*100);
	}
	return error;
}


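/*
 * One-time DIMM bring-up at probe time: measure the internal clock with
 * the free-running down-counter over ~3 seconds and derive a PLL F
 * parameter from it (falling back to PCI_PLL_INIT otherwise), detect the
 * DIMM via SPD, program the module and global control registers, and
 * finally zero-fill the whole module when the SPD reports an ECC part,
 * presumably so the ECC check bits start out consistent.
 */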
static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
	int speed, size, length;
	u32 addr, spd0, pci_status;
	u32 time_period = 0;
	u32 tcount = 0;
	u32 ticks = 0;
	u32 clock = 0;
	u32 fparam = 0;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* Initialize PLL based upon PCI Bus Frequency */

	/* Initialize Time Period Register */
	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
	time_period = readl(mmio + PDC_TIME_PERIOD);
	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);

	/* Enable timer */
	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
	readl(mmio + PDC_TIME_CONTROL);

	/* Wait 3 seconds */
	msleep(3000);

	/*
	 * When the timer is enabled, the counter is decreased every internal
	 * clock cycle.
	 */
	tcount = readl(mmio + PDC_TIME_COUNTER);
	VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);

	/*
	 * If the SX4 is on a PCI-X bus, after 3 seconds the timer counter
	 * register should be >= (0xffffffff - 3x10^8).
	 */
	if (tcount >= PCI_X_TCOUNT) {
		ticks = (time_period - tcount);
		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);

		clock = (ticks / 300000);
		VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);

		clock = (clock * 33);
		VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);

		/* PLL F Param (bit 22:16) */
		fparam = (1400000 / clock) - 2;
		VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);

		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
		pci_status = (0x8a001824 | (fparam << 16));
	} else
		pci_status = PCI_PLL_INIT;

	/* Initialize PLL. */
	VPRINTK("pci_status: 0x%x\n", pci_status);
	writel(pci_status, mmio + PDC_CTL_STATUS);
	readl(mmio + PDC_CTL_STATUS);

	/*
	 * Read the SPD of the DIMM over the I2C interface,
	 * and program the DIMM Module Controller.
	 */
	speed = pdc20621_detect_dimm(host);
	if (!speed) {
		printk(KERN_ERR "Detect Local DIMM Fail\n");
		return 1;	/* DIMM error */
	}
	VPRINTK("Local DIMM Speed = %d\n", speed);

	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
	size = pdc20621_prog_dimm0(host);
	VPRINTK("Local DIMM Size = %dMB\n", size);

	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
	if (pdc20621_prog_dimm_global(host)) {
		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
		return 1;
	}

#ifdef ATA_VERBOSE_DEBUG
	{
		u8 test_pattern1[40] =
			{0x55, 0xAA, 'P', 'r', 'o', 'm', 'i', 's', 'e', ' ',
			 'N', 'o', 't', ' ', 'Y', 'e', 't', ' ',
			 'D', 'e', 'f', 'i', 'n', 'e', 'd', ' ',
			 '1', '.', '1', '0',
			 '9', '8', '0', '3', '1', '6', '1', '2', 0, 0};
		u8 test_pattern2[40] = {0};

		pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
		pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);

		pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
		       test_pattern2[1], &(test_pattern2[2]));
		pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
				       40);
		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
		       test_pattern2[1], &(test_pattern2[2]));

		pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
		       test_pattern2[1], &(test_pattern2[2]));
	}
#endif

	/* ECC initialization. */

	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			  PDC_DIMM_SPD_TYPE, &spd0);
	if (spd0 == 0x02) {
		void *buf;
		VPRINTK("Start ECC initialization\n");
		addr = 0;
		length = size * 1024 * 1024;
		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
		if (!buf)
			return 1;	/* DIMM error */
		while (addr < length) {
			pdc20621_put_to_dimm(host, buf, addr,
					     ECC_ERASE_BUF_SZ);
			addr += ECC_ERASE_BUF_SZ;
		}
		kfree(buf);
		VPRINTK("Finish ECC initialization\n");
	}
	return 0;
}


static void pdc_20621_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Select page 0x40 for our 32k DIMM window
	 */
	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);

	/*
	 * Reset Host DMA
	 */
	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp |= PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */

	udelay(10);

	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp &= ~PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
}

static int pdc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] =
		{ &pdc_port_info[ent->driver_data], NULL };
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
				DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 4; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
		unsigned int offset = 0x200 + i * 0x80;

		pdc_sata_setup_port(&ap->ioaddr, base + offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
	}

	/* configure and activate */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	if (pdc20621_dimm_init(host))
		return -ENOMEM;
	pdc_20621_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
				 IRQF_SHARED, &pdc_sata_sht);
}


static int __init pdc_sata_init(void)
{
	return pci_register_driver(&pdc_sata_pci_driver);
}


static void __exit pdc_sata_exit(void)
{
	pci_unregister_driver(&pdc_sata_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(pdc_sata_init);
module_exit(pdc_sata_exit);