1/*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
57#include <linux/scatterlist.h>
58#include <linux/io.h>
59#include <linux/async.h>
60#include <linux/log2.h>
61#include <linux/slab.h>
62#include <scsi/scsi.h>
63#include <scsi/scsi_cmnd.h>
64#include <scsi/scsi_host.h>
65#include <linux/libata.h>
66#include <asm/byteorder.h>
67#include <linux/cdrom.h>
68#include <linux/ratelimit.h>
69
70#include "libata.h"
71#include "libata-transport.h"
72
73/* debounce timing parameters in msecs { interval, duration, timeout } */
74const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
75const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
76const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
77
78const struct ata_port_operations ata_base_port_ops = {
79 .prereset = ata_std_prereset,
80 .postreset = ata_std_postreset,
81 .error_handler = ata_std_error_handler,
82};
83
84const struct ata_port_operations sata_port_ops = {
85 .inherits = &ata_base_port_ops,
86
87 .qc_defer = ata_std_qc_defer,
88 .hardreset = sata_std_hardreset,
89};
90
91static unsigned int ata_dev_init_params(struct ata_device *dev,
92 u16 heads, u16 sectors);
93static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
94static void ata_dev_xfermask(struct ata_device *dev);
95static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
96
97unsigned int ata_print_id = 1;
98
99struct ata_force_param {
100 const char *name;
101 unsigned int cbl;
102 int spd_limit;
103 unsigned long xfer_mask;
104 unsigned int horkage_on;
105 unsigned int horkage_off;
106 unsigned int lflags;
107};
108
109struct ata_force_ent {
110 int port;
111 int device;
112 struct ata_force_param param;
113};
114
115static struct ata_force_ent *ata_force_tbl;
116static int ata_force_tbl_size;
117
118static char ata_force_param_buf[PAGE_SIZE] __initdata;
119/* param_buf is thrown away after initialization, disallow read */
120module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
121MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
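/*
 * Illustrative example only (see Documentation/kernel-parameters.txt for
 * the authoritative syntax): booting with "libata.force=1:noncq,2.00:udma4"
 * would disable NCQ for every device on port 1 and limit the first device
 * on port 2 to UDMA/4.
 */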
122
123static int atapi_enabled = 1;
124module_param(atapi_enabled, int, 0444);
125MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
126
127static int atapi_dmadir = 0;
128module_param(atapi_dmadir, int, 0444);
129MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
130
131int atapi_passthru16 = 1;
132module_param(atapi_passthru16, int, 0444);
133MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
134
135int libata_fua = 0;
136module_param_named(fua, libata_fua, int, 0444);
137MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
138
139static int ata_ignore_hpa;
140module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
141MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
142
143static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
144module_param_named(dma, libata_dma_mask, int, 0444);
145MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
146
147static int ata_probe_timeout;
148module_param(ata_probe_timeout, int, 0444);
149MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
150
151int libata_noacpi = 0;
152module_param_named(noacpi, libata_noacpi, int, 0444);
153MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
154
155int libata_allow_tpm = 0;
156module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
157MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
158
159static int atapi_an;
160module_param(atapi_an, int, 0444);
161MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
162
163MODULE_AUTHOR("Jeff Garzik");
164MODULE_DESCRIPTION("Library module for ATA devices");
165MODULE_LICENSE("GPL");
166MODULE_VERSION(DRV_VERSION);
167
168
169static bool ata_sstatus_online(u32 sstatus)
170{
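	/* SStatus DET field (bits 3:0) == 3: device present, PHY comm established */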
171 return (sstatus & 0xf) == 0x3;
172}
173
174/**
175 * ata_link_next - link iteration helper
176 * @link: the previous link, NULL to start
177 * @ap: ATA port containing links to iterate
178 * @mode: iteration mode, one of ATA_LITER_*
179 *
180 * LOCKING:
181 * Host lock or EH context.
182 *
183 * RETURNS:
184 * Pointer to the next link.
185 */
186struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
187 enum ata_link_iter_mode mode)
188{
189 BUG_ON(mode != ATA_LITER_EDGE &&
190 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
191
192 /* NULL link indicates start of iteration */
193 if (!link)
194 switch (mode) {
195 case ATA_LITER_EDGE:
196 case ATA_LITER_PMP_FIRST:
197 if (sata_pmp_attached(ap))
198 return ap->pmp_link;
199 /* fall through */
200 case ATA_LITER_HOST_FIRST:
201 return &ap->link;
202 }
203
204 /* we just iterated over the host link, what's next? */
205 if (link == &ap->link)
206 switch (mode) {
207 case ATA_LITER_HOST_FIRST:
208 if (sata_pmp_attached(ap))
209 return ap->pmp_link;
210 /* fall through */
211 case ATA_LITER_PMP_FIRST:
212 if (unlikely(ap->slave_link))
213 return ap->slave_link;
214 /* fall through */
215 case ATA_LITER_EDGE:
216 return NULL;
217 }
218
219 /* slave_link excludes PMP */
220 if (unlikely(link == ap->slave_link))
221 return NULL;
222
223 /* we were over a PMP link */
224 if (++link < ap->pmp_link + ap->nr_pmp_links)
225 return link;
226
227 if (mode == ATA_LITER_PMP_FIRST)
228 return &ap->link;
229
230 return NULL;
231}
232
233/**
234 * ata_dev_next - device iteration helper
235 * @dev: the previous device, NULL to start
236 * @link: ATA link containing devices to iterate
237 * @mode: iteration mode, one of ATA_DITER_*
238 *
239 * LOCKING:
240 * Host lock or EH context.
241 *
242 * RETURNS:
243 * Pointer to the next device.
244 */
245struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
246 enum ata_dev_iter_mode mode)
247{
248 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
249 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
250
251 /* NULL dev indicates start of iteration */
252 if (!dev)
253 switch (mode) {
254 case ATA_DITER_ENABLED:
255 case ATA_DITER_ALL:
256 dev = link->device;
257 goto check;
258 case ATA_DITER_ENABLED_REVERSE:
259 case ATA_DITER_ALL_REVERSE:
260 dev = link->device + ata_link_max_devices(link) - 1;
261 goto check;
262 }
263
264 next:
265 /* move to the next one */
266 switch (mode) {
267 case ATA_DITER_ENABLED:
268 case ATA_DITER_ALL:
269 if (++dev < link->device + ata_link_max_devices(link))
270 goto check;
271 return NULL;
272 case ATA_DITER_ENABLED_REVERSE:
273 case ATA_DITER_ALL_REVERSE:
274 if (--dev >= link->device)
275 goto check;
276 return NULL;
277 }
278
279 check:
280 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
281 !ata_dev_enabled(dev))
282 goto next;
283 return dev;
284}
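/*
 * These iterators are normally used through the ata_for_each_link() and
 * ata_for_each_dev() wrappers from <linux/libata.h>.  A minimal sketch
 * (illustrative only):
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_info(dev, "enabled device found\n");
 */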
285
286/**
287 * ata_dev_phys_link - find physical link for a device
288 * @dev: ATA device to look up physical link for
289 *
290 * Look up physical link which @dev is attached to. Note that
291 * this is different from @dev->link only when @dev is on slave
292 * link. For all other cases, it's the same as @dev->link.
293 *
294 * LOCKING:
295 * Don't care.
296 *
297 * RETURNS:
298 * Pointer to the found physical link.
299 */
300struct ata_link *ata_dev_phys_link(struct ata_device *dev)
301{
302 struct ata_port *ap = dev->link->ap;
303
304 if (!ap->slave_link)
305 return dev->link;
306 if (!dev->devno)
307 return &ap->link;
308 return ap->slave_link;
309}
310
311/**
312 * ata_force_cbl - force cable type according to libata.force
313 * @ap: ATA port of interest
314 *
315 * Force cable type according to libata.force and whine about it.
316 * The last entry which has matching port number is used, so it
317 * can be specified as part of device force parameters. For
318 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
319 * same effect.
320 *
321 * LOCKING:
322 * EH context.
323 */
324void ata_force_cbl(struct ata_port *ap)
325{
326 int i;
327
328 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
329 const struct ata_force_ent *fe = &ata_force_tbl[i];
330
331 if (fe->port != -1 && fe->port != ap->print_id)
332 continue;
333
334 if (fe->param.cbl == ATA_CBL_NONE)
335 continue;
336
337 ap->cbl = fe->param.cbl;
338 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
339 return;
340 }
341}
342
343/**
344 * ata_force_link_limits - force link limits according to libata.force
345 * @link: ATA link of interest
346 *
347 * Force link flags and SATA spd limit according to libata.force
348 * and whine about it. When only the port part is specified
349 * (e.g. 1:), the limit applies to all links connected to both
350 * the host link and all fan-out ports connected via PMP. If the
351 * device part is specified as 0 (e.g. 1.00:), it specifies the
352 * first fan-out link, not the host link. Device number 15 always
353 * points to the host link whether PMP is attached or not. If the
354 * controller has a slave link, device number 16 points to it.
355 *
356 * LOCKING:
357 * EH context.
358 */
359static void ata_force_link_limits(struct ata_link *link)
360{
361 bool did_spd = false;
362 int linkno = link->pmp;
363 int i;
364
365 if (ata_is_host_link(link))
366 linkno += 15;
367
368 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
369 const struct ata_force_ent *fe = &ata_force_tbl[i];
370
371 if (fe->port != -1 && fe->port != link->ap->print_id)
372 continue;
373
374 if (fe->device != -1 && fe->device != linkno)
375 continue;
376
377 /* only honor the first spd limit */
378 if (!did_spd && fe->param.spd_limit) {
379 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
380 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
381 fe->param.name);
382 did_spd = true;
383 }
384
385 /* let lflags stack */
386 if (fe->param.lflags) {
387 link->flags |= fe->param.lflags;
388 ata_link_notice(link,
389 "FORCE: link flag 0x%x forced -> 0x%x\n",
390 fe->param.lflags, link->flags);
391 }
392 }
393}
394
395/**
396 * ata_force_xfermask - force xfermask according to libata.force
397 * @dev: ATA device of interest
398 *
399 * Force xfer_mask according to libata.force and whine about it.
400 * For consistency with link selection, device number 15 selects
401 * the first device connected to the host link.
402 *
403 * LOCKING:
404 * EH context.
405 */
406static void ata_force_xfermask(struct ata_device *dev)
407{
408 int devno = dev->link->pmp + dev->devno;
409 int alt_devno = devno;
410 int i;
411
412 /* allow n.15/16 for devices attached to host port */
413 if (ata_is_host_link(dev->link))
414 alt_devno += 15;
415
416 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
417 const struct ata_force_ent *fe = &ata_force_tbl[i];
418 unsigned long pio_mask, mwdma_mask, udma_mask;
419
420 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
421 continue;
422
423 if (fe->device != -1 && fe->device != devno &&
424 fe->device != alt_devno)
425 continue;
426
427 if (!fe->param.xfer_mask)
428 continue;
429
430 ata_unpack_xfermask(fe->param.xfer_mask,
431 &pio_mask, &mwdma_mask, &udma_mask);
432 if (udma_mask)
433 dev->udma_mask = udma_mask;
434 else if (mwdma_mask) {
435 dev->udma_mask = 0;
436 dev->mwdma_mask = mwdma_mask;
437 } else {
438 dev->udma_mask = 0;
439 dev->mwdma_mask = 0;
440 dev->pio_mask = pio_mask;
441 }
442
443 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
444 fe->param.name);
445 return;
446 }
447}
448
449/**
450 * ata_force_horkage - force horkage according to libata.force
451 * @dev: ATA device of interest
452 *
453 * Force horkage according to libata.force and whine about it.
454 * For consistency with link selection, device number 15 selects
455 * the first device connected to the host link.
456 *
457 * LOCKING:
458 * EH context.
459 */
460static void ata_force_horkage(struct ata_device *dev)
461{
462 int devno = dev->link->pmp + dev->devno;
463 int alt_devno = devno;
464 int i;
465
466 /* allow n.15/16 for devices attached to host port */
467 if (ata_is_host_link(dev->link))
468 alt_devno += 15;
469
470 for (i = 0; i < ata_force_tbl_size; i++) {
471 const struct ata_force_ent *fe = &ata_force_tbl[i];
472
473 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
474 continue;
475
476 if (fe->device != -1 && fe->device != devno &&
477 fe->device != alt_devno)
478 continue;
479
480 if (!(~dev->horkage & fe->param.horkage_on) &&
481 !(dev->horkage & fe->param.horkage_off))
482 continue;
483
484 dev->horkage |= fe->param.horkage_on;
485 dev->horkage &= ~fe->param.horkage_off;
486
487 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
488 fe->param.name);
489 }
490}
491
492/**
493 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
494 * @opcode: SCSI opcode
495 *
496 * Determine ATAPI command type from @opcode.
497 *
498 * LOCKING:
499 * None.
500 *
501 * RETURNS:
502 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
503 */
504int atapi_cmd_type(u8 opcode)
505{
506 switch (opcode) {
507 case GPCMD_READ_10:
508 case GPCMD_READ_12:
509 return ATAPI_READ;
510
511 case GPCMD_WRITE_10:
512 case GPCMD_WRITE_12:
513 case GPCMD_WRITE_AND_VERIFY_10:
514 return ATAPI_WRITE;
515
516 case GPCMD_READ_CD:
517 case GPCMD_READ_CD_MSF:
518 return ATAPI_READ_CD;
519
520 case ATA_16:
521 case ATA_12:
522 if (atapi_passthru16)
523 return ATAPI_PASS_THRU;
524 /* fall through */
525 default:
526 return ATAPI_MISC;
527 }
528}
529
530/**
531 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
532 * @tf: Taskfile to convert
533 * @pmp: Port multiplier port
534 * @is_cmd: This FIS is for command
535 * @fis: Buffer into which data will be output
536 *
537 * Converts a standard ATA taskfile to a Serial ATA
538 * FIS structure (Register - Host to Device).
539 *
540 * LOCKING:
541 * Inherited from caller.
542 */
543void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
544{
545 fis[0] = 0x27; /* Register - Host to Device FIS */
546 fis[1] = pmp & 0xf; /* Port multiplier number*/
547 if (is_cmd)
548 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
549
550 fis[2] = tf->command;
551 fis[3] = tf->feature;
552
553 fis[4] = tf->lbal;
554 fis[5] = tf->lbam;
555 fis[6] = tf->lbah;
556 fis[7] = tf->device;
557
558 fis[8] = tf->hob_lbal;
559 fis[9] = tf->hob_lbam;
560 fis[10] = tf->hob_lbah;
561 fis[11] = tf->hob_feature;
562
563 fis[12] = tf->nsect;
564 fis[13] = tf->hob_nsect;
565 fis[14] = 0;
566 fis[15] = tf->ctl;
567
568 fis[16] = 0;
569 fis[17] = 0;
570 fis[18] = 0;
571 fis[19] = 0;
572}
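/*
 * For reference, the 20-byte Register - Host to Device FIS built above is:
 * byte 0 FIS type (0x27), byte 1 C bit and PM port, bytes 2-3
 * command/features, bytes 4-7 LBA low/mid/high and device, bytes 8-11 the
 * expanded LBA and features, bytes 12-13 the sector counts, byte 15 the
 * control register, and the remaining bytes reserved.
 */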
573
574/**
575 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
576 * @fis: Buffer from which data will be input
577 * @tf: Taskfile to output
578 *
579 * Converts a serial ATA FIS structure to a standard ATA taskfile.
580 *
581 * LOCKING:
582 * Inherited from caller.
583 */
584
585void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
586{
587 tf->command = fis[2]; /* status */
588 tf->feature = fis[3]; /* error */
589
590 tf->lbal = fis[4];
591 tf->lbam = fis[5];
592 tf->lbah = fis[6];
593 tf->device = fis[7];
594
595 tf->hob_lbal = fis[8];
596 tf->hob_lbam = fis[9];
597 tf->hob_lbah = fis[10];
598
599 tf->nsect = fis[12];
600 tf->hob_nsect = fis[13];
601}
602
603static const u8 ata_rw_cmds[] = {
604 /* pio multi */
605 ATA_CMD_READ_MULTI,
606 ATA_CMD_WRITE_MULTI,
607 ATA_CMD_READ_MULTI_EXT,
608 ATA_CMD_WRITE_MULTI_EXT,
609 0,
610 0,
611 0,
612 ATA_CMD_WRITE_MULTI_FUA_EXT,
613 /* pio */
614 ATA_CMD_PIO_READ,
615 ATA_CMD_PIO_WRITE,
616 ATA_CMD_PIO_READ_EXT,
617 ATA_CMD_PIO_WRITE_EXT,
618 0,
619 0,
620 0,
621 0,
622 /* dma */
623 ATA_CMD_READ,
624 ATA_CMD_WRITE,
625 ATA_CMD_READ_EXT,
626 ATA_CMD_WRITE_EXT,
627 0,
628 0,
629 0,
630 ATA_CMD_WRITE_FUA_EXT
631};
632
633/**
634 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
635 * @tf: command to examine and configure
636 * @dev: device tf belongs to
637 *
638 * Examine the device configuration and tf->flags to calculate
639 * the proper read/write commands and protocol to use.
640 *
641 * LOCKING:
642 * caller.
643 */
644static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
645{
646 u8 cmd;
647
648 int index, fua, lba48, write;
649
650 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
651 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
652 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
653
654 if (dev->flags & ATA_DFLAG_PIO) {
655 tf->protocol = ATA_PROT_PIO;
656 index = dev->multi_count ? 0 : 8;
657 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
658 /* Unable to use DMA due to host limitation */
659 tf->protocol = ATA_PROT_PIO;
660 index = dev->multi_count ? 0 : 8;
661 } else {
662 tf->protocol = ATA_PROT_DMA;
663 index = 16;
664 }
665
666 cmd = ata_rw_cmds[index + fua + lba48 + write];
667 if (cmd) {
668 tf->command = cmd;
669 return 0;
670 }
671 return -1;
672}
673
674/**
675 * ata_tf_read_block - Read block address from ATA taskfile
676 * @tf: ATA taskfile of interest
677 * @dev: ATA device @tf belongs to
678 *
679 * LOCKING:
680 * None.
681 *
682 * Read block address from @tf. This function can handle all
683 * three address formats - LBA, LBA48 and CHS. tf->protocol and
684 * flags select the address format to use.
685 *
686 * RETURNS:
687 * Block address read from @tf.
688 */
689u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
690{
691 u64 block = 0;
692
693 if (tf->flags & ATA_TFLAG_LBA) {
694 if (tf->flags & ATA_TFLAG_LBA48) {
695 block |= (u64)tf->hob_lbah << 40;
696 block |= (u64)tf->hob_lbam << 32;
697 block |= (u64)tf->hob_lbal << 24;
698 } else
699 block |= (tf->device & 0xf) << 24;
700
701 block |= tf->lbah << 16;
702 block |= tf->lbam << 8;
703 block |= tf->lbal;
704 } else {
705 u32 cyl, head, sect;
706
707 cyl = tf->lbam | (tf->lbah << 8);
708 head = tf->device & 0xf;
709 sect = tf->lbal;
710
711 if (!sect) {
712 ata_dev_warn(dev,
713 "device reported invalid CHS sector 0\n");
714 sect = 1; /* oh well */
715 }
716
717 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
718 }
719
720 return block;
721}
722
723/**
724 * ata_build_rw_tf - Build ATA taskfile for given read/write request
725 * @tf: Target ATA taskfile
726 * @dev: ATA device @tf belongs to
727 * @block: Block address
728 * @n_block: Number of blocks
729 * @tf_flags: RW/FUA etc...
730 * @tag: tag
731 *
732 * LOCKING:
733 * None.
734 *
735 * Build ATA taskfile @tf for read/write request described by
736 * @block, @n_block, @tf_flags and @tag on @dev.
737 *
738 * RETURNS:
739 *
740 * 0 on success, -ERANGE if the request is too large for @dev,
741 * -EINVAL if the request is invalid.
742 */
743int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
744 u64 block, u32 n_block, unsigned int tf_flags,
745 unsigned int tag)
746{
747 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
748 tf->flags |= tf_flags;
749
750 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
751 /* yay, NCQ */
752 if (!lba_48_ok(block, n_block))
753 return -ERANGE;
754
755 tf->protocol = ATA_PROT_NCQ;
756 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
757
758 if (tf->flags & ATA_TFLAG_WRITE)
759 tf->command = ATA_CMD_FPDMA_WRITE;
760 else
761 tf->command = ATA_CMD_FPDMA_READ;
762
763 tf->nsect = tag << 3;
764 tf->hob_feature = (n_block >> 8) & 0xff;
765 tf->feature = n_block & 0xff;
766
767 tf->hob_lbah = (block >> 40) & 0xff;
768 tf->hob_lbam = (block >> 32) & 0xff;
769 tf->hob_lbal = (block >> 24) & 0xff;
770 tf->lbah = (block >> 16) & 0xff;
771 tf->lbam = (block >> 8) & 0xff;
772 tf->lbal = block & 0xff;
773
774 tf->device = 1 << 6;
775 if (tf->flags & ATA_TFLAG_FUA)
776 tf->device |= 1 << 7;
777 } else if (dev->flags & ATA_DFLAG_LBA) {
778 tf->flags |= ATA_TFLAG_LBA;
779
780 if (lba_28_ok(block, n_block)) {
781 /* use LBA28 */
782 tf->device |= (block >> 24) & 0xf;
783 } else if (lba_48_ok(block, n_block)) {
784 if (!(dev->flags & ATA_DFLAG_LBA48))
785 return -ERANGE;
786
787 /* use LBA48 */
788 tf->flags |= ATA_TFLAG_LBA48;
789
790 tf->hob_nsect = (n_block >> 8) & 0xff;
791
792 tf->hob_lbah = (block >> 40) & 0xff;
793 tf->hob_lbam = (block >> 32) & 0xff;
794 tf->hob_lbal = (block >> 24) & 0xff;
795 } else
796 /* request too large even for LBA48 */
797 return -ERANGE;
798
799 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
800 return -EINVAL;
801
802 tf->nsect = n_block & 0xff;
803
804 tf->lbah = (block >> 16) & 0xff;
805 tf->lbam = (block >> 8) & 0xff;
806 tf->lbal = block & 0xff;
807
808 tf->device |= ATA_LBA;
809 } else {
810 /* CHS */
811 u32 sect, head, cyl, track;
812
813 /* The request -may- be too large for CHS addressing. */
814 if (!lba_28_ok(block, n_block))
815 return -ERANGE;
816
817 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
818 return -EINVAL;
819
820 /* Convert LBA to CHS */
821 track = (u32)block / dev->sectors;
822 cyl = track / dev->heads;
823 head = track % dev->heads;
824 sect = (u32)block % dev->sectors + 1;
825
826 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
827 (u32)block, track, cyl, head, sect);
828
829 /* Check whether the converted CHS can fit.
830 Cylinder: 0-65535
831 Head: 0-15
832 Sector: 1-255*/
833 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
834 return -ERANGE;
835
836 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
837 tf->lbal = sect;
838 tf->lbam = cyl;
839 tf->lbah = cyl >> 8;
840 tf->device |= head;
841 }
842
843 return 0;
844}
845
846/**
847 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
848 * @pio_mask: pio_mask
849 * @mwdma_mask: mwdma_mask
850 * @udma_mask: udma_mask
851 *
852 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
853 * unsigned int xfer_mask.
854 *
855 * LOCKING:
856 * None.
857 *
858 * RETURNS:
859 * Packed xfer_mask.
860 */
861unsigned long ata_pack_xfermask(unsigned long pio_mask,
862 unsigned long mwdma_mask,
863 unsigned long udma_mask)
864{
865 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
866 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
867 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
868}
869
870/**
871 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
872 * @xfer_mask: xfer_mask to unpack
873 * @pio_mask: resulting pio_mask
874 * @mwdma_mask: resulting mwdma_mask
875 * @udma_mask: resulting udma_mask
876 *
877 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
878 * Any NULL destination masks will be ignored.
879 */
880void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
881 unsigned long *mwdma_mask, unsigned long *udma_mask)
882{
883 if (pio_mask)
884 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
885 if (mwdma_mask)
886 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
887 if (udma_mask)
888 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
889}
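/*
 * A minimal sketch of how the pack/unpack helpers pair up (illustrative
 * only; ATA_PIO4, ATA_MWDMA2 and ATA_UDMA5 are the cumulative mode masks
 * from <linux/ata.h>):
 *
 *	unsigned long pio, mwdma, udma;
 *	unsigned long xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2,
 *						    ATA_UDMA5);
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */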
890
891static const struct ata_xfer_ent {
892 int shift, bits;
893 u8 base;
894} ata_xfer_tbl[] = {
895 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
896 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
897 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
898 { -1, },
899};
900
901/**
902 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
903 * @xfer_mask: xfer_mask of interest
904 *
905 * Return matching XFER_* value for @xfer_mask. Only the highest
906 * bit of @xfer_mask is considered.
907 *
908 * LOCKING:
909 * None.
910 *
911 * RETURNS:
912 * Matching XFER_* value, 0xff if no match found.
913 */
914u8 ata_xfer_mask2mode(unsigned long xfer_mask)
915{
916 int highbit = fls(xfer_mask) - 1;
917 const struct ata_xfer_ent *ent;
918
919 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
920 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
921 return ent->base + highbit - ent->shift;
922 return 0xff;
923}
924
925/**
926 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
927 * @xfer_mode: XFER_* of interest
928 *
929 * Return matching xfer_mask for @xfer_mode.
930 *
931 * LOCKING:
932 * None.
933 *
934 * RETURNS:
935 * Matching xfer_mask, 0 if no match found.
936 */
937unsigned long ata_xfer_mode2mask(u8 xfer_mode)
938{
939 const struct ata_xfer_ent *ent;
940
941 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
942 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
943 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
944 & ~((1 << ent->shift) - 1);
945 return 0;
946}
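/*
 * Note that the returned mask is cumulative: for example,
 * ata_xfer_mode2mask(XFER_UDMA_2) covers UDMA0-UDMA2, not just the UDMA2
 * bit.
 */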
947
948/**
949 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
950 * @xfer_mode: XFER_* of interest
951 *
952 * Return matching xfer_shift for @xfer_mode.
953 *
954 * LOCKING:
955 * None.
956 *
957 * RETURNS:
958 * Matching xfer_shift, -1 if no match found.
959 */
960int ata_xfer_mode2shift(unsigned long xfer_mode)
961{
962 const struct ata_xfer_ent *ent;
963
964 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
965 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
966 return ent->shift;
967 return -1;
968}
969
970/**
971 * ata_mode_string - convert xfer_mask to string
972 * @xfer_mask: mask of bits supported; only highest bit counts.
973 *
974 * Determine string which represents the highest speed
975 * (highest bit in @xfer_mask).
976 *
977 * LOCKING:
978 * None.
979 *
980 * RETURNS:
981 * Constant C string representing highest speed listed in
982 * @xfer_mask, or the constant C string "<n/a>".
983 */
984const char *ata_mode_string(unsigned long xfer_mask)
985{
986 static const char * const xfer_mode_str[] = {
987 "PIO0",
988 "PIO1",
989 "PIO2",
990 "PIO3",
991 "PIO4",
992 "PIO5",
993 "PIO6",
994 "MWDMA0",
995 "MWDMA1",
996 "MWDMA2",
997 "MWDMA3",
998 "MWDMA4",
999 "UDMA/16",
1000 "UDMA/25",
1001 "UDMA/33",
1002 "UDMA/44",
1003 "UDMA/66",
1004 "UDMA/100",
1005 "UDMA/133",
1006 "UDMA7",
1007 };
1008 int highbit;
1009
1010 highbit = fls(xfer_mask) - 1;
1011 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1012 return xfer_mode_str[highbit];
1013 return "<n/a>";
1014}
1015
1016const char *sata_spd_string(unsigned int spd)
1017{
1018 static const char * const spd_str[] = {
1019 "1.5 Gbps",
1020 "3.0 Gbps",
1021 "6.0 Gbps",
1022 };
1023
1024 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1025 return "<unknown>";
1026 return spd_str[spd - 1];
1027}
1028
1029/**
1030 * ata_dev_classify - determine device type based on ATA-spec signature
1031 * @tf: ATA taskfile register set for device to be identified
1032 *
1033 * Determine from taskfile register contents whether a device is
1034 * ATA or ATAPI, as per "Signature and persistence" section
1035 * of ATA/PI spec (volume 1, sect 5.14).
1036 *
1037 * LOCKING:
1038 * None.
1039 *
1040 * RETURNS:
1041 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1042 * %ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
1043 */
1044unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1045{
1046 /* Apple's open source Darwin code hints that some devices only
1047 * put a proper signature into the LBA mid/high registers,
1048 * so we only check those. It's sufficient for uniqueness.
1049 *
1050 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1051 * signatures for ATA and ATAPI devices attached on SerialATA,
1052 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1053 * spec has never mentioned using different signatures
1054 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1055 * Multiplier specification began to use 0x69/0x96 to identify
1056 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1057 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
1058 * 0x69/0x96 shortly thereafter and described them as reserved for
1059 * SerialATA.
1060 *
1061 * We follow the current spec and consider that 0x69/0x96
1062 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1063 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1064 * SEMB signature. This is worked around in
1065 * ata_dev_read_id().
1066 */
1067 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1068 DPRINTK("found ATA device by sig\n");
1069 return ATA_DEV_ATA;
1070 }
1071
1072 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1073 DPRINTK("found ATAPI device by sig\n");
1074 return ATA_DEV_ATAPI;
1075 }
1076
1077 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1078 DPRINTK("found PMP device by sig\n");
1079 return ATA_DEV_PMP;
1080 }
1081
1082 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1083 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1084 return ATA_DEV_SEMB;
1085 }
1086
1087 DPRINTK("unknown device\n");
1088 return ATA_DEV_UNKNOWN;
1089}
1090
1091/**
1092 * ata_id_string - Convert IDENTIFY DEVICE page into string
1093 * @id: IDENTIFY DEVICE results we will examine
1094 * @s: string into which data is output
1095 * @ofs: offset into identify device page
1096 * @len: length of string to return. must be an even number.
1097 *
1098 * The strings in the IDENTIFY DEVICE page are broken up into
1099 * 16-bit chunks. Run through the string, and output each
1100 * 8-bit chunk linearly, regardless of platform.
1101 *
1102 * LOCKING:
1103 * caller.
1104 */
1105
1106void ata_id_string(const u16 *id, unsigned char *s,
1107 unsigned int ofs, unsigned int len)
1108{
1109 unsigned int c;
1110
1111 BUG_ON(len & 1);
1112
1113 while (len > 0) {
1114 c = id[ofs] >> 8;
1115 *s = c;
1116 s++;
1117
1118 c = id[ofs] & 0xff;
1119 *s = c;
1120 s++;
1121
1122 ofs++;
1123 len -= 2;
1124 }
1125}
1126
1127/**
1128 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1129 * @id: IDENTIFY DEVICE results we will examine
1130 * @s: string into which data is output
1131 * @ofs: offset into identify device page
1132 * @len: length of string to return. must be an odd number.
1133 *
1134 * This function is identical to ata_id_string except that it
1135 * trims trailing spaces and terminates the resulting string with
1136 * null. @len must be actual maximum length (even number) + 1.
1137 *
1138 * LOCKING:
1139 * caller.
1140 */
1141void ata_id_c_string(const u16 *id, unsigned char *s,
1142 unsigned int ofs, unsigned int len)
1143{
1144 unsigned char *p;
1145
1146 ata_id_string(id, s, ofs, len - 1);
1147
1148 p = s + strnlen(s, len - 1);
1149 while (p > s && p[-1] == ' ')
1150 p--;
1151 *p = '\0';
1152}
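/*
 * A typical use is pulling the fixed-width ASCII fields out of IDENTIFY
 * data, e.g. (illustrative sketch):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */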
1153
1154static u64 ata_id_n_sectors(const u16 *id)
1155{
1156 if (ata_id_has_lba(id)) {
1157 if (ata_id_has_lba48(id))
1158 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1159 else
1160 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1161 } else {
1162 if (ata_id_current_chs_valid(id))
1163 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1164 id[ATA_ID_CUR_SECTORS];
1165 else
1166 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1167 id[ATA_ID_SECTORS];
1168 }
1169}
1170
1171u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1172{
1173 u64 sectors = 0;
1174
1175 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1176 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1177 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1178 sectors |= (tf->lbah & 0xff) << 16;
1179 sectors |= (tf->lbam & 0xff) << 8;
1180 sectors |= (tf->lbal & 0xff);
1181
1182 return sectors;
1183}
1184
1185u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1186{
1187 u64 sectors = 0;
1188
1189 sectors |= (tf->device & 0x0f) << 24;
1190 sectors |= (tf->lbah & 0xff) << 16;
1191 sectors |= (tf->lbam & 0xff) << 8;
1192 sectors |= (tf->lbal & 0xff);
1193
1194 return sectors;
1195}
1196
1197/**
1198 * ata_read_native_max_address - Read native max address
1199 * @dev: target device
1200 * @max_sectors: out parameter for the result native max address
1201 *
1202 * Perform an LBA48 or LBA28 native size query upon the device in
1203 * question.
1204 *
1205 * RETURNS:
1206 * 0 on success, -EACCES if command is aborted by the drive.
1207 * -EIO on other errors.
1208 */
1209static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1210{
1211 unsigned int err_mask;
1212 struct ata_taskfile tf;
1213 int lba48 = ata_id_has_lba48(dev->id);
1214
1215 ata_tf_init(dev, &tf);
1216
1217 /* always clear all address registers */
1218 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1219
1220 if (lba48) {
1221 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1222 tf.flags |= ATA_TFLAG_LBA48;
1223 } else
1224 tf.command = ATA_CMD_READ_NATIVE_MAX;
1225
1226 tf.protocol |= ATA_PROT_NODATA;
1227 tf.device |= ATA_LBA;
1228
1229 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1230 if (err_mask) {
1231 ata_dev_warn(dev,
1232 "failed to read native max address (err_mask=0x%x)\n",
1233 err_mask);
1234 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1235 return -EACCES;
1236 return -EIO;
1237 }
1238
1239 if (lba48)
1240 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1241 else
1242 *max_sectors = ata_tf_to_lba(&tf) + 1;
1243 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1244 (*max_sectors)--;
1245 return 0;
1246}
1247
1248/**
1249 * ata_set_max_sectors - Set max sectors
1250 * @dev: target device
1251 * @new_sectors: new max sectors value to set for the device
1252 *
1253 * Set max sectors of @dev to @new_sectors.
1254 *
1255 * RETURNS:
1256 * 0 on success, -EACCES if command is aborted or denied (due to
1257 * previous non-volatile SET_MAX) by the drive. -EIO on other
1258 * errors.
1259 */
1260static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1261{
1262 unsigned int err_mask;
1263 struct ata_taskfile tf;
1264 int lba48 = ata_id_has_lba48(dev->id);
1265
1266 new_sectors--;
1267
1268 ata_tf_init(dev, &tf);
1269
1270 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1271
1272 if (lba48) {
1273 tf.command = ATA_CMD_SET_MAX_EXT;
1274 tf.flags |= ATA_TFLAG_LBA48;
1275
1276 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1277 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1278 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1279 } else {
1280 tf.command = ATA_CMD_SET_MAX;
1281
1282 tf.device |= (new_sectors >> 24) & 0xf;
1283 }
1284
1285 tf.protocol |= ATA_PROT_NODATA;
1286 tf.device |= ATA_LBA;
1287
1288 tf.lbal = (new_sectors >> 0) & 0xff;
1289 tf.lbam = (new_sectors >> 8) & 0xff;
1290 tf.lbah = (new_sectors >> 16) & 0xff;
1291
1292 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1293 if (err_mask) {
1294 ata_dev_warn(dev,
1295 "failed to set max address (err_mask=0x%x)\n",
1296 err_mask);
1297 if (err_mask == AC_ERR_DEV &&
1298 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1299 return -EACCES;
1300 return -EIO;
1301 }
1302
1303 return 0;
1304}
1305
1306/**
1307 * ata_hpa_resize - Resize a device with an HPA set
1308 * @dev: Device to resize
1309 *
1310 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1311 * it if required to the full size of the media. The caller must check
1312 * the drive has the HPA feature set enabled.
1313 *
1314 * RETURNS:
1315 * 0 on success, -errno on failure.
1316 */
1317static int ata_hpa_resize(struct ata_device *dev)
1318{
1319 struct ata_eh_context *ehc = &dev->link->eh_context;
1320 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1321 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1322 u64 sectors = ata_id_n_sectors(dev->id);
1323 u64 native_sectors;
1324 int rc;
1325
1326 /* do we need to do it? */
1327 if (dev->class != ATA_DEV_ATA ||
1328 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1329 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1330 return 0;
1331
1332 /* read native max address */
1333 rc = ata_read_native_max_address(dev, &native_sectors);
1334 if (rc) {
1335 /* If device aborted the command or HPA isn't going to
1336 * be unlocked, skip HPA resizing.
1337 */
1338 if (rc == -EACCES || !unlock_hpa) {
1339 ata_dev_warn(dev,
1340 "HPA support seems broken, skipping HPA handling\n");
1341 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1342
1343 /* we can continue if device aborted the command */
1344 if (rc == -EACCES)
1345 rc = 0;
1346 }
1347
1348 return rc;
1349 }
1350 dev->n_native_sectors = native_sectors;
1351
1352 /* nothing to do? */
1353 if (native_sectors <= sectors || !unlock_hpa) {
1354 if (!print_info || native_sectors == sectors)
1355 return 0;
1356
1357 if (native_sectors > sectors)
1358 ata_dev_info(dev,
1359 "HPA detected: current %llu, native %llu\n",
1360 (unsigned long long)sectors,
1361 (unsigned long long)native_sectors);
1362 else if (native_sectors < sectors)
1363 ata_dev_warn(dev,
1364 "native sectors (%llu) is smaller than sectors (%llu)\n",
1365 (unsigned long long)native_sectors,
1366 (unsigned long long)sectors);
1367 return 0;
1368 }
1369
1370 /* let's unlock HPA */
1371 rc = ata_set_max_sectors(dev, native_sectors);
1372 if (rc == -EACCES) {
1373 /* if device aborted the command, skip HPA resizing */
1374 ata_dev_warn(dev,
1375 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1376 (unsigned long long)sectors,
1377 (unsigned long long)native_sectors);
1378 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1379 return 0;
1380 } else if (rc)
1381 return rc;
1382
1383 /* re-read IDENTIFY data */
1384 rc = ata_dev_reread_id(dev, 0);
1385 if (rc) {
1386 ata_dev_err(dev,
1387 "failed to re-read IDENTIFY data after HPA resizing\n");
1388 return rc;
1389 }
1390
1391 if (print_info) {
1392 u64 new_sectors = ata_id_n_sectors(dev->id);
1393 ata_dev_info(dev,
1394 "HPA unlocked: %llu -> %llu, native %llu\n",
1395 (unsigned long long)sectors,
1396 (unsigned long long)new_sectors,
1397 (unsigned long long)native_sectors);
1398 }
1399
1400 return 0;
1401}
1402
1403/**
1404 * ata_dump_id - IDENTIFY DEVICE info debugging output
1405 * @id: IDENTIFY DEVICE page to dump
1406 *
1407 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1408 * page.
1409 *
1410 * LOCKING:
1411 * caller.
1412 */
1413
1414static inline void ata_dump_id(const u16 *id)
1415{
1416 DPRINTK("49==0x%04x "
1417 "53==0x%04x "
1418 "63==0x%04x "
1419 "64==0x%04x "
1420 "75==0x%04x \n",
1421 id[49],
1422 id[53],
1423 id[63],
1424 id[64],
1425 id[75]);
1426 DPRINTK("80==0x%04x "
1427 "81==0x%04x "
1428 "82==0x%04x "
1429 "83==0x%04x "
1430 "84==0x%04x \n",
1431 id[80],
1432 id[81],
1433 id[82],
1434 id[83],
1435 id[84]);
1436 DPRINTK("88==0x%04x "
1437 "93==0x%04x\n",
1438 id[88],
1439 id[93]);
1440}
1441
1442/**
1443 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1444 * @id: IDENTIFY data to compute xfer mask from
1445 *
1446 * Compute the xfermask for this device. This is not as trivial
1447 * as it seems if we must consider early devices correctly.
1448 *
1449 * FIXME: pre-IDE drive timing (do we care?).
1450 *
1451 * LOCKING:
1452 * None.
1453 *
1454 * RETURNS:
1455 * Computed xfermask
1456 */
1457unsigned long ata_id_xfermask(const u16 *id)
1458{
1459 unsigned long pio_mask, mwdma_mask, udma_mask;
1460
1461 /* Usual case. Word 53 indicates word 64 is valid */
1462 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1463 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1464 pio_mask <<= 3;
1465 pio_mask |= 0x7;
1466 } else {
1467 /* If word 64 isn't valid then Word 51 high byte holds
1468 * the PIO timing number for the maximum. Turn it into
1469 * a mask.
1470 */
1471 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1472 if (mode < 5) /* Valid PIO range */
1473 pio_mask = (2 << mode) - 1;
1474 else
1475 pio_mask = 1;
1476
1477 /* But wait.. there's more. Design your standards by
1478 * committee and you too can get a free iordy field to
1479 * process. However, it's the speeds, not the modes, that
1480 * are supported... Note that drivers using the timing API
1481 * will get this right anyway.
1482 */
1483 }
1484
1485 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1486
1487 if (ata_id_is_cfa(id)) {
1488 /*
1489 * Process compact flash extended modes
1490 */
1491 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1492 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1493
1494 if (pio)
1495 pio_mask |= (1 << 5);
1496 if (pio > 1)
1497 pio_mask |= (1 << 6);
1498 if (dma)
1499 mwdma_mask |= (1 << 3);
1500 if (dma > 1)
1501 mwdma_mask |= (1 << 4);
1502 }
1503
1504 udma_mask = 0;
1505 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1506 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1507
1508 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1509}
1510
1511static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1512{
1513 struct completion *waiting = qc->private_data;
1514
1515 complete(waiting);
1516}
1517
1518/**
1519 * ata_exec_internal_sg - execute libata internal command
1520 * @dev: Device to which the command is sent
1521 * @tf: Taskfile registers for the command and the result
1522 * @cdb: CDB for packet command
1523 * @dma_dir: Data transfer direction of the command
1524 * @sgl: sg list for the data buffer of the command
1525 * @n_elem: Number of sg entries
1526 * @timeout: Timeout in msecs (0 for default)
1527 *
1528 * Executes libata internal command with timeout. @tf contains
1529 * command on entry and result on return. Timeout and error
1530 * conditions are reported via return value. No recovery action
1531 * is taken after a command times out. It's caller's duty to
1532 * clean up after timeout.
1533 *
1534 * LOCKING:
1535 * None. Should be called with kernel context, might sleep.
1536 *
1537 * RETURNS:
1538 * Zero on success, AC_ERR_* mask on failure
1539 */
1540unsigned ata_exec_internal_sg(struct ata_device *dev,
1541 struct ata_taskfile *tf, const u8 *cdb,
1542 int dma_dir, struct scatterlist *sgl,
1543 unsigned int n_elem, unsigned long timeout)
1544{
1545 struct ata_link *link = dev->link;
1546 struct ata_port *ap = link->ap;
1547 u8 command = tf->command;
1548 int auto_timeout = 0;
1549 struct ata_queued_cmd *qc;
1550 unsigned int tag, preempted_tag;
1551 u32 preempted_sactive, preempted_qc_active;
1552 int preempted_nr_active_links;
1553 DECLARE_COMPLETION_ONSTACK(wait);
1554 unsigned long flags;
1555 unsigned int err_mask;
1556 int rc;
1557
1558 spin_lock_irqsave(ap->lock, flags);
1559
1560 /* no internal command while frozen */
1561 if (ap->pflags & ATA_PFLAG_FROZEN) {
1562 spin_unlock_irqrestore(ap->lock, flags);
1563 return AC_ERR_SYSTEM;
1564 }
1565
1566 /* initialize internal qc */
1567
1568 /* XXX: Tag 0 is used for drivers with legacy EH as some
1569 * drivers choke if any other tag is given. This breaks
1570 * ata_tag_internal() test for those drivers. Don't use new
1571 * EH stuff without converting to it.
1572 */
1573 if (ap->ops->error_handler)
1574 tag = ATA_TAG_INTERNAL;
1575 else
1576 tag = 0;
1577
1578 if (test_and_set_bit(tag, &ap->qc_allocated))
1579 BUG();
1580 qc = __ata_qc_from_tag(ap, tag);
1581
1582 qc->tag = tag;
1583 qc->scsicmd = NULL;
1584 qc->ap = ap;
1585 qc->dev = dev;
1586 ata_qc_reinit(qc);
1587
1588 preempted_tag = link->active_tag;
1589 preempted_sactive = link->sactive;
1590 preempted_qc_active = ap->qc_active;
1591 preempted_nr_active_links = ap->nr_active_links;
1592 link->active_tag = ATA_TAG_POISON;
1593 link->sactive = 0;
1594 ap->qc_active = 0;
1595 ap->nr_active_links = 0;
1596
1597 /* prepare & issue qc */
1598 qc->tf = *tf;
1599 if (cdb)
1600 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1601 qc->flags |= ATA_QCFLAG_RESULT_TF;
1602 qc->dma_dir = dma_dir;
1603 if (dma_dir != DMA_NONE) {
1604 unsigned int i, buflen = 0;
1605 struct scatterlist *sg;
1606
1607 for_each_sg(sgl, sg, n_elem, i)
1608 buflen += sg->length;
1609
1610 ata_sg_init(qc, sgl, n_elem);
1611 qc->nbytes = buflen;
1612 }
1613
1614 qc->private_data = &wait;
1615 qc->complete_fn = ata_qc_complete_internal;
1616
1617 ata_qc_issue(qc);
1618
1619 spin_unlock_irqrestore(ap->lock, flags);
1620
1621 if (!timeout) {
1622 if (ata_probe_timeout)
1623 timeout = ata_probe_timeout * 1000;
1624 else {
1625 timeout = ata_internal_cmd_timeout(dev, command);
1626 auto_timeout = 1;
1627 }
1628 }
1629
1630 if (ap->ops->error_handler)
1631 ata_eh_release(ap);
1632
1633 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1634
1635 if (ap->ops->error_handler)
1636 ata_eh_acquire(ap);
1637
1638 ata_sff_flush_pio_task(ap);
1639
1640 if (!rc) {
1641 spin_lock_irqsave(ap->lock, flags);
1642
1643 /* We're racing with irq here. If we lose, the
1644 * following test prevents us from completing the qc
1645 * twice. If we win, the port is frozen and will be
1646 * cleaned up by ->post_internal_cmd().
1647 */
1648 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1649 qc->err_mask |= AC_ERR_TIMEOUT;
1650
1651 if (ap->ops->error_handler)
1652 ata_port_freeze(ap);
1653 else
1654 ata_qc_complete(qc);
1655
1656 if (ata_msg_warn(ap))
1657 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1658 command);
1659 }
1660
1661 spin_unlock_irqrestore(ap->lock, flags);
1662 }
1663
1664 /* do post_internal_cmd */
1665 if (ap->ops->post_internal_cmd)
1666 ap->ops->post_internal_cmd(qc);
1667
1668 /* perform minimal error analysis */
1669 if (qc->flags & ATA_QCFLAG_FAILED) {
1670 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1671 qc->err_mask |= AC_ERR_DEV;
1672
1673 if (!qc->err_mask)
1674 qc->err_mask |= AC_ERR_OTHER;
1675
1676 if (qc->err_mask & ~AC_ERR_OTHER)
1677 qc->err_mask &= ~AC_ERR_OTHER;
1678 }
1679
1680 /* finish up */
1681 spin_lock_irqsave(ap->lock, flags);
1682
1683 *tf = qc->result_tf;
1684 err_mask = qc->err_mask;
1685
1686 ata_qc_free(qc);
1687 link->active_tag = preempted_tag;
1688 link->sactive = preempted_sactive;
1689 ap->qc_active = preempted_qc_active;
1690 ap->nr_active_links = preempted_nr_active_links;
1691
1692 spin_unlock_irqrestore(ap->lock, flags);
1693
1694 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1695 ata_internal_cmd_timed_out(dev, command);
1696
1697 return err_mask;
1698}
1699
1700/**
1701 * ata_exec_internal - execute libata internal command
1702 * @dev: Device to which the command is sent
1703 * @tf: Taskfile registers for the command and the result
1704 * @cdb: CDB for packet command
1705 * @dma_dir: Data transfer direction of the command
1706 * @buf: Data buffer of the command
1707 * @buflen: Length of data buffer
1708 * @timeout: Timeout in msecs (0 for default)
1709 *
1710 * Wrapper around ata_exec_internal_sg() which takes simple
1711 * buffer instead of sg list.
1712 *
1713 * LOCKING:
1714 * None. Should be called with kernel context, might sleep.
1715 *
1716 * RETURNS:
1717 * Zero on success, AC_ERR_* mask on failure
1718 */
1719unsigned ata_exec_internal(struct ata_device *dev,
1720 struct ata_taskfile *tf, const u8 *cdb,
1721 int dma_dir, void *buf, unsigned int buflen,
1722 unsigned long timeout)
1723{
1724 struct scatterlist *psg = NULL, sg;
1725 unsigned int n_elem = 0;
1726
1727 if (dma_dir != DMA_NONE) {
1728 WARN_ON(!buf);
1729 sg_init_one(&sg, buf, buflen);
1730 psg = &sg;
1731 n_elem++;
1732 }
1733
1734 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1735 timeout);
1736}
1737
1738/**
1739 * ata_do_simple_cmd - execute simple internal command
1740 * @dev: Device to which the command is sent
1741 * @cmd: Opcode to execute
1742 *
1743 * Execute a 'simple' command that consists only of the opcode
1744 * 'cmd' itself, without filling in any other registers.
1745 *
1746 * LOCKING:
1747 * Kernel thread context (may sleep).
1748 *
1749 * RETURNS:
1750 * Zero on success, AC_ERR_* mask on failure
1751 */
1752unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1753{
1754 struct ata_taskfile tf;
1755
1756 ata_tf_init(dev, &tf);
1757
1758 tf.command = cmd;
1759 tf.flags |= ATA_TFLAG_DEVICE;
1760 tf.protocol = ATA_PROT_NODATA;
1761
1762 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1763}
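/*
 * For example (illustrative), spinning a drive down for suspend boils down
 * to ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1).
 */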
1764
1765/**
1766 * ata_pio_need_iordy - check if iordy needed
1767 * @adev: ATA device
1768 *
1769 * Check if the current speed of the device requires IORDY. Used
1770 * by various controllers for chip configuration.
1771 */
1772unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1773{
1774 /* Don't set IORDY if we're preparing for reset. IORDY may
1775 * lead to controller lock up on certain controllers if the
1776 * port is not occupied. See bko#11703 for details.
1777 */
1778 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1779 return 0;
1780 /* Controller doesn't support IORDY. Probably a pointless
1781 * check as the caller should know this.
1782 */
1783 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1784 return 0;
1785 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1786 if (ata_id_is_cfa(adev->id)
1787 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1788 return 0;
1789 /* PIO3 and higher it is mandatory */
1790 if (adev->pio_mode > XFER_PIO_2)
1791 return 1;
1792 /* We turn it on when possible */
1793 if (ata_id_has_iordy(adev->id))
1794 return 1;
1795 return 0;
1796}
1797
1798/**
1799 * ata_pio_mask_no_iordy - Return the non IORDY mask
1800 * @adev: ATA device
1801 *
1802 * Compute the highest mode possible if we are not using iordy and
1803 * return it as a PIO mode mask.
1804 */
1805static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1806{
1807 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1808 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1809 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1810 /* Is the speed faster than the drive allows non IORDY ? */
1811 if (pio) {
1812 /* This is cycle times not frequency - watch the logic! */
1813 if (pio > 240) /* PIO2 is 240nS per cycle */
1814 return 3 << ATA_SHIFT_PIO;
1815 return 7 << ATA_SHIFT_PIO;
1816 }
1817 }
1818 return 3 << ATA_SHIFT_PIO;
1819}
1820
1821/**
1822 * ata_do_dev_read_id - default ID read method
1823 * @dev: device
1824 * @tf: proposed taskfile
1825 * @id: data buffer
1826 *
1827 * Issue the identify taskfile and hand back the buffer containing
1828 * identify data. For some RAID controllers and for pre-ATA devices
1829 * this function is wrapped or replaced by the driver.
1830 */
1831unsigned int ata_do_dev_read_id(struct ata_device *dev,
1832 struct ata_taskfile *tf, u16 *id)
1833{
1834 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1835 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1836}
1837
1838/**
1839 * ata_dev_read_id - Read ID data from the specified device
1840 * @dev: target device
1841 * @p_class: pointer to class of the target device (may be changed)
1842 * @flags: ATA_READID_* flags
1843 * @id: buffer to read IDENTIFY data into
1844 *
1845 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1846 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1847 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1848 * for pre-ATA4 drives.
1849 *
1850 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1851 * now we abort if we hit that case.
1852 *
1853 * LOCKING:
1854 * Kernel thread context (may sleep)
1855 *
1856 * RETURNS:
1857 * 0 on success, -errno otherwise.
1858 */
1859int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1860 unsigned int flags, u16 *id)
1861{
1862 struct ata_port *ap = dev->link->ap;
1863 unsigned int class = *p_class;
1864 struct ata_taskfile tf;
1865 unsigned int err_mask = 0;
1866 const char *reason;
1867 bool is_semb = class == ATA_DEV_SEMB;
1868 int may_fallback = 1, tried_spinup = 0;
1869 int rc;
1870
1871 if (ata_msg_ctl(ap))
1872 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1873
1874retry:
1875 ata_tf_init(dev, &tf);
1876
1877 switch (class) {
1878 case ATA_DEV_SEMB:
1879 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1880 case ATA_DEV_ATA:
1881 tf.command = ATA_CMD_ID_ATA;
1882 break;
1883 case ATA_DEV_ATAPI:
1884 tf.command = ATA_CMD_ID_ATAPI;
1885 break;
1886 default:
1887 rc = -ENODEV;
1888 reason = "unsupported class";
1889 goto err_out;
1890 }
1891
1892 tf.protocol = ATA_PROT_PIO;
1893
1894 /* Some devices choke if TF registers contain garbage. Make
1895 * sure those are properly initialized.
1896 */
1897 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1898
1899 /* Device presence detection is unreliable on some
1900 * controllers. Always poll IDENTIFY if available.
1901 */
1902 tf.flags |= ATA_TFLAG_POLLING;
1903
1904 if (ap->ops->read_id)
1905 err_mask = ap->ops->read_id(dev, &tf, id);
1906 else
1907 err_mask = ata_do_dev_read_id(dev, &tf, id);
1908
1909 if (err_mask) {
1910 if (err_mask & AC_ERR_NODEV_HINT) {
1911 ata_dev_dbg(dev, "NODEV after polling detection\n");
1912 return -ENOENT;
1913 }
1914
1915 if (is_semb) {
1916 ata_dev_info(dev,
1917 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1918 /* SEMB is not supported yet */
1919 *p_class = ATA_DEV_SEMB_UNSUP;
1920 return 0;
1921 }
1922
1923 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1924 /* Device or controller might have reported
1925 * the wrong device class. Give a shot at the
1926 * other IDENTIFY if the current one is
1927 * aborted by the device.
1928 */
1929 if (may_fallback) {
1930 may_fallback = 0;
1931
1932 if (class == ATA_DEV_ATA)
1933 class = ATA_DEV_ATAPI;
1934 else
1935 class = ATA_DEV_ATA;
1936 goto retry;
1937 }
1938
1939 /* Control reaches here iff the device aborted
1940 * both flavors of IDENTIFYs which happens
1941 * sometimes with phantom devices.
1942 */
1943 ata_dev_dbg(dev,
1944 "both IDENTIFYs aborted, assuming NODEV\n");
1945 return -ENOENT;
1946 }
1947
1948 rc = -EIO;
1949 reason = "I/O error";
1950 goto err_out;
1951 }
1952
1953 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1954 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1955 "class=%d may_fallback=%d tried_spinup=%d\n",
1956 class, may_fallback, tried_spinup);
1957 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1958 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1959 }
1960
1961 /* Falling back doesn't make sense if ID data was read
1962 * successfully at least once.
1963 */
1964 may_fallback = 0;
1965
1966 swap_buf_le16(id, ATA_ID_WORDS);
1967
1968 /* sanity check */
1969 rc = -EINVAL;
1970 reason = "device reports invalid type";
1971
1972 if (class == ATA_DEV_ATA) {
1973 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1974 goto err_out;
1975 } else {
1976 if (ata_id_is_ata(id))
1977 goto err_out;
1978 }
1979
1980 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1981 tried_spinup = 1;
1982 /*
1983 * Drive powered-up in standby mode, and requires a specific
1984 * SET_FEATURES spin-up subcommand before it will accept
1985 * anything other than the original IDENTIFY command.
1986 */
1987 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1988 if (err_mask && id[2] != 0x738c) {
1989 rc = -EIO;
1990 reason = "SPINUP failed";
1991 goto err_out;
1992 }
1993 /*
1994 * If the drive initially returned incomplete IDENTIFY info,
1995 * we now must reissue the IDENTIFY command.
1996 */
1997 if (id[2] == 0x37c8)
1998 goto retry;
1999 }
2000
2001 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2002 /*
2003 * The exact sequence expected by certain pre-ATA4 drives is:
2004 * SRST RESET
2005 * IDENTIFY (optional in early ATA)
2006 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2007 * anything else..
2008 * Some drives were very specific about that exact sequence.
2009 *
2010 * Note that ATA4 says lba is mandatory so the second check
2011 * should never trigger.
2012 */
2013 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2014 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2015 if (err_mask) {
2016 rc = -EIO;
2017 reason = "INIT_DEV_PARAMS failed";
2018 goto err_out;
2019 }
2020
2021 /* current CHS translation info (id[53-58]) might be
2022 * changed. reread the identify device info.
2023 */
2024 flags &= ~ATA_READID_POSTRESET;
2025 goto retry;
2026 }
2027 }
2028
2029 *p_class = class;
2030
2031 return 0;
2032
2033 err_out:
2034 if (ata_msg_warn(ap))
2035 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2036 reason, err_mask);
2037 return rc;
2038}
2039
2040static int ata_do_link_spd_horkage(struct ata_device *dev)
2041{
2042 struct ata_link *plink = ata_dev_phys_link(dev);
2043 u32 target, target_limit;
2044
2045 if (!sata_scr_valid(plink))
2046 return 0;
2047
2048 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2049 target = 1;
2050 else
2051 return 0;
2052
2053 target_limit = (1 << target) - 1;
2054
2055 /* if already on stricter limit, no need to push further */
2056 if (plink->sata_spd_limit <= target_limit)
2057 return 0;
2058
2059 plink->sata_spd_limit = target_limit;
2060
2061 /* Request another EH round by returning -EAGAIN if link is
2062 * going faster than the target speed. Forward progress is
2063 * guaranteed by setting sata_spd_limit to target_limit above.
2064 */
2065 if (plink->sata_spd > target) {
2066 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2067 sata_spd_string(target));
2068 return -EAGAIN;
2069 }
2070 return 0;
2071}
2072
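/*
 * ata_dev_knobble - detect a PATA device behind a SATA bridge
 *
 * Returns non-zero when the port reports a SATA cable but the device's
 * IDENTIFY data doesn't claim native SATA, i.e. a SATA<->PATA bridge is
 * probably in the path and bridge limits should be applied.  Devices
 * blacklisted with ATA_HORKAGE_BRIDGE_OK are exempted.
 */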
2073static inline u8 ata_dev_knobble(struct ata_device *dev)
2074{
2075 struct ata_port *ap = dev->link->ap;
2076
2077 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2078 return 0;
2079
2080 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2081}
2082
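/*
 * ata_dev_config_ncq - configure NCQ for a device during probe
 *
 * Enable NCQ if both the device and the host support it, optionally
 * turn on FPDMA AutoActivate, and write a short human-readable summary
 * (e.g. "NCQ (depth 31/32), AA") into @desc for the probe message.
 */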
2083static int ata_dev_config_ncq(struct ata_device *dev,
2084 char *desc, size_t desc_sz)
2085{
2086 struct ata_port *ap = dev->link->ap;
2087 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2088 unsigned int err_mask;
2089 char *aa_desc = "";
2090
2091 if (!ata_id_has_ncq(dev->id)) {
2092 desc[0] = '\0';
2093 return 0;
2094 }
2095 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2096 snprintf(desc, desc_sz, "NCQ (not used)");
2097 return 0;
2098 }
2099 if (ap->flags & ATA_FLAG_NCQ) {
2100 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2101 dev->flags |= ATA_DFLAG_NCQ;
2102 }
2103
2104 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2105 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2106 ata_id_has_fpdma_aa(dev->id)) {
2107 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2108 SATA_FPDMA_AA);
2109 if (err_mask) {
2110 ata_dev_err(dev,
2111 "failed to enable AA (error_mask=0x%x)\n",
2112 err_mask);
2113 if (err_mask != AC_ERR_DEV) {
2114 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2115 return -EIO;
2116 }
2117 } else
2118 aa_desc = ", AA";
2119 }
2120
2121 if (hdepth >= ddepth)
2122 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2123 else
2124 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2125 ddepth, aa_desc);
2126 return 0;
2127}
2128
2129/**
2130 * ata_dev_configure - Configure the specified ATA/ATAPI device
2131 * @dev: Target device to configure
2132 *
2133 * Configure @dev according to @dev->id. Generic and low-level
2134 * driver specific fixups are also applied.
2135 *
2136 * LOCKING:
2137 * Kernel thread context (may sleep)
2138 *
2139 * RETURNS:
2140 * 0 on success, -errno otherwise
2141 */
2142int ata_dev_configure(struct ata_device *dev)
2143{
2144 struct ata_port *ap = dev->link->ap;
2145 struct ata_eh_context *ehc = &dev->link->eh_context;
2146 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2147 const u16 *id = dev->id;
2148 unsigned long xfer_mask;
2149 char revbuf[7]; /* XYZ-99\0 */
2150 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2151 char modelbuf[ATA_ID_PROD_LEN+1];
2152 int rc;
2153
2154 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2155 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2156 return 0;
2157 }
2158
2159 if (ata_msg_probe(ap))
2160 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2161
2162 /* set horkage */
2163 dev->horkage |= ata_dev_blacklisted(dev);
2164 ata_force_horkage(dev);
2165
2166 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2167 ata_dev_info(dev, "unsupported device, disabling\n");
2168 ata_dev_disable(dev);
2169 return 0;
2170 }
2171
2172 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2173 dev->class == ATA_DEV_ATAPI) {
2174 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2175 atapi_enabled ? "not supported with this driver"
2176 : "disabled");
2177 ata_dev_disable(dev);
2178 return 0;
2179 }
2180
2181 rc = ata_do_link_spd_horkage(dev);
2182 if (rc)
2183 return rc;
2184
2185 /* let ACPI work its magic */
2186 rc = ata_acpi_on_devcfg(dev);
2187 if (rc)
2188 return rc;
2189
2190 /* massage HPA, do it early as it might change IDENTIFY data */
2191 rc = ata_hpa_resize(dev);
2192 if (rc)
2193 return rc;
2194
2195 /* print device capabilities */
2196 if (ata_msg_probe(ap))
2197 ata_dev_dbg(dev,
2198 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2199 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2200 __func__,
2201 id[49], id[82], id[83], id[84],
2202 id[85], id[86], id[87], id[88]);
2203
2204 /* initialize to-be-configured parameters */
2205 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2206 dev->max_sectors = 0;
2207 dev->cdb_len = 0;
2208 dev->n_sectors = 0;
2209 dev->cylinders = 0;
2210 dev->heads = 0;
2211 dev->sectors = 0;
2212 dev->multi_count = 0;
2213
2214 /*
2215 * common ATA, ATAPI feature tests
2216 */
2217
2218 /* find max transfer mode; for printk only */
2219 xfer_mask = ata_id_xfermask(id);
2220
2221 if (ata_msg_probe(ap))
2222 ata_dump_id(id);
2223
2224 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2225 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2226 sizeof(fwrevbuf));
2227
2228 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2229 sizeof(modelbuf));
2230
2231 /* ATA-specific feature tests */
2232 if (dev->class == ATA_DEV_ATA) {
2233 if (ata_id_is_cfa(id)) {
2234 /* CPRM may make this media unusable */
2235 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2236 ata_dev_warn(dev,
2237 "supports DRM functions and may not be fully accessible\n");
2238 snprintf(revbuf, 7, "CFA");
2239 } else {
2240 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2241 /* Warn the user if the device has TPM extensions */
2242 if (ata_id_has_tpm(id))
2243 ata_dev_warn(dev,
2244 "supports DRM functions and may not be fully accessible\n");
2245 }
2246
2247 dev->n_sectors = ata_id_n_sectors(id);
2248
2249 /* get current R/W Multiple count setting */
2250 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2251 unsigned int max = dev->id[47] & 0xff;
2252 unsigned int cnt = dev->id[59] & 0xff;
2253 /* only recognize/allow powers of two here */
2254 if (is_power_of_2(max) && is_power_of_2(cnt))
2255 if (cnt <= max)
2256 dev->multi_count = cnt;
2257 }
2258
2259 if (ata_id_has_lba(id)) {
2260 const char *lba_desc;
2261 char ncq_desc[24];
2262
2263 lba_desc = "LBA";
2264 dev->flags |= ATA_DFLAG_LBA;
2265 if (ata_id_has_lba48(id)) {
2266 dev->flags |= ATA_DFLAG_LBA48;
2267 lba_desc = "LBA48";
2268
2269 if (dev->n_sectors >= (1UL << 28) &&
2270 ata_id_has_flush_ext(id))
2271 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2272 }
2273
2274 /* config NCQ */
2275 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2276 if (rc)
2277 return rc;
2278
2279 /* print device info to dmesg */
2280 if (ata_msg_drv(ap) && print_info) {
2281 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2282 revbuf, modelbuf, fwrevbuf,
2283 ata_mode_string(xfer_mask));
2284 ata_dev_info(dev,
2285 "%llu sectors, multi %u: %s %s\n",
2286 (unsigned long long)dev->n_sectors,
2287 dev->multi_count, lba_desc, ncq_desc);
2288 }
2289 } else {
2290 /* CHS */
2291
2292 /* Default translation */
2293 dev->cylinders = id[1];
2294 dev->heads = id[3];
2295 dev->sectors = id[6];
2296
2297 if (ata_id_current_chs_valid(id)) {
2298 /* Current CHS translation is valid. */
2299 dev->cylinders = id[54];
2300 dev->heads = id[55];
2301 dev->sectors = id[56];
2302 }
2303
2304 /* print device info to dmesg */
2305 if (ata_msg_drv(ap) && print_info) {
2306 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2307 revbuf, modelbuf, fwrevbuf,
2308 ata_mode_string(xfer_mask));
2309 ata_dev_info(dev,
2310 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2311 (unsigned long long)dev->n_sectors,
2312 dev->multi_count, dev->cylinders,
2313 dev->heads, dev->sectors);
2314 }
2315 }
2316
2317 dev->cdb_len = 16;
2318 }
2319
2320 /* ATAPI-specific feature tests */
2321 else if (dev->class == ATA_DEV_ATAPI) {
2322 const char *cdb_intr_string = "";
2323 const char *atapi_an_string = "";
2324 const char *dma_dir_string = "";
2325 u32 sntf;
2326
2327 rc = atapi_cdb_len(id);
2328 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2329 if (ata_msg_warn(ap))
2330 ata_dev_warn(dev, "unsupported CDB len\n");
2331 rc = -EINVAL;
2332 goto err_out_nosup;
2333 }
2334 dev->cdb_len = (unsigned int) rc;
2335
2336 /* Enable ATAPI AN if both the host and device have
2337 * the support. If PMP is attached, SNTF is required
2338 * to enable ATAPI AN to discern between PHY status
2339 * changed notifications and ATAPI ANs.
2340 */
2341 if (atapi_an &&
2342 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2343 (!sata_pmp_attached(ap) ||
2344 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2345 unsigned int err_mask;
2346
2347 /* issue SET feature command to turn this on */
2348 err_mask = ata_dev_set_feature(dev,
2349 SETFEATURES_SATA_ENABLE, SATA_AN);
2350 if (err_mask)
2351 ata_dev_err(dev,
2352 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2353 err_mask);
2354 else {
2355 dev->flags |= ATA_DFLAG_AN;
2356 atapi_an_string = ", ATAPI AN";
2357 }
2358 }
2359
2360 if (ata_id_cdb_intr(dev->id)) {
2361 dev->flags |= ATA_DFLAG_CDB_INTR;
2362 cdb_intr_string = ", CDB intr";
2363 }
2364
2365 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2366 dev->flags |= ATA_DFLAG_DMADIR;
2367 dma_dir_string = ", DMADIR";
2368 }
2369
2370 /* print device info to dmesg */
2371 if (ata_msg_drv(ap) && print_info)
2372 ata_dev_info(dev,
2373 "ATAPI: %s, %s, max %s%s%s%s\n",
2374 modelbuf, fwrevbuf,
2375 ata_mode_string(xfer_mask),
2376 cdb_intr_string, atapi_an_string,
2377 dma_dir_string);
2378 }
2379
2380 /* determine max_sectors */
2381 dev->max_sectors = ATA_MAX_SECTORS;
2382 if (dev->flags & ATA_DFLAG_LBA48)
2383 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2384
2385 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2386 200 sectors */
2387 if (ata_dev_knobble(dev)) {
2388 if (ata_msg_drv(ap) && print_info)
2389 ata_dev_info(dev, "applying bridge limits\n");
2390 dev->udma_mask &= ATA_UDMA5;
2391 dev->max_sectors = ATA_MAX_SECTORS;
2392 }
2393
2394 if ((dev->class == ATA_DEV_ATAPI) &&
2395 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2396 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2397 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2398 }
2399
2400 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2401 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2402 dev->max_sectors);
2403
2404 if (ap->ops->dev_config)
2405 ap->ops->dev_config(dev);
2406
2407 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2408 /* Let the user know. We don't want to disallow opens for
2409 rescue purposes, or in case the vendor is just a blithering
2410 idiot. Do this after the dev_config call as some controllers
2411 with buggy firmware may want to avoid reporting false device
2412 bugs */
2413
2414 if (print_info) {
2415 ata_dev_warn(dev,
2416"Drive reports diagnostics failure. This may indicate a drive\n");
2417 ata_dev_warn(dev,
2418"fault or invalid emulation. Contact drive vendor for information.\n");
2419 }
2420 }
2421
2422 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2423 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2424 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2425 }
2426
2427 return 0;
2428
2429err_out_nosup:
2430 if (ata_msg_probe(ap))
2431 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2432 return rc;
2433}
2434
2435/**
2436 * ata_cable_40wire - return 40 wire cable type
2437 * @ap: port
2438 *
2439 * Helper method for drivers which want to hardwire 40 wire cable
2440 * detection.
2441 */
2442
2443int ata_cable_40wire(struct ata_port *ap)
2444{
2445 return ATA_CBL_PATA40;
2446}
2447
2448/**
2449 * ata_cable_80wire - return 80 wire cable type
2450 * @ap: port
2451 *
2452 * Helper method for drivers which want to hardwire 80 wire cable
2453 * detection.
2454 */
2455
2456int ata_cable_80wire(struct ata_port *ap)
2457{
2458 return ATA_CBL_PATA80;
2459}
2460
2461/**
2462 * ata_cable_unknown - return unknown PATA cable.
2463 * @ap: port
2464 *
2465 * Helper method for drivers which have no PATA cable detection.
2466 */
2467
2468int ata_cable_unknown(struct ata_port *ap)
2469{
2470 return ATA_CBL_PATA_UNK;
2471}
2472
2473/**
2474 * ata_cable_ignore - return ignored PATA cable.
2475 * @ap: port
2476 *
2477 * Helper method for drivers which don't use cable type to limit
2478 * transfer mode.
2479 */
2480int ata_cable_ignore(struct ata_port *ap)
2481{
2482 return ATA_CBL_PATA_IGN;
2483}
2484
2485/**
2486 * ata_cable_sata - return SATA cable type
2487 * @ap: port
2488 *
2489 * Helper method for drivers which have SATA cables
2490 */
2491
2492int ata_cable_sata(struct ata_port *ap)
2493{
2494 return ATA_CBL_SATA;
2495}
2496
2497/**
2498 * ata_bus_probe - Reset and probe ATA bus
2499 * @ap: Bus to probe
2500 *
2501 * Master ATA bus probing function. Initiates a hardware-dependent
2502 * bus reset, then attempts to identify any devices found on
2503 * the bus.
2504 *
2505 * LOCKING:
2506 * PCI/etc. bus probe sem.
2507 *
2508 * RETURNS:
2509 * Zero on success, negative errno otherwise.
2510 */
2511
2512int ata_bus_probe(struct ata_port *ap)
2513{
2514 unsigned int classes[ATA_MAX_DEVICES];
2515 int tries[ATA_MAX_DEVICES];
2516 int rc;
2517 struct ata_device *dev;
2518
2519 ata_for_each_dev(dev, &ap->link, ALL)
2520 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2521
2522 retry:
2523 ata_for_each_dev(dev, &ap->link, ALL) {
2524 /* If we issue an SRST then an ATA drive (not ATAPI)
2525 * may change configuration and be in PIO0 timing. If
2526 * we do a hard reset (or are coming from power on)
2527 * this is true for ATA or ATAPI. Until we've set a
2528 * suitable controller mode we should not touch the
2529 * bus as we may be talking too fast.
2530 */
2531 dev->pio_mode = XFER_PIO_0;
2532
2533 /* If the controller has a pio mode setup function
2534 * then use it to set the chipset to rights. Don't
2535 * touch the DMA setup as that will be dealt with when
2536 * configuring devices.
2537 */
2538 if (ap->ops->set_piomode)
2539 ap->ops->set_piomode(ap, dev);
2540 }
2541
2542 /* reset and determine device classes */
2543 ap->ops->phy_reset(ap);
2544
2545 ata_for_each_dev(dev, &ap->link, ALL) {
2546 if (dev->class != ATA_DEV_UNKNOWN)
2547 classes[dev->devno] = dev->class;
2548 else
2549 classes[dev->devno] = ATA_DEV_NONE;
2550
2551 dev->class = ATA_DEV_UNKNOWN;
2552 }
2553
2554 /* read IDENTIFY page and configure devices. We have to do the identify
2555 specific sequence bass-ackwards so that PDIAG- is released by
2556 the slave device */
2557
2558 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2559 if (tries[dev->devno])
2560 dev->class = classes[dev->devno];
2561
2562 if (!ata_dev_enabled(dev))
2563 continue;
2564
2565 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2566 dev->id);
2567 if (rc)
2568 goto fail;
2569 }
2570
2571 /* Now ask for the cable type as PDIAG- should have been released */
2572 if (ap->ops->cable_detect)
2573 ap->cbl = ap->ops->cable_detect(ap);
2574
2575 /* We may have SATA bridge glue hiding here irrespective of
2576 * the reported cable types and sensed types. When SATA
2577 * drives indicate we have a bridge, we don't know which end
2578 * of the link the bridge is which is a problem.
2579 */
2580 ata_for_each_dev(dev, &ap->link, ENABLED)
2581 if (ata_id_is_sata(dev->id))
2582 ap->cbl = ATA_CBL_SATA;
2583
2584 /* After the identify sequence we can now set up the devices. We do
2585 this in the normal order so that the user doesn't get confused */
2586
2587 ata_for_each_dev(dev, &ap->link, ENABLED) {
2588 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2589 rc = ata_dev_configure(dev);
2590 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2591 if (rc)
2592 goto fail;
2593 }
2594
2595 /* configure transfer mode */
2596 rc = ata_set_mode(&ap->link, &dev);
2597 if (rc)
2598 goto fail;
2599
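	/* success iff at least one device ended up enabled */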
2600 ata_for_each_dev(dev, &ap->link, ENABLED)
2601 return 0;
2602
2603 return -ENODEV;
2604
2605 fail:
2606 tries[dev->devno]--;
2607
2608 switch (rc) {
2609 case -EINVAL:
2610 /* eeek, something went very wrong, give up */
2611 tries[dev->devno] = 0;
2612 break;
2613
2614 case -ENODEV:
2615 /* give it just one more chance */
2616 tries[dev->devno] = min(tries[dev->devno], 1);
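		/* fall through */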
2617 case -EIO:
2618 if (tries[dev->devno] == 1) {
2619 /* This is the last chance, better to slow
2620 * down than lose it.
2621 */
2622 sata_down_spd_limit(&ap->link, 0);
2623 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2624 }
2625 }
2626
2627 if (!tries[dev->devno])
2628 ata_dev_disable(dev);
2629
2630 goto retry;
2631}
2632
2633/**
2634 * sata_print_link_status - Print SATA link status
2635 * @link: SATA link to printk link status about
2636 *
2637 * This function prints link speed and status of a SATA link.
2638 *
2639 * LOCKING:
2640 * None.
2641 */
2642static void sata_print_link_status(struct ata_link *link)
2643{
2644 u32 sstatus, scontrol, tmp;
2645
2646 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2647 return;
2648 sata_scr_read(link, SCR_CONTROL, &scontrol);
2649
2650 if (ata_phys_link_online(link)) {
2651 tmp = (sstatus >> 4) & 0xf;
2652 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2653 sata_spd_string(tmp), sstatus, scontrol);
2654 } else {
2655 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2656 sstatus, scontrol);
2657 }
2658}
2659
2660/**
2661 * ata_dev_pair - return other device on cable
2662 * @adev: device
2663 *
2664 * Obtain the other device on the same cable, or if none is
2665 * present NULL is returned
2666 */
2667
2668struct ata_device *ata_dev_pair(struct ata_device *adev)
2669{
2670 struct ata_link *link = adev->link;
2671 struct ata_device *pair = &link->device[1 - adev->devno];
2672 if (!ata_dev_enabled(pair))
2673 return NULL;
2674 return pair;
2675}
2676
2677/**
2678 * sata_down_spd_limit - adjust SATA spd limit downward
2679 * @link: Link to adjust SATA spd limit for
2680 * @spd_limit: Additional limit
2681 *
2682 * Adjust SATA spd limit of @link downward. Note that this
2683 * function only adjusts the limit. The change must be applied
2684 * using sata_set_spd().
2685 *
2686 * If @spd_limit is non-zero, the speed is limited to a value equal
2687 * to or lower than @spd_limit if such a speed is supported. If
2688 * @spd_limit is slower than any supported speed, only the lowest
2689 * supported speed is allowed.
2690 *
2691 * LOCKING:
2692 * Inherited from caller.
2693 *
2694 * RETURNS:
2695 * 0 on success, negative errno on failure
2696 */
2697int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2698{
2699 u32 sstatus, spd, mask;
2700 int rc, bit;
2701
2702 if (!sata_scr_valid(link))
2703 return -EOPNOTSUPP;
2704
2705 /* If SCR can be read, use it to determine the current SPD.
2706 * If not, use cached value in link->sata_spd.
2707 */
2708 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2709 if (rc == 0 && ata_sstatus_online(sstatus))
2710 spd = (sstatus >> 4) & 0xf;
2711 else
2712 spd = link->sata_spd;
2713
2714 mask = link->sata_spd_limit;
2715 if (mask <= 1)
2716 return -EINVAL;
2717
2718 /* unconditionally mask off the highest bit */
2719 bit = fls(mask) - 1;
2720 mask &= ~(1 << bit);
2721
2722 /* Mask off all speeds higher than or equal to the current
2723 * one. Force 1.5Gbps if current SPD is not available.
2724 */
2725 if (spd > 1)
2726 mask &= (1 << (spd - 1)) - 1;
2727 else
2728 mask &= 1;
2729
2730 /* were we already at the bottom? */
2731 if (!mask)
2732 return -EINVAL;
2733
2734 if (spd_limit) {
2735 if (mask & ((1 << spd_limit) - 1))
2736 mask &= (1 << spd_limit) - 1;
2737 else {
2738 bit = ffs(mask) - 1;
2739 mask = 1 << bit;
2740 }
2741 }
2742
2743 link->sata_spd_limit = mask;
2744
2745 ata_link_warn(link, "limiting SATA link speed to %s\n",
2746 sata_spd_string(fls(mask)));
2747
2748 return 0;
2749}
2750
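/*
 * Compute the SPD value SControl should carry for @link's current speed
 * limit and patch it into *@scontrol.  Returns non-zero if it differs
 * from what is currently programmed, i.e. the limit still needs to be
 * applied with an SControl write (and usually a hardreset).
 */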
2751static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2752{
2753 struct ata_link *host_link = &link->ap->link;
2754 u32 limit, target, spd;
2755
2756 limit = link->sata_spd_limit;
2757
2758 /* Don't configure downstream link faster than upstream link.
2759 * It doesn't speed up anything and some PMPs choke on such
2760 * configuration.
2761 */
2762 if (!ata_is_host_link(link) && host_link->sata_spd)
2763 limit &= (1 << host_link->sata_spd) - 1;
2764
2765 if (limit == UINT_MAX)
2766 target = 0;
2767 else
2768 target = fls(limit);
2769
2770 spd = (*scontrol >> 4) & 0xf;
2771 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2772
2773 return spd != target;
2774}
2775
2776/**
2777 * sata_set_spd_needed - is SATA spd configuration needed
2778 * @link: Link in question
2779 *
2780 * Test whether the spd limit in SControl matches
2781 * @link->sata_spd_limit. This function is used to determine
2782 * whether hardreset is necessary to apply SATA spd
2783 * configuration.
2784 *
2785 * LOCKING:
2786 * Inherited from caller.
2787 *
2788 * RETURNS:
2789 * 1 if SATA spd configuration is needed, 0 otherwise.
2790 */
2791static int sata_set_spd_needed(struct ata_link *link)
2792{
2793 u32 scontrol;
2794
2795 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2796 return 1;
2797
2798 return __sata_set_spd_needed(link, &scontrol);
2799}
2800
2801/**
2802 * sata_set_spd - set SATA spd according to spd limit
2803 * @link: Link to set SATA spd for
2804 *
2805 * Set SATA spd of @link according to sata_spd_limit.
2806 *
2807 * LOCKING:
2808 * Inherited from caller.
2809 *
2810 * RETURNS:
2811 * 0 if spd doesn't need to be changed, 1 if spd has been
2812 * changed. Negative errno if SCR registers are inaccessible.
2813 */
2814int sata_set_spd(struct ata_link *link)
2815{
2816 u32 scontrol;
2817 int rc;
2818
2819 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2820 return rc;
2821
2822 if (!__sata_set_spd_needed(link, &scontrol))
2823 return 0;
2824
2825 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2826 return rc;
2827
2828 return 1;
2829}
2830
2831/*
2832 * This mode timing computation functionality is ported over from
2833 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2834 */
2835/*
2836 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2837 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2838 * for UDMA6, which is currently supported only by Maxtor drives.
2839 *
2840 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2841 */
2842
2843static const struct ata_timing ata_timing[] = {
2844/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2845 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2846 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2847 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2848 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2849 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2850 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2851 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2852
2853 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2854 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2855 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2856
2857 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2858 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2859 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2860 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2861 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2862
2863/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2864 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2865 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2866 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2867 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2868 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2869 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2870 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
2871
2872 { 0xFF }
2873};
2874
2875#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
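/*
 * ENOUGH() rounds a duration up to a whole number of clock periods of
 * length @unit (ceiling division); EZ() additionally maps a zero input
 * to zero so unused timing fields stay unused after quantization.
 */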
2876#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2877
2878static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2879{
2880 q->setup = EZ(t->setup * 1000, T);
2881 q->act8b = EZ(t->act8b * 1000, T);
2882 q->rec8b = EZ(t->rec8b * 1000, T);
2883 q->cyc8b = EZ(t->cyc8b * 1000, T);
2884 q->active = EZ(t->active * 1000, T);
2885 q->recover = EZ(t->recover * 1000, T);
2886 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2887 q->cycle = EZ(t->cycle * 1000, T);
2888 q->udma = EZ(t->udma * 1000, UT);
2889}
2890
2891void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
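/*
 * Merge two timing records by taking the slower (larger) value of every
 * field selected in @what, so the result satisfies both sets of
 * constraints.
 */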
2892 struct ata_timing *m, unsigned int what)
2893{
2894 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2895 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2896 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2897 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2898 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2899 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2900 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2901 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2902 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2903}
2904
2905const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
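/*
 * Look up the timing entry for @xfer_mode.  Relies on ata_timing[]
 * being sorted by ascending mode number and terminated by the 0xFF
 * sentinel; returns NULL if @xfer_mode is not in the table.
 */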
2906{
2907 const struct ata_timing *t = ata_timing;
2908
2909 while (xfer_mode > t->mode)
2910 t++;
2911
2912 if (xfer_mode == t->mode)
2913 return t;
2914 return NULL;
2915}
2916
2917int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2918 struct ata_timing *t, int T, int UT)
2919{
2920 const u16 *id = adev->id;
2921 const struct ata_timing *s;
2922 struct ata_timing p;
2923
2924 /*
2925 * Find the mode.
2926 */
2927
2928 if (!(s = ata_timing_find_mode(speed)))
2929 return -EINVAL;
2930
2931 memcpy(t, s, sizeof(*s));
2932
2933 /*
2934 * If the drive is an EIDE drive, it can tell us it needs extended
2935 * PIO/MW_DMA cycle timing.
2936 */
2937
2938 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2939 memset(&p, 0, sizeof(p));
2940
2941 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2942 if (speed <= XFER_PIO_2)
2943 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
2944 else if ((speed <= XFER_PIO_4) ||
2945 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
2946 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
2947 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
2948 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
2949
2950 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2951 }
2952
2953 /*
2954 * Convert the timing to bus clock counts.
2955 */
2956
2957 ata_timing_quantize(t, t, T, UT);
2958
2959 /*
2960 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2961	 * S.M.A.R.T. and some other commands. We have to ensure that the
2962	 * DMA cycle timing is no faster than the fastest PIO timing.
2963 */
2964
2965 if (speed > XFER_PIO_6) {
2966 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2967 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2968 }
2969
2970 /*
2971 * Lengthen active & recovery time so that cycle time is correct.
2972 */
2973
2974 if (t->act8b + t->rec8b < t->cyc8b) {
2975 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2976 t->rec8b = t->cyc8b - t->act8b;
2977 }
2978
2979 if (t->active + t->recover < t->cycle) {
2980 t->active += (t->cycle - (t->active + t->recover)) / 2;
2981 t->recover = t->cycle - t->active;
2982 }
2983
2984 /* In a few cases quantisation may produce enough errors to
2985	   leave t->cycle too low for the sum of active and recovery;
2986	   if so, we must correct this. */
2987 if (t->active + t->recover > t->cycle)
2988 t->cycle = t->active + t->recover;
2989
2990 return 0;
2991}
2992
2993/**
2994 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2995 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2996 * @cycle: cycle duration in ns
2997 *
2998 * Return matching xfer mode for @cycle. The returned mode is of
2999 * the transfer type specified by @xfer_shift. If @cycle is too
3000 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3001 * than the fastest known mode, the fasted mode is returned.
3002 * than the fastest known mode, the fastest mode is returned.
3003 * LOCKING:
3004 * None.
3005 *
3006 * RETURNS:
3007 * Matching xfer_mode, 0xff if no match found.
3008 */
3009u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3010{
3011 u8 base_mode = 0xff, last_mode = 0xff;
3012 const struct ata_xfer_ent *ent;
3013 const struct ata_timing *t;
3014
3015 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3016 if (ent->shift == xfer_shift)
3017 base_mode = ent->base;
3018
3019 for (t = ata_timing_find_mode(base_mode);
3020 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3021 unsigned short this_cycle;
3022
3023 switch (xfer_shift) {
3024 case ATA_SHIFT_PIO:
3025 case ATA_SHIFT_MWDMA:
3026 this_cycle = t->cycle;
3027 break;
3028 case ATA_SHIFT_UDMA:
3029 this_cycle = t->udma;
3030 break;
3031 default:
3032 return 0xff;
3033 }
3034
3035 if (cycle > this_cycle)
3036 break;
3037
3038 last_mode = t->mode;
3039 }
3040
3041 return last_mode;
3042}
3043
3044/**
3045 * ata_down_xfermask_limit - adjust dev xfer masks downward
3046 * @dev: Device to adjust xfer masks
3047 * @sel: ATA_DNXFER_* selector
3048 *
3049 * Adjust xfer masks of @dev downward. Note that this function
3050 * does not apply the change. Invoking ata_set_mode() afterwards
3051 * will apply the limit.
3052 *
3053 * LOCKING:
3054 * Inherited from caller.
3055 *
3056 * RETURNS:
3057 * 0 on success, negative errno on failure
3058 */
3059int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3060{
3061 char buf[32];
3062 unsigned long orig_mask, xfer_mask;
3063 unsigned long pio_mask, mwdma_mask, udma_mask;
3064 int quiet, highbit;
3065
3066 quiet = !!(sel & ATA_DNXFER_QUIET);
3067 sel &= ~ATA_DNXFER_QUIET;
3068
3069 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3070 dev->mwdma_mask,
3071 dev->udma_mask);
3072 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3073
3074 switch (sel) {
3075 case ATA_DNXFER_PIO:
3076 highbit = fls(pio_mask) - 1;
3077 pio_mask &= ~(1 << highbit);
3078 break;
3079
3080 case ATA_DNXFER_DMA:
3081 if (udma_mask) {
3082 highbit = fls(udma_mask) - 1;
3083 udma_mask &= ~(1 << highbit);
3084 if (!udma_mask)
3085 return -ENOENT;
3086 } else if (mwdma_mask) {
3087 highbit = fls(mwdma_mask) - 1;
3088 mwdma_mask &= ~(1 << highbit);
3089 if (!mwdma_mask)
3090 return -ENOENT;
3091 }
3092 break;
3093
3094 case ATA_DNXFER_40C:
3095 udma_mask &= ATA_UDMA_MASK_40C;
3096 break;
3097
3098 case ATA_DNXFER_FORCE_PIO0:
3099 pio_mask &= 1;
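		/* fall through */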
3100 case ATA_DNXFER_FORCE_PIO:
3101 mwdma_mask = 0;
3102 udma_mask = 0;
3103 break;
3104
3105 default:
3106 BUG();
3107 }
3108
3109 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3110
3111 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3112 return -ENOENT;
3113
3114 if (!quiet) {
3115 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3116 snprintf(buf, sizeof(buf), "%s:%s",
3117 ata_mode_string(xfer_mask),
3118 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3119 else
3120 snprintf(buf, sizeof(buf), "%s",
3121 ata_mode_string(xfer_mask));
3122
3123 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3124 }
3125
3126 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3127 &dev->udma_mask);
3128
3129 return 0;
3130}
3131
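/*
 * ata_dev_set_mode - issue SET FEATURES - XFER MODE and revalidate
 *
 * Program the device's transfer mode (skipped on SATA devices with the
 * NOSETXFER horkage), re-read IDENTIFY data and decide whether a device
 * error from SET_XFERMODE can be safely ignored for quirky hardware.
 */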
3132static int ata_dev_set_mode(struct ata_device *dev)
3133{
3134 struct ata_port *ap = dev->link->ap;
3135 struct ata_eh_context *ehc = &dev->link->eh_context;
3136 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3137 const char *dev_err_whine = "";
3138 int ign_dev_err = 0;
3139 unsigned int err_mask = 0;
3140 int rc;
3141
3142 dev->flags &= ~ATA_DFLAG_PIO;
3143 if (dev->xfer_shift == ATA_SHIFT_PIO)
3144 dev->flags |= ATA_DFLAG_PIO;
3145
3146 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3147 dev_err_whine = " (SET_XFERMODE skipped)";
3148 else {
3149 if (nosetxfer)
3150 ata_dev_warn(dev,
3151 "NOSETXFER but PATA detected - can't "
3152 "skip SETXFER, might malfunction\n");
3153 err_mask = ata_dev_set_xfermode(dev);
3154 }
3155
3156 if (err_mask & ~AC_ERR_DEV)
3157 goto fail;
3158
3159 /* revalidate */
3160 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3161 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3162 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3163 if (rc)
3164 return rc;
3165
3166 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3167 /* Old CFA may refuse this command, which is just fine */
3168 if (ata_id_is_cfa(dev->id))
3169 ign_dev_err = 1;
3170 /* Catch several broken garbage emulations plus some pre
3171 ATA devices */
3172 if (ata_id_major_version(dev->id) == 0 &&
3173 dev->pio_mode <= XFER_PIO_2)
3174 ign_dev_err = 1;
3175 /* Some very old devices and some bad newer ones fail
3176 any kind of SET_XFERMODE request but support PIO0-2
3177 timings and no IORDY */
3178 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3179 ign_dev_err = 1;
3180 }
3181 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3182 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3183 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3184 dev->dma_mode == XFER_MW_DMA_0 &&
3185 (dev->id[63] >> 8) & 1)
3186 ign_dev_err = 1;
3187
3188 /* if the device is actually configured correctly, ignore dev err */
3189 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3190 ign_dev_err = 1;
3191
3192 if (err_mask & AC_ERR_DEV) {
3193 if (!ign_dev_err)
3194 goto fail;
3195 else
3196 dev_err_whine = " (device error ignored)";
3197 }
3198
3199 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3200 dev->xfer_shift, (int)dev->xfer_mode);
3201
3202 ata_dev_info(dev, "configured for %s%s\n",
3203 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3204 dev_err_whine);
3205
3206 return 0;
3207
3208 fail:
3209 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3210 return -EIO;
3211}
3212
3213/**
3214 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3215 * @link: link on which timings will be programmed
3216 * @r_failed_dev: out parameter for failed device
3217 *
3218 * Standard implementation of the function used to tune and set
3219 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3220 * ata_dev_set_mode() fails, pointer to the failing device is
3221 * returned in @r_failed_dev.
3222 *
3223 * LOCKING:
3224 * PCI/etc. bus probe sem.
3225 *
3226 * RETURNS:
3227 * 0 on success, negative errno otherwise
3228 */
3229
3230int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3231{
3232 struct ata_port *ap = link->ap;
3233 struct ata_device *dev;
3234 int rc = 0, used_dma = 0, found = 0;
3235
3236 /* step 1: calculate xfer_mask */
3237 ata_for_each_dev(dev, link, ENABLED) {
3238 unsigned long pio_mask, dma_mask;
3239 unsigned int mode_mask;
3240
3241 mode_mask = ATA_DMA_MASK_ATA;
3242 if (dev->class == ATA_DEV_ATAPI)
3243 mode_mask = ATA_DMA_MASK_ATAPI;
3244 else if (ata_id_is_cfa(dev->id))
3245 mode_mask = ATA_DMA_MASK_CFA;
3246
3247 ata_dev_xfermask(dev);
3248 ata_force_xfermask(dev);
3249
3250 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3252
3253 if (libata_dma_mask & mode_mask)
3254 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3255 else
3256 dma_mask = 0;
3257
3258 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3259 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3260
3261 found = 1;
3262 if (ata_dma_enabled(dev))
3263 used_dma = 1;
3264 }
3265 if (!found)
3266 goto out;
3267
3268 /* step 2: always set host PIO timings */
3269 ata_for_each_dev(dev, link, ENABLED) {
3270 if (dev->pio_mode == 0xff) {
3271 ata_dev_warn(dev, "no PIO support\n");
3272 rc = -EINVAL;
3273 goto out;
3274 }
3275
3276 dev->xfer_mode = dev->pio_mode;
3277 dev->xfer_shift = ATA_SHIFT_PIO;
3278 if (ap->ops->set_piomode)
3279 ap->ops->set_piomode(ap, dev);
3280 }
3281
3282 /* step 3: set host DMA timings */
3283 ata_for_each_dev(dev, link, ENABLED) {
3284 if (!ata_dma_enabled(dev))
3285 continue;
3286
3287 dev->xfer_mode = dev->dma_mode;
3288 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3289 if (ap->ops->set_dmamode)
3290 ap->ops->set_dmamode(ap, dev);
3291 }
3292
3293 /* step 4: update devices' xfer mode */
3294 ata_for_each_dev(dev, link, ENABLED) {
3295 rc = ata_dev_set_mode(dev);
3296 if (rc)
3297 goto out;
3298 }
3299
3300 /* Record simplex status. If we selected DMA then the other
3301 * host channels are not permitted to do so.
3302 */
3303 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3304 ap->host->simplex_claimed = ap;
3305
3306 out:
3307 if (rc)
3308 *r_failed_dev = dev;
3309 return rc;
3310}
3311
3312/**
3313 * ata_wait_ready - wait for link to become ready
3314 * @link: link to be waited on
3315 * @deadline: deadline jiffies for the operation
3316 * @check_ready: callback to check link readiness
3317 *
3318 * Wait for @link to become ready. @check_ready should return
3319 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3320 * link doesn't seem to be occupied, other errno for other error
3321 * conditions.
3322 *
3323 * Transient -ENODEV conditions are allowed for
3324 * ATA_TMOUT_FF_WAIT.
3325 *
3326 * LOCKING:
3327 * EH context.
3328 *
3329 * RETURNS:
3330 *	0 if @link is ready before @deadline; otherwise, -errno.
3331 */
3332int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3333 int (*check_ready)(struct ata_link *link))
3334{
3335 unsigned long start = jiffies;
3336 unsigned long nodev_deadline;
3337 int warned = 0;
3338
3339 /* choose which 0xff timeout to use, read comment in libata.h */
3340 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3341 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3342 else
3343 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3344
3345 /* Slave readiness can't be tested separately from master. On
3346 * M/S emulation configuration, this function should be called
3347 * only on the master and it will handle both master and slave.
3348 */
3349 WARN_ON(link == link->ap->slave_link);
3350
3351 if (time_after(nodev_deadline, deadline))
3352 nodev_deadline = deadline;
3353
3354 while (1) {
3355 unsigned long now = jiffies;
3356 int ready, tmp;
3357
3358 ready = tmp = check_ready(link);
3359 if (ready > 0)
3360 return 0;
3361
3362 /*
3363 * -ENODEV could be transient. Ignore -ENODEV if link
3364 * is online. Also, some SATA devices take a long
3365 * time to clear 0xff after reset. Wait for
3366 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3367 * offline.
3368 *
3369 * Note that some PATA controllers (pata_ali) explode
3370 * if status register is read more than once when
3371 * there's no device attached.
3372 */
3373 if (ready == -ENODEV) {
3374 if (ata_link_online(link))
3375 ready = 0;
3376 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3377 !ata_link_offline(link) &&
3378 time_before(now, nodev_deadline))
3379 ready = 0;
3380 }
3381
3382 if (ready)
3383 return ready;
3384 if (time_after(now, deadline))
3385 return -EBUSY;
3386
3387 if (!warned && time_after(now, start + 5 * HZ) &&
3388 (deadline - now > 3 * HZ)) {
3389 ata_link_warn(link,
3390 "link is slow to respond, please be patient "
3391 "(ready=%d)\n", tmp);
3392 warned = 1;
3393 }
3394
3395 ata_msleep(link->ap, 50);
3396 }
3397}
3398
3399/**
3400 * ata_wait_after_reset - wait for link to become ready after reset
3401 * @link: link to be waited on
3402 * @deadline: deadline jiffies for the operation
3403 * @check_ready: callback to check link readiness
3404 *
3405 * Wait for @link to become ready after reset.
3406 *
3407 * LOCKING:
3408 * EH context.
3409 *
3410 * RETURNS:
3411 *	0 if @link is ready before @deadline; otherwise, -errno.
3412 */
3413int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3414 int (*check_ready)(struct ata_link *link))
3415{
3416 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3417
3418 return ata_wait_ready(link, deadline, check_ready);
3419}
3420
3421/**
3422 * sata_link_debounce - debounce SATA phy status
3423 * @link: ATA link to debounce SATA phy status for
3424 * @params: timing parameters { interval, duration, timeout } in msec
3425 * @deadline: deadline jiffies for the operation
3426 *
3427 * Make sure SStatus of @link reaches stable state, determined by
3428 * holding the same value where DET is not 1 for @duration polled
3429 * every @interval, before @timeout. Timeout constraints the
3430 * every @interval, before @timeout. Timeout constrains the
3431 * beginning of the stable state. Because DET gets stuck at 1 on
3432 * some controllers after hot unplugging, this function waits
3433 *
3434 * @timeout is further limited by @deadline. The sooner of the
3435 * two is used.
3436 *
3437 * LOCKING:
3438 * Kernel thread context (may sleep)
3439 *
3440 * RETURNS:
3441 * 0 on success, -errno on failure.
3442 */
3443int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3444 unsigned long deadline)
3445{
3446 unsigned long interval = params[0];
3447 unsigned long duration = params[1];
3448 unsigned long last_jiffies, t;
3449 u32 last, cur;
3450 int rc;
3451
3452 t = ata_deadline(jiffies, params[2]);
3453 if (time_before(t, deadline))
3454 deadline = t;
3455
3456 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3457 return rc;
3458 cur &= 0xf;
3459
3460 last = cur;
3461 last_jiffies = jiffies;
3462
3463 while (1) {
3464 ata_msleep(link->ap, interval);
3465 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3466 return rc;
3467 cur &= 0xf;
3468
3469 /* DET stable? */
3470 if (cur == last) {
3471 if (cur == 1 && time_before(jiffies, deadline))
3472 continue;
3473 if (time_after(jiffies,
3474 ata_deadline(last_jiffies, duration)))
3475 return 0;
3476 continue;
3477 }
3478
3479 /* unstable, start over */
3480 last = cur;
3481 last_jiffies = jiffies;
3482
3483 /* Check deadline. If debouncing failed, return
3484 * -EPIPE to tell upper layer to lower link speed.
3485 */
3486 if (time_after(jiffies, deadline))
3487 return -EPIPE;
3488 }
3489}
3490
3491/**
3492 * sata_link_resume - resume SATA link
3493 * @link: ATA link to resume SATA
3494 * @params: timing parameters { interval, duration, timeout } in msec
3495 * @deadline: deadline jiffies for the operation
3496 *
3497 * Resume SATA phy @link and debounce it.
3498 *
3499 * LOCKING:
3500 * Kernel thread context (may sleep)
3501 *
3502 * RETURNS:
3503 * 0 on success, -errno on failure.
3504 */
3505int sata_link_resume(struct ata_link *link, const unsigned long *params,
3506 unsigned long deadline)
3507{
3508 int tries = ATA_LINK_RESUME_TRIES;
3509 u32 scontrol, serror;
3510 int rc;
3511
3512 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3513 return rc;
3514
3515 /*
3516 * Writes to SControl sometimes get ignored under certain
3517 * controllers (ata_piix SIDPR). Make sure DET actually is
3518 * cleared.
3519 */
3520 do {
3521 scontrol = (scontrol & 0x0f0) | 0x300;
3522 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3523 return rc;
3524 /*
3525 * Some PHYs react badly if SStatus is pounded
3526 * immediately after resuming. Delay 200ms before
3527 * debouncing.
3528 */
3529 ata_msleep(link->ap, 200);
3530
3531 /* is SControl restored correctly? */
3532 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3533 return rc;
3534 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3535
3536 if ((scontrol & 0xf0f) != 0x300) {
3537 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3538 scontrol);
3539 return 0;
3540 }
3541
3542 if (tries < ATA_LINK_RESUME_TRIES)
3543 ata_link_warn(link, "link resume succeeded after %d retries\n",
3544 ATA_LINK_RESUME_TRIES - tries);
3545
3546 if ((rc = sata_link_debounce(link, params, deadline)))
3547 return rc;
3548
3549 /* clear SError, some PHYs require this even for SRST to work */
3550 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3551 rc = sata_scr_write(link, SCR_ERROR, serror);
3552
3553 return rc != -EINVAL ? rc : 0;
3554}
3555
3556/**
3557 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3558 * @link: ATA link to manipulate SControl for
3559 * @policy: LPM policy to configure
3560 * @spm_wakeup: initiate LPM transition to active state
3561 *
3562 * Manipulate the IPM field of the SControl register of @link
3563 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3564 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3565 * the link. This function also clears PHYRDY_CHG before
3566 * returning.
3567 *
3568 * LOCKING:
3569 * EH context.
3570 *
3571 * RETURNS:
3572 * 0 on success, -errno otherwise.
3573 */
3574int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3575 bool spm_wakeup)
3576{
3577 struct ata_eh_context *ehc = &link->eh_context;
3578 bool woken_up = false;
3579 u32 scontrol;
3580 int rc;
3581
3582 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3583 if (rc)
3584 return rc;
3585
3586 switch (policy) {
3587 case ATA_LPM_MAX_POWER:
3588 /* disable all LPM transitions */
3589 scontrol |= (0x3 << 8);
3590 /* initiate transition to active state */
3591 if (spm_wakeup) {
3592 scontrol |= (0x4 << 12);
3593 woken_up = true;
3594 }
3595 break;
3596 case ATA_LPM_MED_POWER:
3597 /* allow LPM to PARTIAL */
3598 scontrol &= ~(0x1 << 8);
3599 scontrol |= (0x2 << 8);
3600 break;
3601 case ATA_LPM_MIN_POWER:
3602 if (ata_link_nr_enabled(link) > 0)
3603 /* no restrictions on LPM transitions */
3604 scontrol &= ~(0x3 << 8);
3605 else {
3606 /* empty port, power off */
3607 scontrol &= ~0xf;
3608 scontrol |= (0x1 << 2);
3609 }
3610 break;
3611 default:
3612 WARN_ON(1);
3613 }
3614
3615 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3616 if (rc)
3617 return rc;
3618
3619 /* give the link time to transit out of LPM state */
3620 if (woken_up)
3621 msleep(10);
3622
3623 /* clear PHYRDY_CHG from SError */
3624 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3625 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3626}
3627
3628/**
3629 * ata_std_prereset - prepare for reset
3630 * @link: ATA link to be reset
3631 * @deadline: deadline jiffies for the operation
3632 *
3633 * @link is about to be reset. Initialize it. Failure from
3634 * prereset makes libata abort whole reset sequence and give up
3635 * that port, so prereset should be best-effort. It does its
3636 * best to prepare for reset sequence but if things go wrong, it
3637 * should just whine, not fail.
3638 *
3639 * LOCKING:
3640 * Kernel thread context (may sleep)
3641 *
3642 * RETURNS:
3643 * 0 on success, -errno otherwise.
3644 */
3645int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3646{
3647 struct ata_port *ap = link->ap;
3648 struct ata_eh_context *ehc = &link->eh_context;
3649 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3650 int rc;
3651
3652 /* if we're about to do hardreset, nothing more to do */
3653 if (ehc->i.action & ATA_EH_HARDRESET)
3654 return 0;
3655
3656 /* if SATA, resume link */
3657 if (ap->flags & ATA_FLAG_SATA) {
3658 rc = sata_link_resume(link, timing, deadline);
3659 /* whine about phy resume failure but proceed */
3660 if (rc && rc != -EOPNOTSUPP)
3661 ata_link_warn(link,
3662 "failed to resume link for reset (errno=%d)\n",
3663 rc);
3664 }
3665
3666 /* no point in trying softreset on offline link */
3667 if (ata_phys_link_offline(link))
3668 ehc->i.action &= ~ATA_EH_SOFTRESET;
3669
3670 return 0;
3671}
3672
3673/**
3674 * sata_link_hardreset - reset link via SATA phy reset
3675 * @link: link to reset
3676 * @timing: timing parameters { interval, duration, timeout } in msec
3677 * @deadline: deadline jiffies for the operation
3678 * @online: optional out parameter indicating link onlineness
3679 * @check_ready: optional callback to check link readiness
3680 *
3681 * SATA phy-reset @link using DET bits of SControl register.
3682 * After hardreset, link readiness is waited upon using
3683 * ata_wait_ready() if @check_ready is specified. LLDs are
3684 * allowed to not specify @check_ready and wait themselves after this
3685 * function returns. Device classification is LLD's
3686 * responsibility.
3687 *
3688 * *@online is set to one iff reset succeeded and @link is online
3689 * after reset.
3690 *
3691 * LOCKING:
3692 * Kernel thread context (may sleep)
3693 *
3694 * RETURNS:
3695 * 0 on success, -errno otherwise.
3696 */
3697int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3698 unsigned long deadline,
3699 bool *online, int (*check_ready)(struct ata_link *))
3700{
3701 u32 scontrol;
3702 int rc;
3703
3704 DPRINTK("ENTER\n");
3705
3706 if (online)
3707 *online = false;
3708
3709 if (sata_set_spd_needed(link)) {
3710 /* SATA spec says nothing about how to reconfigure
3711 * spd. To be on the safe side, turn off phy during
3712 * reconfiguration. This works for at least ICH7 AHCI
3713 * and Sil3124.
3714 */
3715 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3716 goto out;
3717
3718 scontrol = (scontrol & 0x0f0) | 0x304;
3719
3720 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3721 goto out;
3722
3723 sata_set_spd(link);
3724 }
3725
3726 /* issue phy wake/reset */
3727 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3728 goto out;
3729
3730 scontrol = (scontrol & 0x0f0) | 0x301;
3731
3732 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3733 goto out;
3734
3735 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3736 * 10.4.2 says at least 1 ms.
3737 */
3738 ata_msleep(link->ap, 1);
3739
3740 /* bring link back */
3741 rc = sata_link_resume(link, timing, deadline);
3742 if (rc)
3743 goto out;
3744 /* if link is offline nothing more to do */
3745 if (ata_phys_link_offline(link))
3746 goto out;
3747
3748 /* Link is online. From this point, -ENODEV too is an error. */
3749 if (online)
3750 *online = true;
3751
3752 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3753 /* If PMP is supported, we have to do follow-up SRST.
3754 * Some PMPs don't send D2H Reg FIS after hardreset if
3755 * the first port is empty. Wait only for
3756 * ATA_TMOUT_PMP_SRST_WAIT.
3757 */
3758 if (check_ready) {
3759 unsigned long pmp_deadline;
3760
3761 pmp_deadline = ata_deadline(jiffies,
3762 ATA_TMOUT_PMP_SRST_WAIT);
3763 if (time_after(pmp_deadline, deadline))
3764 pmp_deadline = deadline;
3765 ata_wait_ready(link, pmp_deadline, check_ready);
3766 }
3767 rc = -EAGAIN;
3768 goto out;
3769 }
3770
3771 rc = 0;
3772 if (check_ready)
3773 rc = ata_wait_ready(link, deadline, check_ready);
3774 out:
3775 if (rc && rc != -EAGAIN) {
3776 /* online is set iff link is online && reset succeeded */
3777 if (online)
3778 *online = false;
3779 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3780 }
3781 DPRINTK("EXIT, rc=%d\n", rc);
3782 return rc;
3783}
3784
3785/**
3786 * sata_std_hardreset - COMRESET w/o waiting or classification
3787 * @link: link to reset
3788 * @class: resulting class of attached device
3789 * @deadline: deadline jiffies for the operation
3790 *
3791 * Standard SATA COMRESET w/o waiting or classification.
3792 *
3793 * LOCKING:
3794 * Kernel thread context (may sleep)
3795 *
3796 * RETURNS:
3797 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3798 */
3799int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3800 unsigned long deadline)
3801{
3802 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3803 bool online;
3804 int rc;
3805
3806 /* do hardreset */
3807 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3808 return online ? -EAGAIN : rc;
3809}
3810
3811/**
3812 * ata_std_postreset - standard postreset callback
3813 * @link: the target ata_link
3814 * @classes: classes of attached devices
3815 *
3816 * This function is invoked after a successful reset. Note that
3817 * the device might have been reset more than once using
3818 * different reset methods before postreset is invoked.
3819 *
3820 * LOCKING:
3821 * Kernel thread context (may sleep)
3822 */
3823void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3824{
3825 u32 serror;
3826
3827 DPRINTK("ENTER\n");
3828
3829 /* reset complete, clear SError */
3830 if (!sata_scr_read(link, SCR_ERROR, &serror))
3831 sata_scr_write(link, SCR_ERROR, serror);
3832
3833 /* print link status */
3834 sata_print_link_status(link);
3835
3836 DPRINTK("EXIT\n");
3837}
3838
3839/**
3840 * ata_dev_same_device - Determine whether new ID matches configured device
3841 * @dev: device to compare against
3842 * @new_class: class of the new device
3843 * @new_id: IDENTIFY page of the new device
3844 *
3845 * Compare @new_class and @new_id against @dev and determine
3846 * whether @dev is the device indicated by @new_class and
3847 * @new_id.
3848 *
3849 * LOCKING:
3850 * None.
3851 *
3852 * RETURNS:
3853 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3854 */
3855static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3856 const u16 *new_id)
3857{
3858 const u16 *old_id = dev->id;
3859 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3860 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3861
3862 if (dev->class != new_class) {
3863 ata_dev_info(dev, "class mismatch %d != %d\n",
3864 dev->class, new_class);
3865 return 0;
3866 }
3867
3868 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3869 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3870 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3871 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3872
3873 if (strcmp(model[0], model[1])) {
3874 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3875 model[0], model[1]);
3876 return 0;
3877 }
3878
3879 if (strcmp(serial[0], serial[1])) {
3880 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3881 serial[0], serial[1]);
3882 return 0;
3883 }
3884
3885 return 1;
3886}
3887
3888/**
3889 * ata_dev_reread_id - Re-read IDENTIFY data
3890 * @dev: target ATA device
3891 * @readid_flags: read ID flags
3892 *
3893 * Re-read IDENTIFY page and make sure @dev is still attached to
3894 * the port.
3895 *
3896 * LOCKING:
3897 * Kernel thread context (may sleep)
3898 *
3899 * RETURNS:
3900 * 0 on success, negative errno otherwise
3901 */
3902int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3903{
3904 unsigned int class = dev->class;
3905 u16 *id = (void *)dev->link->ap->sector_buf;
3906 int rc;
3907
3908 /* read ID data */
3909 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3910 if (rc)
3911 return rc;
3912
3913 /* is the device still there? */
3914 if (!ata_dev_same_device(dev, class, id))
3915 return -ENODEV;
3916
3917 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3918 return 0;
3919}
3920
3921/**
3922 * ata_dev_revalidate - Revalidate ATA device
3923 * @dev: device to revalidate
3924 * @new_class: new class code
3925 * @readid_flags: read ID flags
3926 *
3927 * Re-read IDENTIFY page, make sure @dev is still attached to the
3928 * port and reconfigure it according to the new IDENTIFY page.
3929 *
3930 * LOCKING:
3931 * Kernel thread context (may sleep)
3932 *
3933 * RETURNS:
3934 * 0 on success, negative errno otherwise
3935 */
3936int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3937 unsigned int readid_flags)
3938{
3939 u64 n_sectors = dev->n_sectors;
3940 u64 n_native_sectors = dev->n_native_sectors;
3941 int rc;
3942
3943 if (!ata_dev_enabled(dev))
3944 return -ENODEV;
3945
3946 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3947 if (ata_class_enabled(new_class) &&
3948 new_class != ATA_DEV_ATA &&
3949 new_class != ATA_DEV_ATAPI &&
3950 new_class != ATA_DEV_SEMB) {
3951 ata_dev_info(dev, "class mismatch %u != %u\n",
3952 dev->class, new_class);
3953 rc = -ENODEV;
3954 goto fail;
3955 }
3956
3957 /* re-read ID */
3958 rc = ata_dev_reread_id(dev, readid_flags);
3959 if (rc)
3960 goto fail;
3961
3962 /* configure device according to the new ID */
3963 rc = ata_dev_configure(dev);
3964 if (rc)
3965 goto fail;
3966
3967 /* verify n_sectors hasn't changed */
3968 if (dev->class != ATA_DEV_ATA || !n_sectors ||
3969 dev->n_sectors == n_sectors)
3970 return 0;
3971
3972 /* n_sectors has changed */
3973 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3974 (unsigned long long)n_sectors,
3975 (unsigned long long)dev->n_sectors);
3976
3977 /*
3978 * Something could have caused HPA to be unlocked
3979 * involuntarily. If n_native_sectors hasn't changed and the
3980 * new size matches it, keep the device.
3981 */
3982 if (dev->n_native_sectors == n_native_sectors &&
3983 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3984 ata_dev_warn(dev,
3985 "new n_sectors matches native, probably "
3986 "late HPA unlock, n_sectors updated\n");
3987 /* use the larger n_sectors */
3988 return 0;
3989 }
3990
3991 /*
3992 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
3993 * unlocking HPA in those cases.
3994 *
3995 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3996 */
3997 if (dev->n_native_sectors == n_native_sectors &&
3998 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3999 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4000 ata_dev_warn(dev,
4001 "old n_sectors matches native, probably "
4002 "late HPA lock, will try to unlock HPA\n");
4003 /* try unlocking HPA */
4004 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4005 rc = -EIO;
4006 } else
4007 rc = -ENODEV;
4008
4009 /* restore original n_[native_]sectors and fail */
4010 dev->n_native_sectors = n_native_sectors;
4011 dev->n_sectors = n_sectors;
4012 fail:
4013 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4014 return rc;
4015}
4016
4017struct ata_blacklist_entry {
4018 const char *model_num;
4019 const char *model_rev;
4020 unsigned long horkage;
4021};
4022
4023static const struct ata_blacklist_entry ata_device_blacklist [] = {
4024 /* Devices with DMA related problems under Linux */
4025 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4026 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4027 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4028 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4029 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4030 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4031 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4032 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4033 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4034 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4035 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4036 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4037 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4038 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4039 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4040 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4041 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4042 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4043 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4044 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4045 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4046 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4047 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4048 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4049 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4050 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4051 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4052 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4053 /* Odd clown on sil3726/4726 PMPs */
4054 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4055
4056 /* Weird ATAPI devices */
4057 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4058 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4059
4060 /* Devices we expect to fail diagnostics */
4061
4062 /* Devices where NCQ should be avoided */
4063 /* NCQ is slow */
4064 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4065 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4066 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4067 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4068 /* NCQ is broken */
4069 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4070 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4071 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4072 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4073 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4074
4075 /* Seagate NCQ + FLUSH CACHE firmware bug */
4076 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4077 ATA_HORKAGE_FIRMWARE_WARN },
4078
4079 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4080 ATA_HORKAGE_FIRMWARE_WARN },
4081
4082 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4083 ATA_HORKAGE_FIRMWARE_WARN },
4084
4085 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4086 ATA_HORKAGE_FIRMWARE_WARN },
4087
4088 /* Blacklist entries taken from Silicon Image 3124/3132
4089 Windows driver .inf file - also several Linux problem reports */
4090 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4091 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4092 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4093
4094 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4095 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4096
4097 /* devices which puke on READ_NATIVE_MAX */
4098 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4099 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4100 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4101 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4102
4103 /* this one allows HPA unlocking but fails IOs on the area */
4104 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4105
4106 /* Devices which report 1 sector over size HPA */
4107 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4108 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4109 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4110
4111 /* Devices which get the IVB wrong */
4112 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4113 /* Maybe we should just blacklist TSSTcorp... */
4114 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4115
4116 /* Devices that do not need bridging limits applied */
4117 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4118
4119 /* Devices which aren't very happy with higher link speeds */
4120 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4121
4122 /*
4123 * Devices which choke on SETXFER. Applies only if both the
4124 * device and controller are SATA.
4125 */
4126 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4127 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4128 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4129
4130 /* End Marker */
4131 { }
4132};
4133
4134/**
4135 * glob_match - match a text string against a glob-style pattern
4136 * @text: the string to be examined
4137 * @pattern: the glob-style pattern to be matched against
4138 *
4139 * Either/both of text and pattern can be empty strings.
4140 *
4141 * Match text against a glob-style pattern, with wildcards and simple sets:
4142 *
4143 * ? matches any single character.
4144 * * matches any run of characters.
4145 * [xyz] matches a single character from the set: x, y, or z.
4146 * [a-d] matches a single character from the range: a, b, c, or d.
4147 * [a-d0-9] matches a single character from either range.
4148 *
 * The special characters ?, [, -, or * can be matched using a set, e.g. [*]
4150 * Behaviour with malformed patterns is undefined, though generally reasonable.
4151 *
4152 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
4153 *
4154 * This function uses one level of recursion per '*' in pattern.
4155 * Since it calls _nothing_ else, and has _no_ explicit local variables,
4156 * this will not cause stack problems for any reasonable use here.
4157 *
4158 * RETURNS:
4159 * 0 on match, 1 otherwise.
4160 */
4161static int glob_match (const char *text, const char *pattern)
4162{
4163 do {
4164 /* Match single character or a '?' wildcard */
4165 if (*text == *pattern || *pattern == '?') {
4166 if (!*pattern++)
4167 return 0; /* End of both strings: match */
4168 } else {
4169 /* Match single char against a '[' bracketed ']' pattern set */
4170 if (!*text || *pattern != '[')
4171 break; /* Not a pattern set */
4172 while (*++pattern && *pattern != ']' && *text != *pattern) {
4173 if (*pattern == '-' && *(pattern - 1) != '[')
4174 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4175 ++pattern;
4176 break;
4177 }
4178 }
4179 if (!*pattern || *pattern == ']')
4180 return 1; /* No match */
4181 while (*pattern && *pattern++ != ']');
4182 }
4183 } while (*++text && *pattern);
4184
4185 /* Match any run of chars against a '*' wildcard */
4186 if (*pattern == '*') {
4187 if (!*++pattern)
4188 return 0; /* Match: avoid recursion at end of pattern */
4189 /* Loop to handle additional pattern chars after the wildcard */
4190 while (*text) {
4191 if (glob_match(text, pattern) == 0)
4192 return 0; /* Remainder matched */
4193 ++text; /* Absorb (match) this char and try again */
4194 }
4195 }
4196 if (!*text && !*pattern)
4197 return 0; /* End of both strings: match */
4198 return 1; /* No match */
4199}
4200
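/**
 *	ata_dev_blacklisted - look up horkage flags for a device
 *	@dev: ATA device to look up
 *
 *	Match the model number and firmware revision from @dev's
 *	IDENTIFY data against ata_device_blacklist[] using
 *	glob_match().
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Horkage flags of the first matching entry, 0 if none match.
 */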
4201static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4202{
4203 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4204 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4205 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4206
4207 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4208 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4209
4210 while (ad->model_num) {
4211 if (!glob_match(model_num, ad->model_num)) {
4212 if (ad->model_rev == NULL)
4213 return ad->horkage;
4214 if (!glob_match(model_rev, ad->model_rev))
4215 return ad->horkage;
4216 }
4217 ad++;
4218 }
4219 return 0;
4220}
4221
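/**
 *	ata_dma_blacklisted - check whether a device must avoid DMA
 *	@dev: ATA device to check
 *
 *	RETURNS:
 *	1 if DMA must not be used with @dev, 0 otherwise.
 */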
4222static int ata_dma_blacklisted(const struct ata_device *dev)
4223{
4224 /* We don't support polling DMA.
	 * Blacklist DMA (and use PIO instead) for ATAPI devices with
	 * CDB-intr if the LLDD handles interrupts only in the
	 * HSM_ST_LAST state.
4227 */
4228 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4229 (dev->flags & ATA_DFLAG_CDB_INTR))
4230 return 1;
4231 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4232}
4233
4234/**
4235 * ata_is_40wire - check drive side detection
4236 * @dev: device
4237 *
4238 * Perform drive side detection decoding, allowing for device vendors
4239 * who can't follow the documentation.
4240 */
4241
4242static int ata_is_40wire(struct ata_device *dev)
4243{
4244 if (dev->horkage & ATA_HORKAGE_IVB)
4245 return ata_drive_40wire_relaxed(dev->id);
4246 return ata_drive_40wire(dev->id);
4247}
4248
4249/**
4250 * cable_is_40wire - 40/80/SATA decider
4251 * @ap: port to consider
4252 *
4253 * This function encapsulates the policy for speed management
4254 * in one place. At the moment we don't cache the result but
4255 * there is a good case for setting ap->cbl to the result when
4256 * we are called with unknown cables (and figuring out if it
4257 * impacts hotplug at all).
4258 *
4259 * Return 1 if the cable appears to be 40 wire.
4260 */
4261
4262static int cable_is_40wire(struct ata_port *ap)
4263{
4264 struct ata_link *link;
4265 struct ata_device *dev;
4266
4267 /* If the controller thinks we are 40 wire, we are. */
4268 if (ap->cbl == ATA_CBL_PATA40)
4269 return 1;
4270
4271 /* If the controller thinks we are 80 wire, we are. */
4272 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4273 return 0;
4274
4275 /* If the system is known to be 40 wire short cable (eg
4276 * laptop), then we allow 80 wire modes even if the drive
4277 * isn't sure.
4278 */
4279 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4280 return 0;
4281
4282 /* If the controller doesn't know, we scan.
4283 *
4284 * Note: We look for all 40 wire detects at this point. Any
4285 * 80 wire detect is taken to be 80 wire cable because
4286 * - in many setups only the one drive (slave if present) will
4287 * give a valid detect
4288 * - if you have a non detect capable drive you don't want it
4289 * to colour the choice
4290 */
4291 ata_for_each_link(link, ap, EDGE) {
4292 ata_for_each_dev(dev, link, ENABLED) {
4293 if (!ata_is_40wire(dev))
4294 return 0;
4295 }
4296 }
4297 return 1;
4298}
4299
4300/**
4301 * ata_dev_xfermask - Compute supported xfermask of the given device
4302 * @dev: Device to compute xfermask for
4303 *
4304 * Compute supported xfermask of @dev and store it in
4305 * dev->*_mask. This function is responsible for applying all
4306 * known limits including host controller limits, device
4307 * blacklist, etc...
4308 *
4309 * LOCKING:
4310 * None.
4311 */
4312static void ata_dev_xfermask(struct ata_device *dev)
4313{
4314 struct ata_link *link = dev->link;
4315 struct ata_port *ap = link->ap;
4316 struct ata_host *host = ap->host;
4317 unsigned long xfer_mask;
4318
4319 /* controller modes available */
4320 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4321 ap->mwdma_mask, ap->udma_mask);
4322
4323 /* drive modes available */
4324 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4325 dev->mwdma_mask, dev->udma_mask);
4326 xfer_mask &= ata_id_xfermask(dev->id);
4327
4328 /*
4329 * CFA Advanced TrueIDE timings are not allowed on a shared
4330 * cable
4331 */
4332 if (ata_dev_pair(dev)) {
4333 /* No PIO5 or PIO6 */
4334 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4335 /* No MWDMA3 or MWDMA 4 */
4336 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4337 }
4338
4339 if (ata_dma_blacklisted(dev)) {
4340 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4341 ata_dev_warn(dev,
4342 "device is on DMA blacklist, disabling DMA\n");
4343 }
4344
4345 if ((host->flags & ATA_HOST_SIMPLEX) &&
4346 host->simplex_claimed && host->simplex_claimed != ap) {
4347 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4348 ata_dev_warn(dev,
4349 "simplex DMA is claimed by other device, disabling DMA\n");
4350 }
4351
4352 if (ap->flags & ATA_FLAG_NO_IORDY)
4353 xfer_mask &= ata_pio_mask_no_iordy(dev);
4354
4355 if (ap->ops->mode_filter)
4356 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4357
4358 /* Apply cable rule here. Don't apply it early because when
4359 * we handle hot plug the cable type can itself change.
4360 * Check this last so that we know if the transfer rate was
4361 * solely limited by the cable.
4362 * Unknown or 80 wire cables reported host side are checked
4363 * drive side as well. Cases where we know a 40wire cable
4364 * is used safely for 80 are not checked here.
4365 */
4366 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4367 /* UDMA/44 or higher would be available */
4368 if (cable_is_40wire(ap)) {
4369 ata_dev_warn(dev,
4370 "limited to UDMA/33 due to 40-wire cable\n");
4371 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4372 }
4373
4374 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4375 &dev->mwdma_mask, &dev->udma_mask);
4376}
4377
4378/**
4379 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4380 * @dev: Device to which command will be sent
4381 *
 * Issue SET FEATURES - XFER MODE command to device @dev.
4384 *
4385 * LOCKING:
4386 * PCI/etc. bus probe sem.
4387 *
4388 * RETURNS:
4389 * 0 on success, AC_ERR_* mask otherwise.
4390 */
4391
4392static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4393{
4394 struct ata_taskfile tf;
4395 unsigned int err_mask;
4396
4397 /* set up set-features taskfile */
4398 DPRINTK("set features - xfer mode\n");
4399
4400 /* Some controllers and ATAPI devices show flaky interrupt
4401 * behavior after setting xfer mode. Use polling instead.
4402 */
4403 ata_tf_init(dev, &tf);
4404 tf.command = ATA_CMD_SET_FEATURES;
4405 tf.feature = SETFEATURES_XFER;
4406 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4407 tf.protocol = ATA_PROT_NODATA;
4408 /* If we are using IORDY we must send the mode setting command */
4409 if (ata_pio_need_iordy(dev))
4410 tf.nsect = dev->xfer_mode;
4411 /* If the device has IORDY and the controller does not - turn it off */
4412 else if (ata_id_has_iordy(dev->id))
4413 tf.nsect = 0x01;
4414 else /* In the ancient relic department - skip all of this */
4415 return 0;
4416
4417 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4418
4419 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4420 return err_mask;
4421}
4422
4423/**
4424 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4425 * @dev: Device to which command will be sent
4426 * @enable: Whether to enable or disable the feature
 * @feature: The feature to set (written to the sector count field)
 *
 * Issue SET FEATURES - SATA FEATURES command to device @dev with
 * the sector count set to @feature.
4431 *
4432 * LOCKING:
4433 * PCI/etc. bus probe sem.
4434 *
4435 * RETURNS:
4436 * 0 on success, AC_ERR_* mask otherwise.
4437 */
4438unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4439{
4440 struct ata_taskfile tf;
4441 unsigned int err_mask;
4442
4443 /* set up set-features taskfile */
4444 DPRINTK("set features - SATA features\n");
4445
4446 ata_tf_init(dev, &tf);
4447 tf.command = ATA_CMD_SET_FEATURES;
4448 tf.feature = enable;
4449 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4450 tf.protocol = ATA_PROT_NODATA;
4451 tf.nsect = feature;
4452
4453 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4454
4455 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4456 return err_mask;
4457}
4458
4459/**
4460 * ata_dev_init_params - Issue INIT DEV PARAMS command
4461 * @dev: Device to which command will be sent
4462 * @heads: Number of heads (taskfile parameter)
4463 * @sectors: Number of sectors (taskfile parameter)
4464 *
4465 * LOCKING:
4466 * Kernel thread context (may sleep)
4467 *
4468 * RETURNS:
4469 * 0 on success, AC_ERR_* mask otherwise.
4470 */
4471static unsigned int ata_dev_init_params(struct ata_device *dev,
4472 u16 heads, u16 sectors)
4473{
4474 struct ata_taskfile tf;
4475 unsigned int err_mask;
4476
4477 /* Number of sectors per track 1-255. Number of heads 1-16 */
4478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4479 return AC_ERR_INVALID;
4480
4481 /* set up init dev params taskfile */
4482 DPRINTK("init dev params \n");
4483
4484 ata_tf_init(dev, &tf);
4485 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4486 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4487 tf.protocol = ATA_PROT_NODATA;
4488 tf.nsect = sectors;
4489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4490
4491 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just-out-of-spec drive
	   and we should continue, as we issue the setup based on the
	   drive's reported working geometry */
4495 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4496 err_mask = 0;
4497
4498 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4499 return err_mask;
4500}
4501
4502/**
4503 * ata_sg_clean - Unmap DMA memory associated with command
4504 * @qc: Command containing DMA memory to be released
4505 *
4506 * Unmap all mapped DMA memory associated with this command.
4507 *
4508 * LOCKING:
4509 * spin_lock_irqsave(host lock)
4510 */
4511void ata_sg_clean(struct ata_queued_cmd *qc)
4512{
4513 struct ata_port *ap = qc->ap;
4514 struct scatterlist *sg = qc->sg;
4515 int dir = qc->dma_dir;
4516
4517 WARN_ON_ONCE(sg == NULL);
4518
4519 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4520
4521 if (qc->n_elem)
4522 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4523
4524 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4525 qc->sg = NULL;
4526}
4527
4528/**
4529 * atapi_check_dma - Check whether ATAPI DMA can be supported
4530 * @qc: Metadata associated with taskfile to check
4531 *
4532 * Allow low-level driver to filter ATA PACKET commands, returning
4533 * a status indicating whether or not it is OK to use DMA for the
4534 * supplied PACKET command.
4535 *
4536 * LOCKING:
4537 * spin_lock_irqsave(host lock)
4538 *
4539 * RETURNS: 0 when ATAPI DMA can be used
4540 * nonzero otherwise
4541 */
4542int atapi_check_dma(struct ata_queued_cmd *qc)
4543{
4544 struct ata_port *ap = qc->ap;
4545
	/* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4547 * few ATAPI devices choke on such DMA requests.
4548 */
4549 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4550 unlikely(qc->nbytes & 15))
4551 return 1;
4552
4553 if (ap->ops->check_atapi_dma)
4554 return ap->ops->check_atapi_dma(qc);
4555
4556 return 0;
4557}
4558
4559/**
4560 * ata_std_qc_defer - Check whether a qc needs to be deferred
4561 * @qc: ATA command in question
4562 *
4563 * Non-NCQ commands cannot run with any other command, NCQ or
 * not. As the upper layer only knows the queue depth, we are
4565 * responsible for maintaining exclusion. This function checks
4566 * whether a new command @qc can be issued.
4567 *
4568 * LOCKING:
4569 * spin_lock_irqsave(host lock)
4570 *
4571 * RETURNS:
4572 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4573 */
4574int ata_std_qc_defer(struct ata_queued_cmd *qc)
4575{
4576 struct ata_link *link = qc->dev->link;
4577
4578 if (qc->tf.protocol == ATA_PROT_NCQ) {
4579 if (!ata_tag_valid(link->active_tag))
4580 return 0;
4581 } else {
4582 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4583 return 0;
4584 }
4585
4586 return ATA_DEFER_LINK;
4587}
4588
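/* ->qc_prep implementation for controllers that need no per-command preparation */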
4589void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4590
4591/**
4592 * ata_sg_init - Associate command with scatter-gather table.
4593 * @qc: Command to be associated
4594 * @sg: Scatter-gather table.
4595 * @n_elem: Number of elements in s/g table.
4596 *
4597 * Initialize the data-related elements of queued_cmd @qc
4598 * to point to a scatter-gather table @sg, containing @n_elem
4599 * elements.
4600 *
4601 * LOCKING:
4602 * spin_lock_irqsave(host lock)
4603 */
4604void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4605 unsigned int n_elem)
4606{
4607 qc->sg = sg;
4608 qc->n_elem = n_elem;
4609 qc->cursg = qc->sg;
4610}
4611
4612/**
4613 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4614 * @qc: Command with scatter-gather table to be mapped.
4615 *
4616 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4617 *
4618 * LOCKING:
4619 * spin_lock_irqsave(host lock)
4620 *
4621 * RETURNS:
4622 * Zero on success, negative on error.
4623 *
4624 */
4625static int ata_sg_setup(struct ata_queued_cmd *qc)
4626{
4627 struct ata_port *ap = qc->ap;
4628 unsigned int n_elem;
4629
4630 VPRINTK("ENTER, ata%u\n", ap->print_id);
4631
4632 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4633 if (n_elem < 1)
4634 return -1;
4635
4636 DPRINTK("%d sg elements mapped\n", n_elem);
4637 qc->orig_n_elem = qc->n_elem;
4638 qc->n_elem = n_elem;
4639 qc->flags |= ATA_QCFLAG_DMAMAP;
4640
4641 return 0;
4642}
4643
4644/**
4645 * swap_buf_le16 - swap halves of 16-bit words in place
4646 * @buf: Buffer to swap
4647 * @buf_words: Number of 16-bit words in buffer.
4648 *
4649 * Swap halves of 16-bit words if needed to convert from
4650 * little-endian byte order to native cpu byte order, or
4651 * vice-versa.
4652 *
4653 * LOCKING:
4654 * Inherited from caller.
4655 */
4656void swap_buf_le16(u16 *buf, unsigned int buf_words)
4657{
4658#ifdef __BIG_ENDIAN
4659 unsigned int i;
4660
4661 for (i = 0; i < buf_words; i++)
4662 buf[i] = le16_to_cpu(buf[i]);
4663#endif /* __BIG_ENDIAN */
4664}
4665
4666/**
4667 * ata_qc_new - Request an available ATA command, for queueing
4668 * @ap: target port
4669 *
4670 * LOCKING:
4671 * None.
4672 */
4673
4674static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4675{
4676 struct ata_queued_cmd *qc = NULL;
4677 unsigned int i;
4678
4679 /* no command while frozen */
4680 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4681 return NULL;
4682
4683 /* the last tag is reserved for internal command. */
4684 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4685 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4686 qc = __ata_qc_from_tag(ap, i);
4687 break;
4688 }
4689
4690 if (qc)
4691 qc->tag = i;
4692
4693 return qc;
4694}
4695
4696/**
4697 * ata_qc_new_init - Request an available ATA command, and initialize it
4698 * @dev: Device from whom we request an available command structure
4699 *
4700 * LOCKING:
4701 * None.
4702 */
4703
4704struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4705{
4706 struct ata_port *ap = dev->link->ap;
4707 struct ata_queued_cmd *qc;
4708
4709 qc = ata_qc_new(ap);
4710 if (qc) {
4711 qc->scsicmd = NULL;
4712 qc->ap = ap;
4713 qc->dev = dev;
4714
4715 ata_qc_reinit(qc);
4716 }
4717
4718 return qc;
4719}
4720
4721/**
4722 * ata_qc_free - free unused ata_queued_cmd
4723 * @qc: Command to complete
4724 *
4725 * Designed to free unused ata_queued_cmd object
4726 * in case something prevents using it.
4727 *
4728 * LOCKING:
4729 * spin_lock_irqsave(host lock)
4730 */
4731void ata_qc_free(struct ata_queued_cmd *qc)
4732{
4733 struct ata_port *ap;
4734 unsigned int tag;
4735
4736 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4737 ap = qc->ap;
4738
4739 qc->flags = 0;
4740 tag = qc->tag;
4741 if (likely(ata_tag_valid(tag))) {
4742 qc->tag = ATA_TAG_POISON;
4743 clear_bit(tag, &ap->qc_allocated);
4744 }
4745}
4746
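/**
 *	__ata_qc_complete - Internal command completion
 *	@qc: Command to complete
 *
 *	Unmap DMA memory if needed, mark @qc and its link inactive,
 *	clear the port's active-tag bookkeeping and invoke the
 *	completion callback.  EH-related handling is performed by the
 *	ata_qc_complete() wrapper.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */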
4747void __ata_qc_complete(struct ata_queued_cmd *qc)
4748{
4749 struct ata_port *ap;
4750 struct ata_link *link;
4751
4752 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4753 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4754 ap = qc->ap;
4755 link = qc->dev->link;
4756
4757 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4758 ata_sg_clean(qc);
4759
4760 /* command should be marked inactive atomically with qc completion */
4761 if (qc->tf.protocol == ATA_PROT_NCQ) {
4762 link->sactive &= ~(1 << qc->tag);
4763 if (!link->sactive)
4764 ap->nr_active_links--;
4765 } else {
4766 link->active_tag = ATA_TAG_POISON;
4767 ap->nr_active_links--;
4768 }
4769
4770 /* clear exclusive status */
4771 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4772 ap->excl_link == link))
4773 ap->excl_link = NULL;
4774
4775 /* atapi: mark qc as inactive to prevent the interrupt handler
4776 * from completing the command twice later, before the error handler
4777 * is called. (when rc != 0 and atapi request sense is needed)
4778 */
4779 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4780 ap->qc_active &= ~(1 << qc->tag);
4781
4782 /* call completion callback */
4783 qc->complete_fn(qc);
4784}
4785
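/* fill qc->result_tf from hardware via the port's ->qc_fill_rtf callback */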
4786static void fill_result_tf(struct ata_queued_cmd *qc)
4787{
4788 struct ata_port *ap = qc->ap;
4789
4790 qc->result_tf.flags = qc->tf.flags;
4791 ap->ops->qc_fill_rtf(qc);
4792}
4793
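/*
 * Clear ATA_DFLAG_DUBIOUS_XFER once a data command has completed using
 * the transfer mode that needed verification (DMA if the device supports
 * it, otherwise PIO).
 */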
4794static void ata_verify_xfer(struct ata_queued_cmd *qc)
4795{
4796 struct ata_device *dev = qc->dev;
4797
4798 if (ata_is_nodata(qc->tf.protocol))
4799 return;
4800
4801 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4802 return;
4803
4804 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4805}
4806
4807/**
4808 * ata_qc_complete - Complete an active ATA command
4809 * @qc: Command to complete
4810 *
4811 * Indicate to the mid and upper layers that an ATA command has
4812 * completed, with either an ok or not-ok status.
4813 *
4814 * Refrain from calling this function multiple times when
4815 * successfully completing multiple NCQ commands.
4816 * ata_qc_complete_multiple() should be used instead, which will
4817 * properly update IRQ expect state.
4818 *
4819 * LOCKING:
4820 * spin_lock_irqsave(host lock)
4821 */
4822void ata_qc_complete(struct ata_queued_cmd *qc)
4823{
4824 struct ata_port *ap = qc->ap;
4825
4826 /* XXX: New EH and old EH use different mechanisms to
4827 * synchronize EH with regular execution path.
4828 *
4829 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4830 * Normal execution path is responsible for not accessing a
4831 * failed qc. libata core enforces the rule by returning NULL
4832 * from ata_qc_from_tag() for failed qcs.
4833 *
4834 * Old EH depends on ata_qc_complete() nullifying completion
4835 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4836 * not synchronize with interrupt handler. Only PIO task is
4837 * taken care of.
4838 */
4839 if (ap->ops->error_handler) {
4840 struct ata_device *dev = qc->dev;
4841 struct ata_eh_info *ehi = &dev->link->eh_info;
4842
4843 if (unlikely(qc->err_mask))
4844 qc->flags |= ATA_QCFLAG_FAILED;
4845
4846 /*
4847 * Finish internal commands without any further processing
4848 * and always with the result TF filled.
4849 */
4850 if (unlikely(ata_tag_internal(qc->tag))) {
4851 fill_result_tf(qc);
4852 __ata_qc_complete(qc);
4853 return;
4854 }
4855
4856 /*
4857 * Non-internal qc has failed. Fill the result TF and
4858 * summon EH.
4859 */
4860 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4861 fill_result_tf(qc);
4862 ata_qc_schedule_eh(qc);
4863 return;
4864 }
4865
4866 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4867
4868 /* read result TF if requested */
4869 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4870 fill_result_tf(qc);
4871
4872 /* Some commands need post-processing after successful
4873 * completion.
4874 */
4875 switch (qc->tf.command) {
4876 case ATA_CMD_SET_FEATURES:
4877 if (qc->tf.feature != SETFEATURES_WC_ON &&
4878 qc->tf.feature != SETFEATURES_WC_OFF)
4879 break;
4880 /* fall through */
4881 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4882 case ATA_CMD_SET_MULTI: /* multi_count changed */
4883 /* revalidate device */
4884 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4885 ata_port_schedule_eh(ap);
4886 break;
4887
4888 case ATA_CMD_SLEEP:
4889 dev->flags |= ATA_DFLAG_SLEEPING;
4890 break;
4891 }
4892
4893 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4894 ata_verify_xfer(qc);
4895
4896 __ata_qc_complete(qc);
4897 } else {
4898 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4899 return;
4900
4901 /* read result TF if failed or requested */
4902 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4903 fill_result_tf(qc);
4904
4905 __ata_qc_complete(qc);
4906 }
4907}
4908
4909/**
4910 * ata_qc_complete_multiple - Complete multiple qcs successfully
4911 * @ap: port in question
4912 * @qc_active: new qc_active mask
4913 *
 * Complete in-flight commands. This function is meant to be
 * called from a low-level driver's interrupt routine to complete
 * requests normally. ap->qc_active and @qc_active are compared
 * and commands are completed accordingly.
4918 *
4919 * Always use this function when completing multiple NCQ commands
4920 * from IRQ handlers instead of calling ata_qc_complete()
4921 * multiple times to keep IRQ expect status properly in sync.
4922 *
4923 * LOCKING:
4924 * spin_lock_irqsave(host lock)
4925 *
4926 * RETURNS:
4927 * Number of completed commands on success, -errno otherwise.
4928 */
4929int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4930{
4931 int nr_done = 0;
4932 u32 done_mask;
4933
4934 done_mask = ap->qc_active ^ qc_active;
4935
4936 if (unlikely(done_mask & qc_active)) {
4937 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4938 ap->qc_active, qc_active);
4939 return -EINVAL;
4940 }
4941
4942 while (done_mask) {
4943 struct ata_queued_cmd *qc;
4944 unsigned int tag = __ffs(done_mask);
4945
4946 qc = ata_qc_from_tag(ap, tag);
4947 if (qc) {
4948 ata_qc_complete(qc);
4949 nr_done++;
4950 }
4951 done_mask &= ~(1 << tag);
4952 }
4953
4954 return nr_done;
4955}
4956
4957/**
4958 * ata_qc_issue - issue taskfile to device
4959 * @qc: command to issue to device
4960 *
 * Prepare an ATA command for submission to the device.
4962 * This includes mapping the data into a DMA-able
4963 * area, filling in the S/G table, and finally
4964 * writing the taskfile to hardware, starting the command.
4965 *
4966 * LOCKING:
4967 * spin_lock_irqsave(host lock)
4968 */
4969void ata_qc_issue(struct ata_queued_cmd *qc)
4970{
4971 struct ata_port *ap = qc->ap;
4972 struct ata_link *link = qc->dev->link;
4973 u8 prot = qc->tf.protocol;
4974
4975 /* Make sure only one non-NCQ command is outstanding. The
4976 * check is skipped for old EH because it reuses active qc to
4977 * request ATAPI sense.
4978 */
4979 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4980
4981 if (ata_is_ncq(prot)) {
4982 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
4983
4984 if (!link->sactive)
4985 ap->nr_active_links++;
4986 link->sactive |= 1 << qc->tag;
4987 } else {
4988 WARN_ON_ONCE(link->sactive);
4989
4990 ap->nr_active_links++;
4991 link->active_tag = qc->tag;
4992 }
4993
4994 qc->flags |= ATA_QCFLAG_ACTIVE;
4995 ap->qc_active |= 1 << qc->tag;
4996
4997 /*
4998 * We guarantee to LLDs that they will have at least one
4999 * non-zero sg if the command is a data command.
5000 */
5001 if (WARN_ON_ONCE(ata_is_data(prot) &&
5002 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5003 goto sys_err;
5004
5005 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5006 (ap->flags & ATA_FLAG_PIO_DMA)))
5007 if (ata_sg_setup(qc))
5008 goto sys_err;
5009
5010 /* if device is sleeping, schedule reset and abort the link */
5011 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5012 link->eh_info.action |= ATA_EH_RESET;
5013 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5014 ata_link_abort(link);
5015 return;
5016 }
5017
5018 ap->ops->qc_prep(qc);
5019
5020 qc->err_mask |= ap->ops->qc_issue(qc);
5021 if (unlikely(qc->err_mask))
5022 goto err;
5023 return;
5024
5025sys_err:
5026 qc->err_mask |= AC_ERR_SYSTEM;
5027err:
5028 ata_qc_complete(qc);
5029}
5030
5031/**
5032 * sata_scr_valid - test whether SCRs are accessible
5033 * @link: ATA link to test SCR accessibility for
5034 *
5035 * Test whether SCRs are accessible for @link.
5036 *
5037 * LOCKING:
5038 * None.
5039 *
5040 * RETURNS:
5041 * 1 if SCRs are accessible, 0 otherwise.
5042 */
5043int sata_scr_valid(struct ata_link *link)
5044{
5045 struct ata_port *ap = link->ap;
5046
5047 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5048}
5049
5050/**
5051 * sata_scr_read - read SCR register of the specified port
5052 * @link: ATA link to read SCR for
5053 * @reg: SCR to read
5054 * @val: Place to store read value
5055 *
5056 * Read SCR register @reg of @link into *@val. This function is
5057 * guaranteed to succeed if @link is ap->link, the cable type of
5058 * the port is SATA and the port implements ->scr_read.
5059 *
5060 * LOCKING:
5061 * None if @link is ap->link. Kernel thread context otherwise.
5062 *
5063 * RETURNS:
5064 * 0 on success, negative errno on failure.
5065 */
5066int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5067{
5068 if (ata_is_host_link(link)) {
5069 if (sata_scr_valid(link))
5070 return link->ap->ops->scr_read(link, reg, val);
5071 return -EOPNOTSUPP;
5072 }
5073
5074 return sata_pmp_scr_read(link, reg, val);
5075}
5076
5077/**
5078 * sata_scr_write - write SCR register of the specified port
5079 * @link: ATA link to write SCR for
5080 * @reg: SCR to write
5081 * @val: value to write
5082 *
5083 * Write @val to SCR register @reg of @link. This function is
5084 * guaranteed to succeed if @link is ap->link, the cable type of
5085 * the port is SATA and the port implements ->scr_read.
5086 *
5087 * LOCKING:
5088 * None if @link is ap->link. Kernel thread context otherwise.
5089 *
5090 * RETURNS:
5091 * 0 on success, negative errno on failure.
5092 */
5093int sata_scr_write(struct ata_link *link, int reg, u32 val)
5094{
5095 if (ata_is_host_link(link)) {
5096 if (sata_scr_valid(link))
5097 return link->ap->ops->scr_write(link, reg, val);
5098 return -EOPNOTSUPP;
5099 }
5100
5101 return sata_pmp_scr_write(link, reg, val);
5102}
5103
5104/**
5105 * sata_scr_write_flush - write SCR register of the specified port and flush
5106 * @link: ATA link to write SCR for
5107 * @reg: SCR to write
5108 * @val: value to write
5109 *
5110 * This function is identical to sata_scr_write() except that this
 * function performs a flush after writing to the register.
5112 *
5113 * LOCKING:
5114 * None if @link is ap->link. Kernel thread context otherwise.
5115 *
5116 * RETURNS:
5117 * 0 on success, negative errno on failure.
5118 */
5119int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5120{
5121 if (ata_is_host_link(link)) {
5122 int rc;
5123
5124 if (sata_scr_valid(link)) {
5125 rc = link->ap->ops->scr_write(link, reg, val);
5126 if (rc == 0)
5127 rc = link->ap->ops->scr_read(link, reg, &val);
5128 return rc;
5129 }
5130 return -EOPNOTSUPP;
5131 }
5132
5133 return sata_pmp_scr_write(link, reg, val);
5134}
5135
5136/**
5137 * ata_phys_link_online - test whether the given link is online
5138 * @link: ATA link to test
5139 *
5140 * Test whether @link is online. Note that this function returns
5141 * 0 if online status of @link cannot be obtained, so
5142 * ata_link_online(link) != !ata_link_offline(link).
5143 *
5144 * LOCKING:
5145 * None.
5146 *
5147 * RETURNS:
5148 * True if the port online status is available and online.
5149 */
5150bool ata_phys_link_online(struct ata_link *link)
5151{
5152 u32 sstatus;
5153
5154 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5155 ata_sstatus_online(sstatus))
5156 return true;
5157 return false;
5158}
5159
5160/**
5161 * ata_phys_link_offline - test whether the given link is offline
5162 * @link: ATA link to test
5163 *
5164 * Test whether @link is offline. Note that this function
5165 * returns 0 if offline status of @link cannot be obtained, so
5166 * ata_link_online(link) != !ata_link_offline(link).
5167 *
5168 * LOCKING:
5169 * None.
5170 *
5171 * RETURNS:
5172 * True if the port offline status is available and offline.
5173 */
5174bool ata_phys_link_offline(struct ata_link *link)
5175{
5176 u32 sstatus;
5177
5178 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5179 !ata_sstatus_online(sstatus))
5180 return true;
5181 return false;
5182}
5183
5184/**
5185 * ata_link_online - test whether the given link is online
5186 * @link: ATA link to test
5187 *
5188 * Test whether @link is online. This is identical to
5189 * ata_phys_link_online() when there's no slave link. When
5190 * there's a slave link, this function should only be called on
5191 * the master link and will return true if any of M/S links is
5192 * online.
5193 *
5194 * LOCKING:
5195 * None.
5196 *
5197 * RETURNS:
5198 * True if the port online status is available and online.
5199 */
5200bool ata_link_online(struct ata_link *link)
5201{
5202 struct ata_link *slave = link->ap->slave_link;
5203
5204 WARN_ON(link == slave); /* shouldn't be called on slave link */
5205
5206 return ata_phys_link_online(link) ||
5207 (slave && ata_phys_link_online(slave));
5208}
5209
5210/**
5211 * ata_link_offline - test whether the given link is offline
5212 * @link: ATA link to test
5213 *
5214 * Test whether @link is offline. This is identical to
5215 * ata_phys_link_offline() when there's no slave link. When
5216 * there's a slave link, this function should only be called on
5217 * the master link and will return true if both M/S links are
5218 * offline.
5219 *
5220 * LOCKING:
5221 * None.
5222 *
5223 * RETURNS:
5224 * True if the port offline status is available and offline.
5225 */
5226bool ata_link_offline(struct ata_link *link)
5227{
5228 struct ata_link *slave = link->ap->slave_link;
5229
5230 WARN_ON(link == slave); /* shouldn't be called on slave link */
5231
5232 return ata_phys_link_offline(link) &&
5233 (!slave || ata_phys_link_offline(slave));
5234}
5235
5236#ifdef CONFIG_PM
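/**
 *	ata_host_request_pm - request PM operation from EH for all ports
 *	@host: target ATA host
 *	@mesg: PM message to pass to EH
 *	@action: EH actions to schedule on each link
 *	@ehi_flags: EH info flags to set on each link
 *	@wait: if non-zero, wait for each port's EH to finish and
 *	       collect the result
 *
 *	Schedule EH on every port of @host to perform the requested
 *	PM operation.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno from the first failing port otherwise.
 */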
5237static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5238 unsigned int action, unsigned int ehi_flags,
5239 int wait)
5240{
5241 unsigned long flags;
5242 int i, rc;
5243
5244 for (i = 0; i < host->n_ports; i++) {
5245 struct ata_port *ap = host->ports[i];
5246 struct ata_link *link;
5247
5248 /* Previous resume operation might still be in
5249 * progress. Wait for PM_PENDING to clear.
5250 */
5251 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5252 ata_port_wait_eh(ap);
5253 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5254 }
5255
5256 /* request PM ops to EH */
5257 spin_lock_irqsave(ap->lock, flags);
5258
5259 ap->pm_mesg = mesg;
5260 if (wait) {
5261 rc = 0;
5262 ap->pm_result = &rc;
5263 }
5264
5265 ap->pflags |= ATA_PFLAG_PM_PENDING;
5266 ata_for_each_link(link, ap, HOST_FIRST) {
5267 link->eh_info.action |= action;
5268 link->eh_info.flags |= ehi_flags;
5269 }
5270
5271 ata_port_schedule_eh(ap);
5272
5273 spin_unlock_irqrestore(ap->lock, flags);
5274
5275 /* wait and check result */
5276 if (wait) {
5277 ata_port_wait_eh(ap);
5278 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5279 if (rc)
5280 return rc;
5281 }
5282 }
5283
5284 return 0;
5285}
5286
5287/**
5288 * ata_host_suspend - suspend host
5289 * @host: host to suspend
5290 * @mesg: PM message
5291 *
5292 * Suspend @host. Actual operation is performed by EH. This
5293 * function requests EH to perform PM operations and waits for EH
5294 * to finish.
5295 *
5296 * LOCKING:
5297 * Kernel thread context (may sleep).
5298 *
5299 * RETURNS:
5300 * 0 on success, -errno on failure.
5301 */
5302int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5303{
5304 unsigned int ehi_flags = ATA_EHI_QUIET;
5305 int rc;
5306
5307 /*
 * On some hardware, a device fails to respond after being spun
 * down for suspend. As the device won't be used before being
5310 * resumed, we don't need to touch the device. Ask EH to skip
5311 * the usual stuff and proceed directly to suspend.
5312 *
5313 * http://thread.gmane.org/gmane.linux.ide/46764
5314 */
5315 if (mesg.event == PM_EVENT_SUSPEND)
5316 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5317
5318 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5319 if (rc == 0)
5320 host->dev->power.power_state = mesg;
5321 return rc;
5322}
5323
5324/**
5325 * ata_host_resume - resume host
5326 * @host: host to resume
5327 *
5328 * Resume @host. Actual operation is performed by EH. This
5329 * function requests EH to perform PM operations and returns.
 * Note that all resume operations are performed in parallel.
5331 *
5332 * LOCKING:
5333 * Kernel thread context (may sleep).
5334 */
5335void ata_host_resume(struct ata_host *host)
5336{
5337 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5338 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5339 host->dev->power.power_state = PMSG_ON;
5340}
5341#endif
5342
5343/**
5344 * ata_dev_init - Initialize an ata_device structure
5345 * @dev: Device structure to initialize
5346 *
5347 * Initialize @dev in preparation for probing.
5348 *
5349 * LOCKING:
5350 * Inherited from caller.
5351 */
5352void ata_dev_init(struct ata_device *dev)
5353{
5354 struct ata_link *link = ata_dev_phys_link(dev);
5355 struct ata_port *ap = link->ap;
5356 unsigned long flags;
5357
5358 /* SATA spd limit is bound to the attached device, reset together */
5359 link->sata_spd_limit = link->hw_sata_spd_limit;
5360 link->sata_spd = 0;
5361
5362 /* High bits of dev->flags are used to record warm plug
5363 * requests which occur asynchronously. Synchronize using
5364 * host lock.
5365 */
5366 spin_lock_irqsave(ap->lock, flags);
5367 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5368 dev->horkage = 0;
5369 spin_unlock_irqrestore(ap->lock, flags);
5370
5371 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5372 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5373 dev->pio_mask = UINT_MAX;
5374 dev->mwdma_mask = UINT_MAX;
5375 dev->udma_mask = UINT_MAX;
5376}
5377
5378/**
5379 * ata_link_init - Initialize an ata_link structure
5380 * @ap: ATA port link is attached to
5381 * @link: Link structure to initialize
5382 * @pmp: Port multiplier port number
5383 *
5384 * Initialize @link.
5385 *
5386 * LOCKING:
5387 * Kernel thread context (may sleep)
5388 */
5389void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5390{
5391 int i;
5392
5393 /* clear everything except for devices */
5394 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5395 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5396
5397 link->ap = ap;
5398 link->pmp = pmp;
5399 link->active_tag = ATA_TAG_POISON;
5400 link->hw_sata_spd_limit = UINT_MAX;
5401
5402 /* can't use iterator, ap isn't initialized yet */
5403 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5404 struct ata_device *dev = &link->device[i];
5405
5406 dev->link = link;
5407 dev->devno = dev - link->device;
5408#ifdef CONFIG_ATA_ACPI
5409 dev->gtf_filter = ata_acpi_gtf_filter;
5410#endif
5411 ata_dev_init(dev);
5412 }
5413}
5414
5415/**
5416 * sata_link_init_spd - Initialize link->sata_spd_limit
5417 * @link: Link to configure sata_spd_limit for
5418 *
5419 * Initialize @link->[hw_]sata_spd_limit to the currently
5420 * configured value.
5421 *
5422 * LOCKING:
5423 * Kernel thread context (may sleep).
5424 *
5425 * RETURNS:
5426 * 0 on success, -errno on failure.
5427 */
5428int sata_link_init_spd(struct ata_link *link)
5429{
5430 u8 spd;
5431 int rc;
5432
5433 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5434 if (rc)
5435 return rc;
5436
5437 spd = (link->saved_scontrol >> 4) & 0xf;
5438 if (spd)
5439 link->hw_sata_spd_limit &= (1 << spd) - 1;
5440
5441 ata_force_link_limits(link);
5442
5443 link->sata_spd_limit = link->hw_sata_spd_limit;
5444
5445 return 0;
5446}
5447
5448/**
5449 * ata_port_alloc - allocate and initialize basic ATA port resources
5450 * @host: ATA host this allocated port belongs to
5451 *
5452 * Allocate and initialize basic ATA port resources.
5453 *
5454 * RETURNS:
 * Allocated ATA port on success, NULL on failure.
5456 *
5457 * LOCKING:
5458 * Inherited from calling layer (may sleep).
5459 */
5460struct ata_port *ata_port_alloc(struct ata_host *host)
5461{
5462 struct ata_port *ap;
5463
5464 DPRINTK("ENTER\n");
5465
5466 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5467 if (!ap)
5468 return NULL;
5469
5470 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5471 ap->lock = &host->lock;
5472 ap->print_id = -1;
5473 ap->host = host;
5474 ap->dev = host->dev;
5475
5476#if defined(ATA_VERBOSE_DEBUG)
5477 /* turn on all debugging levels */
5478 ap->msg_enable = 0x00FF;
5479#elif defined(ATA_DEBUG)
5480 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5481#else
5482 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5483#endif
5484
5485 mutex_init(&ap->scsi_scan_mutex);
5486 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5487 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5488 INIT_LIST_HEAD(&ap->eh_done_q);
5489 init_waitqueue_head(&ap->eh_wait_q);
5490 init_completion(&ap->park_req_pending);
5491 init_timer_deferrable(&ap->fastdrain_timer);
5492 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5493 ap->fastdrain_timer.data = (unsigned long)ap;
5494
5495 ap->cbl = ATA_CBL_NONE;
5496
5497 ata_link_init(ap, &ap->link, 0);
5498
5499#ifdef ATA_IRQ_TRAP
5500 ap->stats.unhandled_irq = 1;
5501 ap->stats.idle_irq = 1;
5502#endif
5503 ata_sff_port_init(ap);
5504
5505 return ap;
5506}
5507
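/* devres release callback: put SCSI hosts and free all ports of the host */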
5508static void ata_host_release(struct device *gendev, void *res)
5509{
5510 struct ata_host *host = dev_get_drvdata(gendev);
5511 int i;
5512
5513 for (i = 0; i < host->n_ports; i++) {
5514 struct ata_port *ap = host->ports[i];
5515
5516 if (!ap)
5517 continue;
5518
5519 if (ap->scsi_host)
5520 scsi_host_put(ap->scsi_host);
5521
5522 kfree(ap->pmp_link);
5523 kfree(ap->slave_link);
5524 kfree(ap);
5525 host->ports[i] = NULL;
5526 }
5527
5528 dev_set_drvdata(gendev, NULL);
5529}
5530
5531/**
5532 * ata_host_alloc - allocate and init basic ATA host resources
5533 * @dev: generic device this host is associated with
5534 * @max_ports: maximum number of ATA ports associated with this host
5535 *
 * Allocate and initialize basic ATA host resources. An LLD calls
 * this function to allocate a host, initializes it fully, and then
 * attaches it using ata_host_register().
5539 *
5540 * @max_ports ports are allocated and host->n_ports is
5541 * initialized to @max_ports. The caller is allowed to decrease
5542 * host->n_ports before calling ata_host_register(). The unused
5543 * ports will be automatically freed on registration.
5544 *
5545 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
5547 *
5548 * LOCKING:
5549 * Inherited from calling layer (may sleep).
5550 */
5551struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5552{
5553 struct ata_host *host;
5554 size_t sz;
5555 int i;
5556
5557 DPRINTK("ENTER\n");
5558
5559 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5560 return NULL;
5561
5562 /* alloc a container for our list of ATA ports (buses) */
5563 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5565 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5566 if (!host)
5567 goto err_out;
5568
5569 devres_add(dev, host);
5570 dev_set_drvdata(dev, host);
5571
5572 spin_lock_init(&host->lock);
5573 mutex_init(&host->eh_mutex);
5574 host->dev = dev;
5575 host->n_ports = max_ports;
5576
5577 /* allocate ports bound to this host */
5578 for (i = 0; i < max_ports; i++) {
5579 struct ata_port *ap;
5580
5581 ap = ata_port_alloc(host);
5582 if (!ap)
5583 goto err_out;
5584
5585 ap->port_no = i;
5586 host->ports[i] = ap;
5587 }
5588
5589 devres_remove_group(dev, NULL);
5590 return host;
5591
5592 err_out:
5593 devres_release_group(dev, NULL);
5594 return NULL;
5595}
5596
5597/**
5598 * ata_host_alloc_pinfo - alloc host and init with port_info array
5599 * @dev: generic device this host is associated with
5600 * @ppi: array of ATA port_info to initialize host with
5601 * @n_ports: number of ATA ports attached to this host
5602 *
5603 * Allocate ATA host and initialize with info from @ppi. If NULL
5604 * terminated, @ppi may contain fewer entries than @n_ports. The
5605 * last entry will be used for the remaining ports.
5606 *
5607 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
5609 *
5610 * LOCKING:
5611 * Inherited from calling layer (may sleep).
5612 */
5613struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5614 const struct ata_port_info * const * ppi,
5615 int n_ports)
5616{
5617 const struct ata_port_info *pi;
5618 struct ata_host *host;
5619 int i, j;
5620
5621 host = ata_host_alloc(dev, n_ports);
5622 if (!host)
5623 return NULL;
5624
5625 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5626 struct ata_port *ap = host->ports[i];
5627
5628 if (ppi[j])
5629 pi = ppi[j++];
5630
5631 ap->pio_mask = pi->pio_mask;
5632 ap->mwdma_mask = pi->mwdma_mask;
5633 ap->udma_mask = pi->udma_mask;
5634 ap->flags |= pi->flags;
5635 ap->link.flags |= pi->link_flags;
5636 ap->ops = pi->port_ops;
5637
5638 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5639 host->ops = pi->port_ops;
5640 }
5641
5642 return host;
5643}
5644
5645/**
5646 * ata_slave_link_init - initialize slave link
5647 * @ap: port to initialize slave link for
5648 *
5649 * Create and initialize slave link for @ap. This enables slave
5650 * link handling on the port.
5651 *
5652 * In libata, a port contains links and a link contains devices.
 * There is a single host link but if a PMP is attached to it,
5654 * there can be multiple fan-out links. On SATA, there's usually
5655 * a single device connected to a link but PATA and SATA
5656 * controllers emulating TF based interface can have two - master
5657 * and slave.
5658 *
5659 * However, there are a few controllers which don't fit into this
5660 * abstraction too well - SATA controllers which emulate TF
5661 * interface with both master and slave devices but also have
5662 * separate SCR register sets for each device. These controllers
5663 * need separate links for physical link handling
5664 * (e.g. onlineness, link speed) but should be treated like a
5665 * traditional M/S controller for everything else (e.g. command
5666 * issue, softreset).
5667 *
5668 * slave_link is libata's way of handling this class of
5669 * controllers without impacting core layer too much. For
5670 * anything other than physical link handling, the default host
5671 * link is used for both master and slave. For physical link
5672 * handling, separate @ap->slave_link is used. All dirty details
5673 * are implemented inside libata core layer. From LLD's POV, the
5674 * only difference is that prereset, hardreset and postreset are
5675 * called once more for the slave link, so the reset sequence
5676 * looks like the following.
5677 *
5678 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5679 * softreset(M) -> postreset(M) -> postreset(S)
5680 *
5681 * Note that softreset is called only for the master. Softreset
5682 * resets both M/S by definition, so SRST on master should handle
5683 * both (the standard method will work just fine).
5684 *
5685 * LOCKING:
5686 * Should be called before host is registered.
5687 *
5688 * RETURNS:
5689 * 0 on success, -errno on failure.
5690 */
5691int ata_slave_link_init(struct ata_port *ap)
5692{
5693 struct ata_link *link;
5694
5695 WARN_ON(ap->slave_link);
5696 WARN_ON(ap->flags & ATA_FLAG_PMP);
5697
5698 link = kzalloc(sizeof(*link), GFP_KERNEL);
5699 if (!link)
5700 return -ENOMEM;
5701
5702 ata_link_init(ap, link, 1);
5703 ap->slave_link = link;
5704 return 0;
5705}
5706
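/* devres release callback: invoke ->port_stop on every port, then ->host_stop */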
5707static void ata_host_stop(struct device *gendev, void *res)
5708{
5709 struct ata_host *host = dev_get_drvdata(gendev);
5710 int i;
5711
5712 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5713
5714 for (i = 0; i < host->n_ports; i++) {
5715 struct ata_port *ap = host->ports[i];
5716
5717 if (ap->ops->port_stop)
5718 ap->ops->port_stop(ap);
5719 }
5720
5721 if (host->ops->host_stop)
5722 host->ops->host_stop(host);
5723}
5724
5725/**
5726 * ata_finalize_port_ops - finalize ata_port_operations
5727 * @ops: ata_port_operations to finalize
5728 *
5729 * An ata_port_operations can inherit from another ops and that
5730 * ops can again inherit from another. This can go on as many
5731 * times as necessary as long as there is no loop in the
5732 * inheritance chain.
5733 *
5734 * Ops tables are finalized when the host is started. NULL or
 * unspecified entries are inherited from the closest ancestor
5736 * which has the method and the entry is populated with it.
5737 * After finalization, the ops table directly points to all the
5738 * methods and ->inherits is no longer necessary and cleared.
5739 *
5740 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5741 *
5742 * LOCKING:
5743 * None.
5744 */
5745static void ata_finalize_port_ops(struct ata_port_operations *ops)
5746{
5747 static DEFINE_SPINLOCK(lock);
5748 const struct ata_port_operations *cur;
5749 void **begin = (void **)ops;
5750 void **end = (void **)&ops->inherits;
5751 void **pp;
5752
5753 if (!ops || !ops->inherits)
5754 return;
5755
5756 spin_lock(&lock);
5757
5758 for (cur = ops->inherits; cur; cur = cur->inherits) {
5759 void **inherit = (void **)cur;
5760
5761 for (pp = begin; pp < end; pp++, inherit++)
5762 if (!*pp)
5763 *pp = *inherit;
5764 }
5765
5766 for (pp = begin; pp < end; pp++)
5767 if (IS_ERR(*pp))
5768 *pp = NULL;
5769
5770 ops->inherits = NULL;
5771
5772 spin_unlock(&lock);
5773}
5774
5775/**
5776 * ata_host_start - start and freeze ports of an ATA host
5777 * @host: ATA host to start ports for
5778 *
5779 * Start and then freeze ports of @host. Started status is
5780 * recorded in host->flags, so this function can be called
5781 * multiple times. Ports are guaranteed to get started only
5782 * once. If host->ops isn't initialized yet, its set to the
5783 * first non-dummy port ops.
5784 *
5785 * LOCKING:
5786 * Inherited from calling layer (may sleep).
5787 *
5788 * RETURNS:
5789 * 0 if all ports are started successfully, -errno otherwise.
5790 */
5791int ata_host_start(struct ata_host *host)
5792{
5793 int have_stop = 0;
5794 void *start_dr = NULL;
5795 int i, rc;
5796
5797 if (host->flags & ATA_HOST_STARTED)
5798 return 0;
5799
5800 ata_finalize_port_ops(host->ops);
5801
5802 for (i = 0; i < host->n_ports; i++) {
5803 struct ata_port *ap = host->ports[i];
5804
5805 ata_finalize_port_ops(ap->ops);
5806
5807 if (!host->ops && !ata_port_is_dummy(ap))
5808 host->ops = ap->ops;
5809
5810 if (ap->ops->port_stop)
5811 have_stop = 1;
5812 }
5813
5814 if (host->ops->host_stop)
5815 have_stop = 1;
5816
5817 if (have_stop) {
5818 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5819 if (!start_dr)
5820 return -ENOMEM;
5821 }
5822
5823 for (i = 0; i < host->n_ports; i++) {
5824 struct ata_port *ap = host->ports[i];
5825
5826 if (ap->ops->port_start) {
5827 rc = ap->ops->port_start(ap);
5828 if (rc) {
5829 if (rc != -ENODEV)
5830 dev_err(host->dev,
5831 "failed to start port %d (errno=%d)\n",
5832 i, rc);
5833 goto err_out;
5834 }
5835 }
5836 ata_eh_freeze_port(ap);
5837 }
5838
5839 if (start_dr)
5840 devres_add(host->dev, start_dr);
5841 host->flags |= ATA_HOST_STARTED;
5842 return 0;
5843
5844 err_out:
5845 while (--i >= 0) {
5846 struct ata_port *ap = host->ports[i];
5847
5848 if (ap->ops->port_stop)
5849 ap->ops->port_stop(ap);
5850 }
5851 devres_free(start_dr);
5852 return rc;
5853}
5854
5855/**
 * ata_host_init - Initialize a host struct
5857 * @host: host to initialize
5858 * @dev: device host is attached to
5859 * @flags: host flags
5860 * @ops: port_ops
5861 *
5862 * LOCKING:
5863 * PCI/etc. bus probe sem.
5864 *
5865 */
5866/* KILLME - the only user left is ipr */
5867void ata_host_init(struct ata_host *host, struct device *dev,
5868 unsigned long flags, struct ata_port_operations *ops)
5869{
5870 spin_lock_init(&host->lock);
5871 mutex_init(&host->eh_mutex);
5872 host->dev = dev;
5873 host->flags = flags;
5874 host->ops = ops;
5875}
5876
5877int ata_port_probe(struct ata_port *ap)
5878{
5879 int rc = 0;
5880
5881 /* probe */
5882 if (ap->ops->error_handler) {
5883 struct ata_eh_info *ehi = &ap->link.eh_info;
5884 unsigned long flags;
5885
5886 /* kick EH for boot probing */
5887 spin_lock_irqsave(ap->lock, flags);
5888
5889 ehi->probe_mask |= ATA_ALL_DEVICES;
5890 ehi->action |= ATA_EH_RESET;
5891 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5892
5893 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5894 ap->pflags |= ATA_PFLAG_LOADING;
5895 ata_port_schedule_eh(ap);
5896
5897 spin_unlock_irqrestore(ap->lock, flags);
5898
5899 /* wait for EH to finish */
5900 ata_port_wait_eh(ap);
5901 } else {
5902 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5903 rc = ata_bus_probe(ap);
5904 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5905 }
5906 return rc;
5907}
5908
5909
5910static void async_port_probe(void *data, async_cookie_t cookie)
5911{
5912 struct ata_port *ap = data;
5913
5914 /*
5915 * If we're not allowed to scan this host in parallel,
5916 * we need to wait until all previous scans have completed
5917 * before going further.
5918 * Jeff Garzik says this is only within a controller, so we
5919 * don't need to wait for port 0, only for later ports.
5920 */
5921 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5922 async_synchronize_cookie(cookie);
5923
5924 (void)ata_port_probe(ap);
5925
5926 /* in order to keep device order, we need to synchronize at this point */
5927 async_synchronize_cookie(cookie);
5928
5929 ata_scsi_scan_host(ap, 1);
5930}
5931
5932/**
5933 * ata_host_register - register initialized ATA host
5934 * @host: ATA host to register
5935 * @sht: template for SCSI host
5936 *
5937 * Register initialized ATA host. @host is allocated using
5938 * ata_host_alloc() and fully initialized by LLD. This function
5939 * starts ports, registers @host with ATA and SCSI layers and
 * probes registered devices.
5941 *
5942 * LOCKING:
5943 * Inherited from calling layer (may sleep).
5944 *
5945 * RETURNS:
5946 * 0 on success, -errno otherwise.
5947 */
5948int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5949{
5950 int i, rc;
5951
5952 /* host must have been started */
5953 if (!(host->flags & ATA_HOST_STARTED)) {
5954 dev_err(host->dev, "BUG: trying to register unstarted host\n");
5955 WARN_ON(1);
5956 return -EINVAL;
5957 }
5958
5959 /* Blow away unused ports. This happens when LLD can't
5960 * determine the exact number of ports to allocate at
5961 * allocation time.
5962 */
5963 for (i = host->n_ports; host->ports[i]; i++)
5964 kfree(host->ports[i]);
5965
5966 /* give ports names and add SCSI hosts */
5967 for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

5971 /* Create associated sysfs transport objects */
5972 for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc)
			goto err_tadd;
5977 }
5978
5979 rc = ata_scsi_add_hosts(host, sht);
5980 if (rc)
5981 goto err_tadd;
5982
5983 /* associate with ACPI nodes */
5984 ata_acpi_associate(host);
5985
5986 /* set cable, sata_spd_limit and report */
5987 for (i = 0; i < host->n_ports; i++) {
5988 struct ata_port *ap = host->ports[i];
5989 unsigned long xfer_mask;
5990
5991 /* set SATA cable type if still unset */
5992 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5993 ap->cbl = ATA_CBL_SATA;
5994
5995 /* init sata_spd_limit to the current value */
5996 sata_link_init_spd(&ap->link);
5997 if (ap->slave_link)
5998 sata_link_init_spd(ap->slave_link);
5999
6000 /* print per-port info to dmesg */
6001 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6002 ap->udma_mask);
6003
6004 if (!ata_port_is_dummy(ap)) {
6005 ata_port_info(ap, "%cATA max %s %s\n",
6006 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6007 ata_mode_string(xfer_mask),
6008 ap->link.eh_info.desc);
6009 ata_ehi_clear_desc(&ap->link.eh_info);
6010 } else
6011 ata_port_info(ap, "DUMMY\n");
6012 }
6013
6014 /* perform each probe asynchronously */
6015 for (i = 0; i < host->n_ports; i++) {
6016 struct ata_port *ap = host->ports[i];
6017 async_schedule(async_port_probe, ap);
6018 }
6019
6020 return 0;
6021
6022 err_tadd:
6023 while (--i >= 0) {
6024 ata_tport_delete(host->ports[i]);
6025 }
6026 return rc;
6027
6028}
6029
6030/**
6031 * ata_host_activate - start host, request IRQ and register it
6032 * @host: target ATA host
6033 * @irq: IRQ to request
6034 * @irq_handler: irq_handler used when requesting IRQ
6035 * @irq_flags: irq_flags used when requesting IRQ
6036 * @sht: scsi_host_template to use when registering the host
6037 *
6038 * After allocating an ATA host and initializing it, most libata
6039 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it. This helper takes necessary
6041 * arguments and performs the three steps in one go.
6042 *
6043 * An invalid IRQ skips the IRQ registration and expects the host to
6044 * have set polling mode on the port. In this case, @irq_handler
6045 * should be NULL.
6046 *
6047 * LOCKING:
6048 * Inherited from calling layer (may sleep).
6049 *
6050 * RETURNS:
6051 * 0 on success, -errno otherwise.
6052 */
6053int ata_host_activate(struct ata_host *host, int irq,
6054 irq_handler_t irq_handler, unsigned long irq_flags,
6055 struct scsi_host_template *sht)
6056{
6057 int i, rc;
6058
6059 rc = ata_host_start(host);
6060 if (rc)
6061 return rc;
6062
6063 /* Special case for polling mode */
6064 if (!irq) {
6065 WARN_ON(irq_handler);
6066 return ata_host_register(host, sht);
6067 }
6068
6069 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6070 dev_driver_string(host->dev), host);
6071 if (rc)
6072 return rc;
6073
6074 for (i = 0; i < host->n_ports; i++)
6075 ata_port_desc(host->ports[i], "irq %d", irq);
6076
6077 rc = ata_host_register(host, sht);
6078 /* if failed, just free the IRQ and leave ports alone */
6079 if (rc)
6080 devm_free_irq(host->dev, irq, host);
6081
6082 return rc;
6083}
6084
6085/**
 * ata_port_detach - Detach ATA port in preparation of device removal
6087 * @ap: ATA port to be detached
6088 *
6089 * Detach all ATA devices and the associated SCSI devices of @ap;
6090 * then, remove the associated SCSI host. @ap is guaranteed to
6091 * be quiescent on return from this function.
6092 *
6093 * LOCKING:
6094 * Kernel thread context (may sleep).
6095 */
6096static void ata_port_detach(struct ata_port *ap)
6097{
6098 unsigned long flags;
6099
6100 if (!ap->ops->error_handler)
6101 goto skip_eh;
6102
6103 /* tell EH we're leaving & flush EH */
6104 spin_lock_irqsave(ap->lock, flags);
6105 ap->pflags |= ATA_PFLAG_UNLOADING;
6106 ata_port_schedule_eh(ap);
6107 spin_unlock_irqrestore(ap->lock, flags);
6108
6109 /* wait till EH commits suicide */
6110 ata_port_wait_eh(ap);
6111
6112 /* it better be dead now */
6113 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6114
6115 cancel_delayed_work_sync(&ap->hotplug_task);
6116
6117 skip_eh:
6118 if (ap->pmp_link) {
6119 int i;
6120 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6121 ata_tlink_delete(&ap->pmp_link[i]);
6122 }
6123 ata_tport_delete(ap);
6124
6125 /* remove the associated SCSI host */
6126 scsi_remove_host(ap->scsi_host);
6127}
6128
6129/**
6130 * ata_host_detach - Detach all ports of an ATA host
6131 * @host: Host to detach
6132 *
6133 * Detach all ports of @host.
6134 *
6135 * LOCKING:
6136 * Kernel thread context (may sleep).
6137 */
6138void ata_host_detach(struct ata_host *host)
6139{
6140 int i;
6141
6142 for (i = 0; i < host->n_ports; i++)
6143 ata_port_detach(host->ports[i]);
6144
6145 /* the host is dead now, dissociate ACPI */
6146 ata_acpi_dissociate(host);
6147}
6148
6149#ifdef CONFIG_PCI
6150
6151/**
6152 * ata_pci_remove_one - PCI layer callback for device removal
6153 * @pdev: PCI device that was removed
6154 *
6155 * PCI layer indicates to libata via this hook that hot-unplug or
6156 * module unload event has occurred. Detach all ports. Resource
6157 * release is handled via devres.
6158 *
6159 * LOCKING:
6160 * Inherited from PCI layer (may sleep).
6161 */
6162void ata_pci_remove_one(struct pci_dev *pdev)
6163{
6164 struct device *dev = &pdev->dev;
6165 struct ata_host *host = dev_get_drvdata(dev);
6166
6167 ata_host_detach(host);
6168}
6169
6170/* move to PCI subsystem */
6171int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6172{
6173 unsigned long tmp = 0;
6174
6175 switch (bits->width) {
6176 case 1: {
6177 u8 tmp8 = 0;
6178 pci_read_config_byte(pdev, bits->reg, &tmp8);
6179 tmp = tmp8;
6180 break;
6181 }
6182 case 2: {
6183 u16 tmp16 = 0;
6184 pci_read_config_word(pdev, bits->reg, &tmp16);
6185 tmp = tmp16;
6186 break;
6187 }
6188 case 4: {
6189 u32 tmp32 = 0;
6190 pci_read_config_dword(pdev, bits->reg, &tmp32);
6191 tmp = tmp32;
6192 break;
6193 }
6194
6195 default:
6196 return -EINVAL;
6197 }
6198
6199 tmp &= bits->mask;
6200
6201 return (tmp == bits->val) ? 1 : 0;
6202}
6203
6204#ifdef CONFIG_PM
6205void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6206{
6207 pci_save_state(pdev);
6208 pci_disable_device(pdev);
6209
6210 if (mesg.event & PM_EVENT_SLEEP)
6211 pci_set_power_state(pdev, PCI_D3hot);
6212}
6213
6214int ata_pci_device_do_resume(struct pci_dev *pdev)
6215{
6216 int rc;
6217
6218 pci_set_power_state(pdev, PCI_D0);
6219 pci_restore_state(pdev);
6220
6221 rc = pcim_enable_device(pdev);
6222 if (rc) {
6223 dev_err(&pdev->dev,
6224 "failed to enable device after resume (%d)\n", rc);
6225 return rc;
6226 }
6227
6228 pci_set_master(pdev);
6229 return 0;
6230}
6231
6232int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6233{
6234 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6235 int rc = 0;
6236
6237 rc = ata_host_suspend(host, mesg);
6238 if (rc)
6239 return rc;
6240
6241 ata_pci_device_do_suspend(pdev, mesg);
6242
6243 return 0;
6244}
6245
6246int ata_pci_device_resume(struct pci_dev *pdev)
6247{
6248 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6249 int rc;
6250
6251 rc = ata_pci_device_do_resume(pdev);
6252 if (rc == 0)
6253 ata_host_resume(host);
6254 return rc;
6255}
6256#endif /* CONFIG_PM */
6257
6258#endif /* CONFIG_PCI */
6259
6260static int __init ata_parse_force_one(char **cur,
6261 struct ata_force_ent *force_ent,
6262 const char **reason)
6263{
6264 /* FIXME: Currently, there's no way to tag init const data and
6265 * using __initdata causes build failure on some versions of
6266 * gcc. Once __initdataconst is implemented, add const to the
6267 * following structure.
6268 */
6269 static struct ata_force_param force_tbl[] __initdata = {
6270 { "40c", .cbl = ATA_CBL_PATA40 },
6271 { "80c", .cbl = ATA_CBL_PATA80 },
6272 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6273 { "unk", .cbl = ATA_CBL_PATA_UNK },
6274 { "ign", .cbl = ATA_CBL_PATA_IGN },
6275 { "sata", .cbl = ATA_CBL_SATA },
6276 { "1.5Gbps", .spd_limit = 1 },
6277 { "3.0Gbps", .spd_limit = 2 },
6278 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6279 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6280 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6281 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6282 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6283 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6284 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6285 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6286 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6287 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6288 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6289 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6290 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6291 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6292 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6293 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6294 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6295 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6296 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6297 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6298 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6299 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6300 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6301 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6302 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6303 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6304 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6305 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6306 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6307 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6308 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6309 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6310 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6311 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6312 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6313 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6314 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6315 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6316 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6317 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6318 };
6319 char *start = *cur, *p = *cur;
6320 char *id, *val, *endp;
6321 const struct ata_force_param *match_fp = NULL;
6322 int nr_matches = 0, i;
6323
6324 /* find where this param ends and update *cur */
6325 while (*p != '\0' && *p != ',')
6326 p++;
6327
6328 if (*p == '\0')
6329 *cur = p;
6330 else
6331 *cur = p + 1;
6332
6333 *p = '\0';
6334
6335 /* parse */
6336 p = strchr(start, ':');
6337 if (!p) {
6338 val = strstrip(start);
6339 goto parse_val;
6340 }
6341 *p = '\0';
6342
6343 id = strstrip(start);
6344 val = strstrip(p + 1);
6345
6346 /* parse id */
6347 p = strchr(id, '.');
6348 if (p) {
6349 *p++ = '\0';
6350 force_ent->device = simple_strtoul(p, &endp, 10);
6351 if (p == endp || *endp != '\0') {
6352 *reason = "invalid device";
6353 return -EINVAL;
6354 }
6355 }
6356
6357 force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
6359 *reason = "invalid port/link";
6360 return -EINVAL;
6361 }
6362
6363 parse_val:
6364 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6365 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6366 const struct ata_force_param *fp = &force_tbl[i];
6367
6368 if (strncasecmp(val, fp->name, strlen(val)))
6369 continue;
6370
6371 nr_matches++;
6372 match_fp = fp;
6373
6374 if (strcasecmp(val, fp->name) == 0) {
6375 nr_matches = 1;
6376 break;
6377 }
6378 }
6379
6380 if (!nr_matches) {
6381 *reason = "unknown value";
6382 return -EINVAL;
6383 }
6384 if (nr_matches > 1) {
		*reason = "ambiguous value";
6386 return -EINVAL;
6387 }
6388
6389 force_ent->param = *match_fp;
6390
6391 return 0;
6392}
6393
6394static void __init ata_parse_force_param(void)
6395{
6396 int idx = 0, size = 1;
6397 int last_port = -1, last_device = -1;
6398 char *p, *cur, *next;
6399
6400 /* calculate maximum number of params and allocate force_tbl */
6401 for (p = ata_force_param_buf; *p; p++)
6402 if (*p == ',')
6403 size++;
6404
6405 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6406 if (!ata_force_tbl) {
6407 printk(KERN_WARNING "ata: failed to extend force table, "
6408 "libata.force ignored\n");
6409 return;
6410 }
6411
6412 /* parse and populate the table */
6413 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6414 const char *reason = "";
6415 struct ata_force_ent te = { .port = -1, .device = -1 };
6416
6417 next = cur;
6418 if (ata_parse_force_one(&next, &te, &reason)) {
6419 printk(KERN_WARNING "ata: failed to parse force "
6420 "parameter \"%s\" (%s)\n",
6421 cur, reason);
6422 continue;
6423 }
6424
6425 if (te.port == -1) {
6426 te.port = last_port;
6427 te.device = last_device;
6428 }
6429
6430 ata_force_tbl[idx++] = te;
6431
6432 last_port = te.port;
6433 last_device = te.device;
6434 }
6435
6436 ata_force_tbl_size = idx;
6437}
6438
6439static int __init ata_init(void)
6440{
6441 int rc;
6442
6443 ata_parse_force_param();
6444
6445 rc = ata_sff_init();
6446 if (rc) {
6447 kfree(ata_force_tbl);
6448 return rc;
6449 }
6450
6451 libata_transport_init();
6452 ata_scsi_transport_template = ata_attach_transport();
6453 if (!ata_scsi_transport_template) {
6454 ata_sff_exit();
6455 rc = -ENOMEM;
6456 goto err_out;
6457 }
6458
6459 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6460 return 0;
6461
6462err_out:
6463 return rc;
6464}
6465
6466static void __exit ata_exit(void)
6467{
6468 ata_release_transport(ata_scsi_transport_template);
6469 libata_transport_exit();
6470 ata_sff_exit();
6471 kfree(ata_force_tbl);
6472}
6473
6474subsys_initcall(ata_init);
6475module_exit(ata_exit);
6476
6477static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6478
6479int ata_ratelimit(void)
6480{
6481 return __ratelimit(&ratelimit);
6482}
6483
6484/**
6485 * ata_msleep - ATA EH owner aware msleep
6486 * @ap: ATA port to attribute the sleep to
6487 * @msecs: duration to sleep in milliseconds
6488 *
6489 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6490 * ownership is released before going to sleep and reacquired
6491 * after the sleep is complete. IOW, other ports sharing the
6492 * @ap->host will be allowed to own the EH while this task is
6493 * sleeping.
6494 *
6495 * LOCKING:
6496 * Might sleep.
6497 */
6498void ata_msleep(struct ata_port *ap, unsigned int msecs)
6499{
6500 bool owns_eh = ap && ap->host->eh_owner == current;
6501
6502 if (owns_eh)
6503 ata_eh_release(ap);
6504
6505 msleep(msecs);
6506
6507 if (owns_eh)
6508 ata_eh_acquire(ap);
6509}
6510
6511/**
6512 * ata_wait_register - wait until register value changes
6513 * @ap: ATA port to wait register for, can be NULL
6514 * @reg: IO-mapped register
6515 * @mask: Mask to apply to read register value
6516 * @val: Wait condition
6517 * @interval: polling interval in milliseconds
6518 * @timeout: timeout in milliseconds
6519 *
 * Waiting for some bits of a register to change is a common
 * operation for ATA controllers. This function reads the 32-bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (ioread32(@reg) & @mask) != @val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after sleeping for @interval msecs until @timeout msecs
 * have passed.
6528 *
6529 * LOCKING:
6530 * Kernel thread context (may sleep)
6531 *
6532 * RETURNS:
6533 * The final register value.
6534 */
6535u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6536 unsigned long interval, unsigned long timeout)
6537{
6538 unsigned long deadline;
6539 u32 tmp;
6540
6541 tmp = ioread32(reg);
6542
6543 /* Calculate timeout _after_ the first read to make sure
6544 * preceding writes reach the controller before starting to
6545 * eat away the timeout.
6546 */
6547 deadline = ata_deadline(jiffies, timeout);
6548
6549 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6550 ata_msleep(ap, interval);
6551 tmp = ioread32(reg);
6552 }
6553
6554 return tmp;
6555}
6556
6557/*
6558 * Dummy port_ops
6559 */
6560static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6561{
6562 return AC_ERR_SYSTEM;
6563}
6564
6565static void ata_dummy_error_handler(struct ata_port *ap)
6566{
6567 /* truly dummy */
6568}
6569
6570struct ata_port_operations ata_dummy_port_ops = {
6571 .qc_prep = ata_noop_qc_prep,
6572 .qc_issue = ata_dummy_qc_issue,
6573 .error_handler = ata_dummy_error_handler,
6574};
6575
6576const struct ata_port_info ata_dummy_port_info = {
6577 .port_ops = &ata_dummy_port_ops,
6578};
6579
6580/*
6581 * Utility print functions
6582 */
6583int ata_port_printk(const struct ata_port *ap, const char *level,
6584 const char *fmt, ...)
6585{
6586 struct va_format vaf;
6587 va_list args;
6588 int r;
6589
6590 va_start(args, fmt);
6591
6592 vaf.fmt = fmt;
6593 vaf.va = &args;
6594
6595 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6596
6597 va_end(args);
6598
6599 return r;
6600}
6601EXPORT_SYMBOL(ata_port_printk);
6602
6603int ata_link_printk(const struct ata_link *link, const char *level,
6604 const char *fmt, ...)
6605{
6606 struct va_format vaf;
6607 va_list args;
6608 int r;
6609
6610 va_start(args, fmt);
6611
6612 vaf.fmt = fmt;
6613 vaf.va = &args;
6614
6615 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6616 r = printk("%sata%u.%02u: %pV",
6617 level, link->ap->print_id, link->pmp, &vaf);
6618 else
6619 r = printk("%sata%u: %pV",
6620 level, link->ap->print_id, &vaf);
6621
6622 va_end(args);
6623
6624 return r;
6625}
6626EXPORT_SYMBOL(ata_link_printk);
6627
6628int ata_dev_printk(const struct ata_device *dev, const char *level,
6629 const char *fmt, ...)
6630{
6631 struct va_format vaf;
6632 va_list args;
6633 int r;
6634
6635 va_start(args, fmt);
6636
6637 vaf.fmt = fmt;
6638 vaf.va = &args;
6639
6640 r = printk("%sata%u.%02u: %pV",
6641 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6642 &vaf);
6643
6644 va_end(args);
6645
6646 return r;
6647}
6648EXPORT_SYMBOL(ata_dev_printk);
6649
6650void ata_print_version(const struct device *dev, const char *version)
6651{
6652 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6653}
6654EXPORT_SYMBOL(ata_print_version);
6655
6656/*
6657 * libata is essentially a library of internal helper functions for
6658 * low-level ATA host controller drivers. As such, the API/ABI is
6659 * likely to change as new drivers are added and updated.
6660 * Do not depend on ABI/API stability.
6661 */
6662EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6663EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6664EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6665EXPORT_SYMBOL_GPL(ata_base_port_ops);
6666EXPORT_SYMBOL_GPL(sata_port_ops);
6667EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6668EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6669EXPORT_SYMBOL_GPL(ata_link_next);
6670EXPORT_SYMBOL_GPL(ata_dev_next);
6671EXPORT_SYMBOL_GPL(ata_std_bios_param);
6672EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6673EXPORT_SYMBOL_GPL(ata_host_init);
6674EXPORT_SYMBOL_GPL(ata_host_alloc);
6675EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6676EXPORT_SYMBOL_GPL(ata_slave_link_init);
6677EXPORT_SYMBOL_GPL(ata_host_start);
6678EXPORT_SYMBOL_GPL(ata_host_register);
6679EXPORT_SYMBOL_GPL(ata_host_activate);
6680EXPORT_SYMBOL_GPL(ata_host_detach);
6681EXPORT_SYMBOL_GPL(ata_sg_init);
6682EXPORT_SYMBOL_GPL(ata_qc_complete);
6683EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6684EXPORT_SYMBOL_GPL(atapi_cmd_type);
6685EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6686EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6687EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6688EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6689EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6690EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6691EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6692EXPORT_SYMBOL_GPL(ata_mode_string);
6693EXPORT_SYMBOL_GPL(ata_id_xfermask);
6694EXPORT_SYMBOL_GPL(ata_do_set_mode);
6695EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6696EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6697EXPORT_SYMBOL_GPL(ata_dev_disable);
6698EXPORT_SYMBOL_GPL(sata_set_spd);
6699EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6700EXPORT_SYMBOL_GPL(sata_link_debounce);
6701EXPORT_SYMBOL_GPL(sata_link_resume);
6702EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6703EXPORT_SYMBOL_GPL(ata_std_prereset);
6704EXPORT_SYMBOL_GPL(sata_link_hardreset);
6705EXPORT_SYMBOL_GPL(sata_std_hardreset);
6706EXPORT_SYMBOL_GPL(ata_std_postreset);
6707EXPORT_SYMBOL_GPL(ata_dev_classify);
6708EXPORT_SYMBOL_GPL(ata_dev_pair);
6709EXPORT_SYMBOL_GPL(ata_ratelimit);
6710EXPORT_SYMBOL_GPL(ata_msleep);
6711EXPORT_SYMBOL_GPL(ata_wait_register);
6712EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6713EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6714EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6715EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6716EXPORT_SYMBOL_GPL(sata_scr_valid);
6717EXPORT_SYMBOL_GPL(sata_scr_read);
6718EXPORT_SYMBOL_GPL(sata_scr_write);
6719EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6720EXPORT_SYMBOL_GPL(ata_link_online);
6721EXPORT_SYMBOL_GPL(ata_link_offline);
6722#ifdef CONFIG_PM
6723EXPORT_SYMBOL_GPL(ata_host_suspend);
6724EXPORT_SYMBOL_GPL(ata_host_resume);
6725#endif /* CONFIG_PM */
6726EXPORT_SYMBOL_GPL(ata_id_string);
6727EXPORT_SYMBOL_GPL(ata_id_c_string);
6728EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6729EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6730
6731EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6732EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6733EXPORT_SYMBOL_GPL(ata_timing_compute);
6734EXPORT_SYMBOL_GPL(ata_timing_merge);
6735EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6736
6737#ifdef CONFIG_PCI
6738EXPORT_SYMBOL_GPL(pci_test_config_bits);
6739EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6740#ifdef CONFIG_PM
6741EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6742EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6743EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6744EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6745#endif /* CONFIG_PM */
6746#endif /* CONFIG_PCI */
6747
6748EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6749EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6750EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6751EXPORT_SYMBOL_GPL(ata_port_desc);
6752#ifdef CONFIG_PCI
6753EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6754#endif /* CONFIG_PCI */
6755EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6756EXPORT_SYMBOL_GPL(ata_link_abort);
6757EXPORT_SYMBOL_GPL(ata_port_abort);
6758EXPORT_SYMBOL_GPL(ata_port_freeze);
6759EXPORT_SYMBOL_GPL(sata_async_notification);
6760EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6761EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6762EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6763EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6764EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6765EXPORT_SYMBOL_GPL(ata_do_eh);
6766EXPORT_SYMBOL_GPL(ata_std_error_handler);
6767
6768EXPORT_SYMBOL_GPL(ata_cable_40wire);
6769EXPORT_SYMBOL_GPL(ata_cable_80wire);
6770EXPORT_SYMBOL_GPL(ata_cable_unknown);
6771EXPORT_SYMBOL_GPL(ata_cable_ignore);
6772EXPORT_SYMBOL_GPL(ata_cable_sata);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * libata-core.c - helper library for ATA
4 *
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
7 *
8 * libata documentation is available via 'make {ps|pdf}docs',
9 * as Documentation/driver-api/libata.rst
10 *
11 * Hardware documentation available from http://www.t13.org/ and
12 * http://www.sata-io.org/
13 *
14 * Standards documents from:
15 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
16 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 * http://www.sata-io.org (SATA)
18 * http://www.compactflash.org (CF)
19 * http://www.qic.org (QIC157 - Tape and DSC)
20 * http://www.ce-ata.org (CE-ATA: not supported)
21 *
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers. As such, the API/ABI is
24 * likely to change as new drivers are added and updated.
25 * Do not depend on ABI/API stability.
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/list.h>
33#include <linux/mm.h>
34#include <linux/spinlock.h>
35#include <linux/blkdev.h>
36#include <linux/delay.h>
37#include <linux/timer.h>
38#include <linux/time.h>
39#include <linux/interrupt.h>
40#include <linux/completion.h>
41#include <linux/suspend.h>
42#include <linux/workqueue.h>
43#include <linux/scatterlist.h>
44#include <linux/io.h>
45#include <linux/log2.h>
46#include <linux/slab.h>
47#include <linux/glob.h>
48#include <scsi/scsi.h>
49#include <scsi/scsi_cmnd.h>
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
52#include <asm/byteorder.h>
53#include <asm/unaligned.h>
54#include <linux/cdrom.h>
55#include <linux/ratelimit.h>
56#include <linux/leds.h>
57#include <linux/pm_runtime.h>
58#include <linux/platform_device.h>
59#include <asm/setup.h>
60
61#define CREATE_TRACE_POINTS
62#include <trace/events/libata.h>
63
64#include "libata.h"
65#include "libata-transport.h"
66
67const struct ata_port_operations ata_base_port_ops = {
68 .prereset = ata_std_prereset,
69 .postreset = ata_std_postreset,
70 .error_handler = ata_std_error_handler,
71 .sched_eh = ata_std_sched_eh,
72 .end_eh = ata_std_end_eh,
73};
74
75const struct ata_port_operations sata_port_ops = {
76 .inherits = &ata_base_port_ops,
77
78 .qc_defer = ata_std_qc_defer,
79 .hardreset = sata_std_hardreset,
80};
81EXPORT_SYMBOL_GPL(sata_port_ops);
82
83static unsigned int ata_dev_init_params(struct ata_device *dev,
84 u16 heads, u16 sectors);
85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
86static void ata_dev_xfermask(struct ata_device *dev);
87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
88
89atomic_t ata_print_id = ATOMIC_INIT(0);
90
91#ifdef CONFIG_ATA_FORCE
92struct ata_force_param {
93 const char *name;
94 u8 cbl;
95 u8 spd_limit;
96 unsigned int xfer_mask;
97 unsigned int horkage_on;
98 unsigned int horkage_off;
99 u16 lflags_on;
100 u16 lflags_off;
101};
102
103struct ata_force_ent {
104 int port;
105 int device;
106 struct ata_force_param param;
107};
108
109static struct ata_force_ent *ata_force_tbl;
110static int ata_force_tbl_size;
111
112static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
113/* param_buf is thrown away after initialization, disallow read */
114module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
115MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
116#endif
117
118static int atapi_enabled = 1;
119module_param(atapi_enabled, int, 0444);
120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
121
122static int atapi_dmadir = 0;
123module_param(atapi_dmadir, int, 0444);
124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
125
126int atapi_passthru16 = 1;
127module_param(atapi_passthru16, int, 0444);
128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
129
130int libata_fua = 0;
131module_param_named(fua, libata_fua, int, 0444);
132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
133
134static int ata_ignore_hpa;
135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
137
138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
139module_param_named(dma, libata_dma_mask, int, 0444);
140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
141
142static int ata_probe_timeout;
143module_param(ata_probe_timeout, int, 0444);
144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
145
146int libata_noacpi = 0;
147module_param_named(noacpi, libata_noacpi, int, 0444);
148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
149
150int libata_allow_tpm = 0;
151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
153
154static int atapi_an;
155module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
157
158MODULE_AUTHOR("Jeff Garzik");
159MODULE_DESCRIPTION("Library module for ATA devices");
160MODULE_LICENSE("GPL");
161MODULE_VERSION(DRV_VERSION);
162
163static inline bool ata_dev_print_info(struct ata_device *dev)
164{
165 struct ata_eh_context *ehc = &dev->link->eh_context;
166
167 return ehc->i.flags & ATA_EHI_PRINTINFO;
168}
169
170static bool ata_sstatus_online(u32 sstatus)
171{
172 return (sstatus & 0xf) == 0x3;
173}
174
175/**
176 * ata_link_next - link iteration helper
177 * @link: the previous link, NULL to start
178 * @ap: ATA port containing links to iterate
179 * @mode: iteration mode, one of ATA_LITER_*
180 *
181 * LOCKING:
182 * Host lock or EH context.
183 *
184 * RETURNS:
185 * Pointer to the next link.
186 */
187struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
188 enum ata_link_iter_mode mode)
189{
190 BUG_ON(mode != ATA_LITER_EDGE &&
191 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
192
193 /* NULL link indicates start of iteration */
194 if (!link)
195 switch (mode) {
196 case ATA_LITER_EDGE:
197 case ATA_LITER_PMP_FIRST:
198 if (sata_pmp_attached(ap))
199 return ap->pmp_link;
200 fallthrough;
201 case ATA_LITER_HOST_FIRST:
202 return &ap->link;
203 }
204
205 /* we just iterated over the host link, what's next? */
206 if (link == &ap->link)
207 switch (mode) {
208 case ATA_LITER_HOST_FIRST:
209 if (sata_pmp_attached(ap))
210 return ap->pmp_link;
211 fallthrough;
212 case ATA_LITER_PMP_FIRST:
213 if (unlikely(ap->slave_link))
214 return ap->slave_link;
215 fallthrough;
216 case ATA_LITER_EDGE:
217 return NULL;
218 }
219
220 /* slave_link excludes PMP */
221 if (unlikely(link == ap->slave_link))
222 return NULL;
223
224 /* we were over a PMP link */
225 if (++link < ap->pmp_link + ap->nr_pmp_links)
226 return link;
227
228 if (mode == ATA_LITER_PMP_FIRST)
229 return &ap->link;
230
231 return NULL;
232}
233EXPORT_SYMBOL_GPL(ata_link_next);
234
235/**
236 * ata_dev_next - device iteration helper
237 * @dev: the previous device, NULL to start
238 * @link: ATA link containing devices to iterate
239 * @mode: iteration mode, one of ATA_DITER_*
240 *
241 * LOCKING:
242 * Host lock or EH context.
243 *
244 * RETURNS:
245 * Pointer to the next device.
246 */
247struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
248 enum ata_dev_iter_mode mode)
249{
250 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
251 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
252
253 /* NULL dev indicates start of iteration */
254 if (!dev)
255 switch (mode) {
256 case ATA_DITER_ENABLED:
257 case ATA_DITER_ALL:
258 dev = link->device;
259 goto check;
260 case ATA_DITER_ENABLED_REVERSE:
261 case ATA_DITER_ALL_REVERSE:
262 dev = link->device + ata_link_max_devices(link) - 1;
263 goto check;
264 }
265
266 next:
267 /* move to the next one */
268 switch (mode) {
269 case ATA_DITER_ENABLED:
270 case ATA_DITER_ALL:
271 if (++dev < link->device + ata_link_max_devices(link))
272 goto check;
273 return NULL;
274 case ATA_DITER_ENABLED_REVERSE:
275 case ATA_DITER_ALL_REVERSE:
276 if (--dev >= link->device)
277 goto check;
278 return NULL;
279 }
280
281 check:
282 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
283 !ata_dev_enabled(dev))
284 goto next;
285 return dev;
286}
287EXPORT_SYMBOL_GPL(ata_dev_next);
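
/*
 * Example (illustrative sketch): the two iterators above are normally
 * driven through the ata_for_each_link()/ata_for_each_dev() wrappers
 * from <linux/libata.h>, e.g. to visit every enabled device on a port:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_info(dev, "found enabled device\n");
 */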
288
289/**
290 * ata_dev_phys_link - find physical link for a device
291 * @dev: ATA device to look up physical link for
292 *
293 * Look up physical link which @dev is attached to. Note that
294 * this is different from @dev->link only when @dev is on slave
295 * link. For all other cases, it's the same as @dev->link.
296 *
297 * LOCKING:
298 * Don't care.
299 *
300 * RETURNS:
301 * Pointer to the found physical link.
302 */
303struct ata_link *ata_dev_phys_link(struct ata_device *dev)
304{
305 struct ata_port *ap = dev->link->ap;
306
307 if (!ap->slave_link)
308 return dev->link;
309 if (!dev->devno)
310 return &ap->link;
311 return ap->slave_link;
312}
313
314#ifdef CONFIG_ATA_FORCE
315/**
316 * ata_force_cbl - force cable type according to libata.force
317 * @ap: ATA port of interest
318 *
319 * Force cable type according to libata.force and whine about it.
320 * The last entry which has matching port number is used, so it
321 * can be specified as part of device force parameters. For
322 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
323 * same effect.
324 *
325 * LOCKING:
326 * EH context.
327 */
328void ata_force_cbl(struct ata_port *ap)
329{
330 int i;
331
332 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
333 const struct ata_force_ent *fe = &ata_force_tbl[i];
334
335 if (fe->port != -1 && fe->port != ap->print_id)
336 continue;
337
338 if (fe->param.cbl == ATA_CBL_NONE)
339 continue;
340
341 ap->cbl = fe->param.cbl;
342 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
343 return;
344 }
345}
346
347/**
348 * ata_force_link_limits - force link limits according to libata.force
349 * @link: ATA link of interest
350 *
351 * Force link flags and SATA spd limit according to libata.force
352 * and whine about it. When only the port part is specified
353 * (e.g. 1:), the limit applies to all links connected to both
354 * the host link and all fan-out ports connected via PMP. If the
355 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link, not the host link. Device number 15 always
 * points to the host link whether PMP is attached or not. If the
 * controller has a slave link, device number 16 points to it.
359 *
360 * LOCKING:
361 * EH context.
362 */
363static void ata_force_link_limits(struct ata_link *link)
364{
365 bool did_spd = false;
366 int linkno = link->pmp;
367 int i;
368
369 if (ata_is_host_link(link))
370 linkno += 15;
371
372 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
373 const struct ata_force_ent *fe = &ata_force_tbl[i];
374
375 if (fe->port != -1 && fe->port != link->ap->print_id)
376 continue;
377
378 if (fe->device != -1 && fe->device != linkno)
379 continue;
380
381 /* only honor the first spd limit */
382 if (!did_spd && fe->param.spd_limit) {
383 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
384 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
385 fe->param.name);
386 did_spd = true;
387 }
388
389 /* let lflags stack */
390 if (fe->param.lflags_on) {
391 link->flags |= fe->param.lflags_on;
392 ata_link_notice(link,
393 "FORCE: link flag 0x%x forced -> 0x%x\n",
394 fe->param.lflags_on, link->flags);
395 }
396 if (fe->param.lflags_off) {
397 link->flags &= ~fe->param.lflags_off;
398 ata_link_notice(link,
399 "FORCE: link flag 0x%x cleared -> 0x%x\n",
400 fe->param.lflags_off, link->flags);
401 }
402 }
403}
404
405/**
406 * ata_force_xfermask - force xfermask according to libata.force
407 * @dev: ATA device of interest
408 *
409 * Force xfer_mask according to libata.force and whine about it.
410 * For consistency with link selection, device number 15 selects
411 * the first device connected to the host link.
412 *
413 * LOCKING:
414 * EH context.
415 */
416static void ata_force_xfermask(struct ata_device *dev)
417{
418 int devno = dev->link->pmp + dev->devno;
419 int alt_devno = devno;
420 int i;
421
422 /* allow n.15/16 for devices attached to host port */
423 if (ata_is_host_link(dev->link))
424 alt_devno += 15;
425
426 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
427 const struct ata_force_ent *fe = &ata_force_tbl[i];
428 unsigned int pio_mask, mwdma_mask, udma_mask;
429
430 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
431 continue;
432
433 if (fe->device != -1 && fe->device != devno &&
434 fe->device != alt_devno)
435 continue;
436
437 if (!fe->param.xfer_mask)
438 continue;
439
440 ata_unpack_xfermask(fe->param.xfer_mask,
441 &pio_mask, &mwdma_mask, &udma_mask);
442 if (udma_mask)
443 dev->udma_mask = udma_mask;
444 else if (mwdma_mask) {
445 dev->udma_mask = 0;
446 dev->mwdma_mask = mwdma_mask;
447 } else {
448 dev->udma_mask = 0;
449 dev->mwdma_mask = 0;
450 dev->pio_mask = pio_mask;
451 }
452
453 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
454 fe->param.name);
455 return;
456 }
457}
458
459/**
460 * ata_force_horkage - force horkage according to libata.force
461 * @dev: ATA device of interest
462 *
463 * Force horkage according to libata.force and whine about it.
464 * For consistency with link selection, device number 15 selects
465 * the first device connected to the host link.
466 *
467 * LOCKING:
468 * EH context.
469 */
470static void ata_force_horkage(struct ata_device *dev)
471{
472 int devno = dev->link->pmp + dev->devno;
473 int alt_devno = devno;
474 int i;
475
476 /* allow n.15/16 for devices attached to host port */
477 if (ata_is_host_link(dev->link))
478 alt_devno += 15;
479
480 for (i = 0; i < ata_force_tbl_size; i++) {
481 const struct ata_force_ent *fe = &ata_force_tbl[i];
482
483 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
484 continue;
485
486 if (fe->device != -1 && fe->device != devno &&
487 fe->device != alt_devno)
488 continue;
489
490 if (!(~dev->horkage & fe->param.horkage_on) &&
491 !(dev->horkage & fe->param.horkage_off))
492 continue;
493
494 dev->horkage |= fe->param.horkage_on;
495 dev->horkage &= ~fe->param.horkage_off;
496
497 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
498 fe->param.name);
499 }
500}
501#else
502static inline void ata_force_link_limits(struct ata_link *link) { }
503static inline void ata_force_xfermask(struct ata_device *dev) { }
504static inline void ata_force_horkage(struct ata_device *dev) { }
505#endif
506
507/**
508 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
509 * @opcode: SCSI opcode
510 *
511 * Determine ATAPI command type from @opcode.
512 *
513 * LOCKING:
514 * None.
515 *
516 * RETURNS:
517 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
518 */
519int atapi_cmd_type(u8 opcode)
520{
521 switch (opcode) {
522 case GPCMD_READ_10:
523 case GPCMD_READ_12:
524 return ATAPI_READ;
525
526 case GPCMD_WRITE_10:
527 case GPCMD_WRITE_12:
528 case GPCMD_WRITE_AND_VERIFY_10:
529 return ATAPI_WRITE;
530
531 case GPCMD_READ_CD:
532 case GPCMD_READ_CD_MSF:
533 return ATAPI_READ_CD;
534
535 case ATA_16:
536 case ATA_12:
537 if (atapi_passthru16)
538 return ATAPI_PASS_THRU;
539 fallthrough;
540 default:
541 return ATAPI_MISC;
542 }
543}
544EXPORT_SYMBOL_GPL(atapi_cmd_type);
545
546static const u8 ata_rw_cmds[] = {
547 /* pio multi */
548 ATA_CMD_READ_MULTI,
549 ATA_CMD_WRITE_MULTI,
550 ATA_CMD_READ_MULTI_EXT,
551 ATA_CMD_WRITE_MULTI_EXT,
552 0,
553 0,
554 0,
555 ATA_CMD_WRITE_MULTI_FUA_EXT,
556 /* pio */
557 ATA_CMD_PIO_READ,
558 ATA_CMD_PIO_WRITE,
559 ATA_CMD_PIO_READ_EXT,
560 ATA_CMD_PIO_WRITE_EXT,
561 0,
562 0,
563 0,
564 0,
565 /* dma */
566 ATA_CMD_READ,
567 ATA_CMD_WRITE,
568 ATA_CMD_READ_EXT,
569 ATA_CMD_WRITE_EXT,
570 0,
571 0,
572 0,
573 ATA_CMD_WRITE_FUA_EXT
574};
575
576/**
577 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
578 * @tf: command to examine and configure
579 * @dev: device tf belongs to
580 *
581 * Examine the device configuration and tf->flags to calculate
582 * the proper read/write commands and protocol to use.
583 *
584 * LOCKING:
585 * caller.
586 */
587static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
588{
589 u8 cmd;
590
591 int index, fua, lba48, write;
592
593 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
594 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
595 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
596
597 if (dev->flags & ATA_DFLAG_PIO) {
598 tf->protocol = ATA_PROT_PIO;
599 index = dev->multi_count ? 0 : 8;
600 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
601 /* Unable to use DMA due to host limitation */
602 tf->protocol = ATA_PROT_PIO;
603 index = dev->multi_count ? 0 : 8;
604 } else {
605 tf->protocol = ATA_PROT_DMA;
606 index = 16;
607 }
608
609 cmd = ata_rw_cmds[index + fua + lba48 + write];
610 if (cmd) {
611 tf->command = cmd;
612 return 0;
613 }
614 return -1;
615}
616
617/**
618 * ata_tf_read_block - Read block address from ATA taskfile
619 * @tf: ATA taskfile of interest
620 * @dev: ATA device @tf belongs to
621 *
622 * LOCKING:
623 * None.
624 *
625 * Read block address from @tf. This function can handle all
626 * three address formats - LBA, LBA48 and CHS. tf->protocol and
627 * flags select the address format to use.
628 *
629 * RETURNS:
630 * Block address read from @tf.
631 */
632u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
633{
634 u64 block = 0;
635
636 if (tf->flags & ATA_TFLAG_LBA) {
637 if (tf->flags & ATA_TFLAG_LBA48) {
638 block |= (u64)tf->hob_lbah << 40;
639 block |= (u64)tf->hob_lbam << 32;
640 block |= (u64)tf->hob_lbal << 24;
641 } else
642 block |= (tf->device & 0xf) << 24;
643
644 block |= tf->lbah << 16;
645 block |= tf->lbam << 8;
646 block |= tf->lbal;
647 } else {
648 u32 cyl, head, sect;
649
650 cyl = tf->lbam | (tf->lbah << 8);
651 head = tf->device & 0xf;
652 sect = tf->lbal;
653
654 if (!sect) {
655 ata_dev_warn(dev,
656 "device reported invalid CHS sector 0\n");
657 return U64_MAX;
658 }
659
660 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
661 }
662
663 return block;
664}
665
666/**
667 * ata_build_rw_tf - Build ATA taskfile for given read/write request
668 * @qc: Metadata associated with the taskfile to build
669 * @block: Block address
670 * @n_block: Number of blocks
671 * @tf_flags: RW/FUA etc...
672 * @class: IO priority class
673 *
674 * LOCKING:
675 * None.
676 *
677 * Build ATA taskfile for the command @qc for read/write request described
678 * by @block, @n_block, @tf_flags and @class.
679 *
 * RETURNS:
 * 0 on success, -ERANGE if the request is too large for @dev,
683 * -EINVAL if the request is invalid.
684 */
685int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
686 unsigned int tf_flags, int class)
687{
688 struct ata_taskfile *tf = &qc->tf;
689 struct ata_device *dev = qc->dev;
690
691 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
692 tf->flags |= tf_flags;
693
694 if (ata_ncq_enabled(dev)) {
695 /* yay, NCQ */
696 if (!lba_48_ok(block, n_block))
697 return -ERANGE;
698
699 tf->protocol = ATA_PROT_NCQ;
700 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
701
702 if (tf->flags & ATA_TFLAG_WRITE)
703 tf->command = ATA_CMD_FPDMA_WRITE;
704 else
705 tf->command = ATA_CMD_FPDMA_READ;
706
707 tf->nsect = qc->hw_tag << 3;
708 tf->hob_feature = (n_block >> 8) & 0xff;
709 tf->feature = n_block & 0xff;
710
711 tf->hob_lbah = (block >> 40) & 0xff;
712 tf->hob_lbam = (block >> 32) & 0xff;
713 tf->hob_lbal = (block >> 24) & 0xff;
714 tf->lbah = (block >> 16) & 0xff;
715 tf->lbam = (block >> 8) & 0xff;
716 tf->lbal = block & 0xff;
717
718 tf->device = ATA_LBA;
719 if (tf->flags & ATA_TFLAG_FUA)
720 tf->device |= 1 << 7;
721
722 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
723 class == IOPRIO_CLASS_RT)
724 tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
725 } else if (dev->flags & ATA_DFLAG_LBA) {
726 tf->flags |= ATA_TFLAG_LBA;
727
728 if (lba_28_ok(block, n_block)) {
729 /* use LBA28 */
730 tf->device |= (block >> 24) & 0xf;
731 } else if (lba_48_ok(block, n_block)) {
732 if (!(dev->flags & ATA_DFLAG_LBA48))
733 return -ERANGE;
734
735 /* use LBA48 */
736 tf->flags |= ATA_TFLAG_LBA48;
737
738 tf->hob_nsect = (n_block >> 8) & 0xff;
739
740 tf->hob_lbah = (block >> 40) & 0xff;
741 tf->hob_lbam = (block >> 32) & 0xff;
742 tf->hob_lbal = (block >> 24) & 0xff;
743 } else
744 /* request too large even for LBA48 */
745 return -ERANGE;
746
747 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
748 return -EINVAL;
749
750 tf->nsect = n_block & 0xff;
751
752 tf->lbah = (block >> 16) & 0xff;
753 tf->lbam = (block >> 8) & 0xff;
754 tf->lbal = block & 0xff;
755
756 tf->device |= ATA_LBA;
757 } else {
758 /* CHS */
759 u32 sect, head, cyl, track;
760
761 /* The request -may- be too large for CHS addressing. */
762 if (!lba_28_ok(block, n_block))
763 return -ERANGE;
764
765 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
766 return -EINVAL;
767
768 /* Convert LBA to CHS */
769 track = (u32)block / dev->sectors;
770 cyl = track / dev->heads;
771 head = track % dev->heads;
772 sect = (u32)block % dev->sectors + 1;
773
		/*
		 * Check whether the converted CHS can fit:
		 * Cylinder: 0-65535, Head: 0-15, Sector: 1-255
		 */
778 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
779 return -ERANGE;
780
781 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
782 tf->lbal = sect;
783 tf->lbam = cyl;
784 tf->lbah = cyl >> 8;
785 tf->device |= head;
786 }
787
788 return 0;
789}
790
791/**
792 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
793 * @pio_mask: pio_mask
794 * @mwdma_mask: mwdma_mask
795 * @udma_mask: udma_mask
796 *
797 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
798 * unsigned int xfer_mask.
799 *
800 * LOCKING:
801 * None.
802 *
803 * RETURNS:
804 * Packed xfer_mask.
805 */
806unsigned int ata_pack_xfermask(unsigned int pio_mask,
807 unsigned int mwdma_mask,
808 unsigned int udma_mask)
809{
810 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
811 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
812 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
813}
814EXPORT_SYMBOL_GPL(ata_pack_xfermask);
815
816/**
817 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
818 * @xfer_mask: xfer_mask to unpack
819 * @pio_mask: resulting pio_mask
820 * @mwdma_mask: resulting mwdma_mask
821 * @udma_mask: resulting udma_mask
822 *
823 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
824 * Any NULL destination masks will be ignored.
825 */
826void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
827 unsigned int *mwdma_mask, unsigned int *udma_mask)
828{
829 if (pio_mask)
830 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
831 if (mwdma_mask)
832 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
833 if (udma_mask)
834 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
835}
836
837static const struct ata_xfer_ent {
838 int shift, bits;
839 u8 base;
840} ata_xfer_tbl[] = {
841 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
842 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
843 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
844 { -1, },
845};
846
847/**
848 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
849 * @xfer_mask: xfer_mask of interest
850 *
851 * Return matching XFER_* value for @xfer_mask. Only the highest
852 * bit of @xfer_mask is considered.
853 *
854 * LOCKING:
855 * None.
856 *
857 * RETURNS:
858 * Matching XFER_* value, 0xff if no match found.
859 */
860u8 ata_xfer_mask2mode(unsigned int xfer_mask)
861{
862 int highbit = fls(xfer_mask) - 1;
863 const struct ata_xfer_ent *ent;
864
865 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
866 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
867 return ent->base + highbit - ent->shift;
868 return 0xff;
869}
870EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
871
872/**
873 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
874 * @xfer_mode: XFER_* of interest
875 *
876 * Return matching xfer_mask for @xfer_mode.
877 *
878 * LOCKING:
879 * None.
880 *
881 * RETURNS:
882 * Matching xfer_mask, 0 if no match found.
883 */
884unsigned int ata_xfer_mode2mask(u8 xfer_mode)
885{
886 const struct ata_xfer_ent *ent;
887
888 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
889 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
890 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
891 & ~((1 << ent->shift) - 1);
892 return 0;
893}
894EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
895
896/**
897 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
898 * @xfer_mode: XFER_* of interest
899 *
900 * Return matching xfer_shift for @xfer_mode.
901 *
902 * LOCKING:
903 * None.
904 *
905 * RETURNS:
906 * Matching xfer_shift, -1 if no match found.
907 */
908int ata_xfer_mode2shift(u8 xfer_mode)
909{
910 const struct ata_xfer_ent *ent;
911
912 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
913 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
914 return ent->shift;
915 return -1;
916}
917EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
918
919/**
920 * ata_mode_string - convert xfer_mask to string
921 * @xfer_mask: mask of bits supported; only highest bit counts.
922 *
 * Determine the string which represents the highest speed
 * (highest bit in @xfer_mask).
925 *
926 * LOCKING:
927 * None.
928 *
929 * RETURNS:
930 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
932 */
933const char *ata_mode_string(unsigned int xfer_mask)
934{
935 static const char * const xfer_mode_str[] = {
936 "PIO0",
937 "PIO1",
938 "PIO2",
939 "PIO3",
940 "PIO4",
941 "PIO5",
942 "PIO6",
943 "MWDMA0",
944 "MWDMA1",
945 "MWDMA2",
946 "MWDMA3",
947 "MWDMA4",
948 "UDMA/16",
949 "UDMA/25",
950 "UDMA/33",
951 "UDMA/44",
952 "UDMA/66",
953 "UDMA/100",
954 "UDMA/133",
955 "UDMA7",
956 };
957 int highbit;
958
959 highbit = fls(xfer_mask) - 1;
960 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
961 return xfer_mode_str[highbit];
962 return "<n/a>";
963}
964EXPORT_SYMBOL_GPL(ata_mode_string);
965
966const char *sata_spd_string(unsigned int spd)
967{
968 static const char * const spd_str[] = {
969 "1.5 Gbps",
970 "3.0 Gbps",
971 "6.0 Gbps",
972 };
973
974 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
975 return "<unknown>";
976 return spd_str[spd - 1];
977}
978
979/**
980 * ata_dev_classify - determine device type based on ATA-spec signature
981 * @tf: ATA taskfile register set for device to be identified
982 *
983 * Determine from taskfile register contents whether a device is
984 * ATA or ATAPI, as per "Signature and persistence" section
985 * of ATA/PI spec (volume 1, sect 5.14).
986 *
987 * LOCKING:
988 * None.
989 *
990 * RETURNS:
991 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
993 */
994unsigned int ata_dev_classify(const struct ata_taskfile *tf)
995{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers, so we
	 * only check those. It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively. However, the SerialATA
	 * specification has never mentioned using different signatures
	 * for ATA/ATAPI devices. Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for
	 * SerialATA.
1010 *
1011 * We follow the current spec and consider that 0x69/0x96
1012 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1013 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1014 * SEMB signature. This is worked around in
1015 * ata_dev_read_id().
1016 */
1017 if (tf->lbam == 0 && tf->lbah == 0)
1018 return ATA_DEV_ATA;
1019
1020 if (tf->lbam == 0x14 && tf->lbah == 0xeb)
1021 return ATA_DEV_ATAPI;
1022
1023 if (tf->lbam == 0x69 && tf->lbah == 0x96)
1024 return ATA_DEV_PMP;
1025
1026 if (tf->lbam == 0x3c && tf->lbah == 0xc3)
1027 return ATA_DEV_SEMB;
1028
1029 if (tf->lbam == 0xcd && tf->lbah == 0xab)
1030 return ATA_DEV_ZAC;
1031
1032 return ATA_DEV_UNKNOWN;
1033}
1034EXPORT_SYMBOL_GPL(ata_dev_classify);
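
/*
 * Signature summary for the checks above (LBA mid / LBA high):
 *
 *	0x00/0x00  ATA		0x14/0xeb  ATAPI
 *	0x69/0x96  PMP		0x3c/0xc3  SEMB
 *	0xcd/0xab  ZAC
 */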
1035
1036/**
1037 * ata_id_string - Convert IDENTIFY DEVICE page into string
1038 * @id: IDENTIFY DEVICE results we will examine
1039 * @s: string into which data is output
1040 * @ofs: offset into identify device page
1041 * @len: length of string to return. must be an even number.
1042 *
1043 * The strings in the IDENTIFY DEVICE page are broken up into
1044 * 16-bit chunks. Run through the string, and output each
1045 * 8-bit chunk linearly, regardless of platform.
1046 *
1047 * LOCKING:
1048 * caller.
1049 */
1050
1051void ata_id_string(const u16 *id, unsigned char *s,
1052 unsigned int ofs, unsigned int len)
1053{
1054 unsigned int c;
1055
1056 BUG_ON(len & 1);
1057
1058 while (len > 0) {
1059 c = id[ofs] >> 8;
1060 *s = c;
1061 s++;
1062
1063 c = id[ofs] & 0xff;
1064 *s = c;
1065 s++;
1066
1067 ofs++;
1068 len -= 2;
1069 }
1070}
1071EXPORT_SYMBOL_GPL(ata_id_string);
1072
1073/**
1074 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1075 * @id: IDENTIFY DEVICE results we will examine
1076 * @s: string into which data is output
1077 * @ofs: offset into identify device page
1078 * @len: length of string to return. must be an odd number.
1079 *
1080 * This function is identical to ata_id_string except that it
1081 * trims trailing spaces and terminates the resulting string with
1082 * null. @len must be actual maximum length (even number) + 1.
1083 *
1084 * LOCKING:
1085 * caller.
1086 */
1087void ata_id_c_string(const u16 *id, unsigned char *s,
1088 unsigned int ofs, unsigned int len)
1089{
1090 unsigned char *p;
1091
1092 ata_id_string(id, s, ofs, len - 1);
1093
1094 p = s + strnlen(s, len - 1);
1095 while (p > s && p[-1] == ' ')
1096 p--;
1097 *p = '\0';
1098}
1099EXPORT_SYMBOL_GPL(ata_id_c_string);
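
/*
 * Example (mirroring ata_dev_configure() below): convert the model string
 * into a NUL-terminated buffer one byte larger than the raw field:
 *
 *	char model[ATA_ID_PROD_LEN + 1];
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */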
1100
1101static u64 ata_id_n_sectors(const u16 *id)
1102{
1103 if (ata_id_has_lba(id)) {
1104 if (ata_id_has_lba48(id))
1105 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1106
1107 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1108 }
1109
1110 if (ata_id_current_chs_valid(id))
1111 return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
1112 (u32)id[ATA_ID_CUR_SECTORS];
1113
1114 return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
1115 (u32)id[ATA_ID_SECTORS];
1116}
1117
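/*
 * Reassemble a 48-bit LBA from a taskfile: the HOB registers hold
 * bits 47:24 and the low-order registers hold bits 23:0.
 */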
1118u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1119{
1120 u64 sectors = 0;
1121
1122 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1123 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1124 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1125 sectors |= (tf->lbah & 0xff) << 16;
1126 sectors |= (tf->lbam & 0xff) << 8;
1127 sectors |= (tf->lbal & 0xff);
1128
1129 return sectors;
1130}
1131
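/*
 * Reassemble a 28-bit LBA: bits 27:24 live in the low nibble of the
 * device register.
 */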
1132u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1133{
1134 u64 sectors = 0;
1135
1136 sectors |= (tf->device & 0x0f) << 24;
1137 sectors |= (tf->lbah & 0xff) << 16;
1138 sectors |= (tf->lbam & 0xff) << 8;
1139 sectors |= (tf->lbal & 0xff);
1140
1141 return sectors;
1142}
1143
1144/**
1145 * ata_read_native_max_address - Read native max address
1146 * @dev: target device
1147 * @max_sectors: out parameter for the result native max address
1148 *
1149 * Perform an LBA48 or LBA28 native size query upon the device in
1150 * question.
1151 *
1152 * RETURNS:
1153 * 0 on success, -EACCES if command is aborted by the drive.
1154 * -EIO on other errors.
1155 */
1156static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1157{
1158 unsigned int err_mask;
1159 struct ata_taskfile tf;
1160 int lba48 = ata_id_has_lba48(dev->id);
1161
1162 ata_tf_init(dev, &tf);
1163
1164 /* always clear all address registers */
1165 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1166
1167 if (lba48) {
1168 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1169 tf.flags |= ATA_TFLAG_LBA48;
1170 } else
1171 tf.command = ATA_CMD_READ_NATIVE_MAX;
1172
1173 tf.protocol = ATA_PROT_NODATA;
1174 tf.device |= ATA_LBA;
1175
1176 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1177 if (err_mask) {
1178 ata_dev_warn(dev,
1179 "failed to read native max address (err_mask=0x%x)\n",
1180 err_mask);
1181 if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
1182 return -EACCES;
1183 return -EIO;
1184 }
1185
1186 if (lba48)
1187 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1188 else
1189 *max_sectors = ata_tf_to_lba(&tf) + 1;
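	/* Some devices report the native max address off by one
	 * (ATA_HORKAGE_HPA_SIZE); compensate for that here.
	 */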
1190 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1191 (*max_sectors)--;
1192 return 0;
1193}
1194
1195/**
1196 * ata_set_max_sectors - Set max sectors
1197 * @dev: target device
1198 * @new_sectors: new max sectors value to set for the device
1199 *
1200 * Set max sectors of @dev to @new_sectors.
1201 *
1202 * RETURNS:
1203 * 0 on success, -EACCES if command is aborted or denied (due to
1204 * previous non-volatile SET_MAX) by the drive. -EIO on other
1205 * errors.
1206 */
1207static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1208{
1209 unsigned int err_mask;
1210 struct ata_taskfile tf;
1211 int lba48 = ata_id_has_lba48(dev->id);
1212
1213 new_sectors--;
1214
1215 ata_tf_init(dev, &tf);
1216
1217 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1218
1219 if (lba48) {
1220 tf.command = ATA_CMD_SET_MAX_EXT;
1221 tf.flags |= ATA_TFLAG_LBA48;
1222
1223 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1224 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1225 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1226 } else {
1227 tf.command = ATA_CMD_SET_MAX;
1228
1229 tf.device |= (new_sectors >> 24) & 0xf;
1230 }
1231
1232 tf.protocol = ATA_PROT_NODATA;
1233 tf.device |= ATA_LBA;
1234
1235 tf.lbal = (new_sectors >> 0) & 0xff;
1236 tf.lbam = (new_sectors >> 8) & 0xff;
1237 tf.lbah = (new_sectors >> 16) & 0xff;
1238
1239 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1240 if (err_mask) {
1241 ata_dev_warn(dev,
1242 "failed to set max address (err_mask=0x%x)\n",
1243 err_mask);
1244 if (err_mask == AC_ERR_DEV &&
1245 (tf.error & (ATA_ABORTED | ATA_IDNF)))
1246 return -EACCES;
1247 return -EIO;
1248 }
1249
1250 return 0;
1251}
1252
1253/**
1254 * ata_hpa_resize - Resize a device with an HPA set
1255 * @dev: Device to resize
1256 *
1257 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1258 * it if required to the full size of the media. The caller must check
1259 * the drive has the HPA feature set enabled.
1260 *
1261 * RETURNS:
1262 * 0 on success, -errno on failure.
1263 */
1264static int ata_hpa_resize(struct ata_device *dev)
1265{
1266 bool print_info = ata_dev_print_info(dev);
1267 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1268 u64 sectors = ata_id_n_sectors(dev->id);
1269 u64 native_sectors;
1270 int rc;
1271
1272 /* do we need to do it? */
1273 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1274 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1275 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1276 return 0;
1277
1278 /* read native max address */
1279 rc = ata_read_native_max_address(dev, &native_sectors);
1280 if (rc) {
1281 /* If device aborted the command or HPA isn't going to
1282 * be unlocked, skip HPA resizing.
1283 */
1284 if (rc == -EACCES || !unlock_hpa) {
1285 ata_dev_warn(dev,
1286 "HPA support seems broken, skipping HPA handling\n");
1287 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1288
1289 /* we can continue if device aborted the command */
1290 if (rc == -EACCES)
1291 rc = 0;
1292 }
1293
1294 return rc;
1295 }
1296 dev->n_native_sectors = native_sectors;
1297
1298 /* nothing to do? */
1299 if (native_sectors <= sectors || !unlock_hpa) {
1300 if (!print_info || native_sectors == sectors)
1301 return 0;
1302
1303 if (native_sectors > sectors)
1304 ata_dev_info(dev,
1305 "HPA detected: current %llu, native %llu\n",
1306 (unsigned long long)sectors,
1307 (unsigned long long)native_sectors);
1308 else if (native_sectors < sectors)
1309 ata_dev_warn(dev,
1310 "native sectors (%llu) is smaller than sectors (%llu)\n",
1311 (unsigned long long)native_sectors,
1312 (unsigned long long)sectors);
1313 return 0;
1314 }
1315
1316 /* let's unlock HPA */
1317 rc = ata_set_max_sectors(dev, native_sectors);
1318 if (rc == -EACCES) {
1319 /* if device aborted the command, skip HPA resizing */
1320 ata_dev_warn(dev,
1321 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1322 (unsigned long long)sectors,
1323 (unsigned long long)native_sectors);
1324 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1325 return 0;
1326 } else if (rc)
1327 return rc;
1328
1329 /* re-read IDENTIFY data */
1330 rc = ata_dev_reread_id(dev, 0);
1331 if (rc) {
1332 ata_dev_err(dev,
1333 "failed to re-read IDENTIFY data after HPA resizing\n");
1334 return rc;
1335 }
1336
1337 if (print_info) {
1338 u64 new_sectors = ata_id_n_sectors(dev->id);
1339 ata_dev_info(dev,
1340 "HPA unlocked: %llu -> %llu, native %llu\n",
1341 (unsigned long long)sectors,
1342 (unsigned long long)new_sectors,
1343 (unsigned long long)native_sectors);
1344 }
1345
1346 return 0;
1347}
1348
1349/**
1350 * ata_dump_id - IDENTIFY DEVICE info debugging output
1351 * @dev: device from which the information is fetched
1352 * @id: IDENTIFY DEVICE page to dump
1353 *
1354 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1355 * page.
1356 *
1357 * LOCKING:
1358 * caller.
1359 */
1360
1361static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
1362{
1363 ata_dev_dbg(dev,
1364 "49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
1365 "80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
1366 "88==0x%04x 93==0x%04x\n",
1367 id[49], id[53], id[63], id[64], id[75], id[80],
1368 id[81], id[82], id[83], id[84], id[88], id[93]);
1369}
1370
1371/**
1372 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1373 * @id: IDENTIFY data to compute xfer mask from
1374 *
1375 * Compute the xfermask for this device. This is not as trivial
1376 * as it seems if we must consider early devices correctly.
1377 *
1378 * FIXME: pre IDE drive timing (do we care ?).
1379 *
1380 * LOCKING:
1381 * None.
1382 *
1383 * RETURNS:
1384 * Computed xfermask
1385 */
1386unsigned int ata_id_xfermask(const u16 *id)
1387{
1388 unsigned int pio_mask, mwdma_mask, udma_mask;
1389
1390 /* Usual case. Word 53 indicates word 64 is valid */
1391 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1392 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1393 pio_mask <<= 3;
1394 pio_mask |= 0x7;
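		/* e.g. word 64 == 0x3 (PIO3 and PIO4 supported) yields
		 * pio_mask == 0x1f, i.e. PIO0 through PIO4.
		 */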
1395 } else {
1396 /* If word 64 isn't valid then Word 51 high byte holds
1397 * the PIO timing number for the maximum. Turn it into
1398 * a mask.
1399 */
1400 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1401 if (mode < 5) /* Valid PIO range */
1402 pio_mask = (2 << mode) - 1;
1403 else
1404 pio_mask = 1;
1405
		/* But wait... there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However, it is the speeds, not the modes, that
		 * are supported...  Note that drivers using the timing API
		 * will get this right anyway.
		 */
1412 }
1413
1414 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1415
1416 if (ata_id_is_cfa(id)) {
1417 /*
1418 * Process compact flash extended modes
1419 */
1420 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1421 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1422
1423 if (pio)
1424 pio_mask |= (1 << 5);
1425 if (pio > 1)
1426 pio_mask |= (1 << 6);
1427 if (dma)
1428 mwdma_mask |= (1 << 3);
1429 if (dma > 1)
1430 mwdma_mask |= (1 << 4);
1431 }
1432
1433 udma_mask = 0;
1434 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1435 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1436
1437 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1438}
1439EXPORT_SYMBOL_GPL(ata_id_xfermask);
1440
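/* Completion callback for internal commands: just wake up the waiter. */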
1441static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1442{
1443 struct completion *waiting = qc->private_data;
1444
1445 complete(waiting);
1446}
1447
1448/**
1449 * ata_exec_internal_sg - execute libata internal command
1450 * @dev: Device to which the command is sent
1451 * @tf: Taskfile registers for the command and the result
1452 * @cdb: CDB for packet command
1453 * @dma_dir: Data transfer direction of the command
1454 * @sgl: sg list for the data buffer of the command
1455 * @n_elem: Number of sg entries
1456 * @timeout: Timeout in msecs (0 for default)
1457 *
1458 * Executes libata internal command with timeout. @tf contains
1459 * command on entry and result on return. Timeout and error
1460 * conditions are reported via return value. No recovery action
1461 * is taken after a command times out. It's caller's duty to
 * is taken after a command times out.  It is the caller's duty to
 * clean up after a timeout.
1464 * LOCKING:
1465 * None. Should be called with kernel context, might sleep.
1466 *
1467 * RETURNS:
1468 * Zero on success, AC_ERR_* mask on failure
1469 */
1470static unsigned ata_exec_internal_sg(struct ata_device *dev,
1471 struct ata_taskfile *tf, const u8 *cdb,
1472 int dma_dir, struct scatterlist *sgl,
1473 unsigned int n_elem, unsigned int timeout)
1474{
1475 struct ata_link *link = dev->link;
1476 struct ata_port *ap = link->ap;
1477 u8 command = tf->command;
1478 int auto_timeout = 0;
1479 struct ata_queued_cmd *qc;
1480 unsigned int preempted_tag;
1481 u32 preempted_sactive;
1482 u64 preempted_qc_active;
1483 int preempted_nr_active_links;
1484 DECLARE_COMPLETION_ONSTACK(wait);
1485 unsigned long flags;
1486 unsigned int err_mask;
1487 int rc;
1488
1489 spin_lock_irqsave(ap->lock, flags);
1490
1491 /* no internal command while frozen */
1492 if (ata_port_is_frozen(ap)) {
1493 spin_unlock_irqrestore(ap->lock, flags);
1494 return AC_ERR_SYSTEM;
1495 }
1496
1497 /* initialize internal qc */
1498 qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1499
1500 qc->tag = ATA_TAG_INTERNAL;
1501 qc->hw_tag = 0;
1502 qc->scsicmd = NULL;
1503 qc->ap = ap;
1504 qc->dev = dev;
1505 ata_qc_reinit(qc);
1506
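	/* Save the current command state of the port/link and clear it so
	 * the internal command runs with no other commands marked active;
	 * the state is restored once the command completes.
	 */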
1507 preempted_tag = link->active_tag;
1508 preempted_sactive = link->sactive;
1509 preempted_qc_active = ap->qc_active;
1510 preempted_nr_active_links = ap->nr_active_links;
1511 link->active_tag = ATA_TAG_POISON;
1512 link->sactive = 0;
1513 ap->qc_active = 0;
1514 ap->nr_active_links = 0;
1515
1516 /* prepare & issue qc */
1517 qc->tf = *tf;
1518 if (cdb)
1519 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1520
1521 /* some SATA bridges need us to indicate data xfer direction */
1522 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1523 dma_dir == DMA_FROM_DEVICE)
1524 qc->tf.feature |= ATAPI_DMADIR;
1525
1526 qc->flags |= ATA_QCFLAG_RESULT_TF;
1527 qc->dma_dir = dma_dir;
1528 if (dma_dir != DMA_NONE) {
1529 unsigned int i, buflen = 0;
1530 struct scatterlist *sg;
1531
1532 for_each_sg(sgl, sg, n_elem, i)
1533 buflen += sg->length;
1534
1535 ata_sg_init(qc, sgl, n_elem);
1536 qc->nbytes = buflen;
1537 }
1538
1539 qc->private_data = &wait;
1540 qc->complete_fn = ata_qc_complete_internal;
1541
1542 ata_qc_issue(qc);
1543
1544 spin_unlock_irqrestore(ap->lock, flags);
1545
1546 if (!timeout) {
1547 if (ata_probe_timeout)
1548 timeout = ata_probe_timeout * 1000;
1549 else {
1550 timeout = ata_internal_cmd_timeout(dev, command);
1551 auto_timeout = 1;
1552 }
1553 }
1554
1555 if (ap->ops->error_handler)
1556 ata_eh_release(ap);
1557
1558 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1559
1560 if (ap->ops->error_handler)
1561 ata_eh_acquire(ap);
1562
1563 ata_sff_flush_pio_task(ap);
1564
1565 if (!rc) {
1566 spin_lock_irqsave(ap->lock, flags);
1567
1568 /* We're racing with irq here. If we lose, the
1569 * following test prevents us from completing the qc
1570 * twice. If we win, the port is frozen and will be
1571 * cleaned up by ->post_internal_cmd().
1572 */
1573 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1574 qc->err_mask |= AC_ERR_TIMEOUT;
1575
1576 if (ap->ops->error_handler)
1577 ata_port_freeze(ap);
1578 else
1579 ata_qc_complete(qc);
1580
1581 ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
1582 timeout, command);
1583 }
1584
1585 spin_unlock_irqrestore(ap->lock, flags);
1586 }
1587
1588 /* do post_internal_cmd */
1589 if (ap->ops->post_internal_cmd)
1590 ap->ops->post_internal_cmd(qc);
1591
1592 /* perform minimal error analysis */
1593 if (qc->flags & ATA_QCFLAG_FAILED) {
1594 if (qc->result_tf.status & (ATA_ERR | ATA_DF))
1595 qc->err_mask |= AC_ERR_DEV;
1596
1597 if (!qc->err_mask)
1598 qc->err_mask |= AC_ERR_OTHER;
1599
1600 if (qc->err_mask & ~AC_ERR_OTHER)
1601 qc->err_mask &= ~AC_ERR_OTHER;
1602 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1603 qc->result_tf.status |= ATA_SENSE;
1604 }
1605
1606 /* finish up */
1607 spin_lock_irqsave(ap->lock, flags);
1608
1609 *tf = qc->result_tf;
1610 err_mask = qc->err_mask;
1611
1612 ata_qc_free(qc);
1613 link->active_tag = preempted_tag;
1614 link->sactive = preempted_sactive;
1615 ap->qc_active = preempted_qc_active;
1616 ap->nr_active_links = preempted_nr_active_links;
1617
1618 spin_unlock_irqrestore(ap->lock, flags);
1619
1620 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1621 ata_internal_cmd_timed_out(dev, command);
1622
1623 return err_mask;
1624}
1625
1626/**
1627 * ata_exec_internal - execute libata internal command
1628 * @dev: Device to which the command is sent
1629 * @tf: Taskfile registers for the command and the result
1630 * @cdb: CDB for packet command
1631 * @dma_dir: Data transfer direction of the command
1632 * @buf: Data buffer of the command
1633 * @buflen: Length of data buffer
1634 * @timeout: Timeout in msecs (0 for default)
1635 *
 * Wrapper around ata_exec_internal_sg() which takes a simple
 * buffer instead of an sg list.
1638 *
1639 * LOCKING:
1640 * None. Should be called with kernel context, might sleep.
1641 *
1642 * RETURNS:
1643 * Zero on success, AC_ERR_* mask on failure
1644 */
1645unsigned ata_exec_internal(struct ata_device *dev,
1646 struct ata_taskfile *tf, const u8 *cdb,
1647 int dma_dir, void *buf, unsigned int buflen,
1648 unsigned int timeout)
1649{
1650 struct scatterlist *psg = NULL, sg;
1651 unsigned int n_elem = 0;
1652
1653 if (dma_dir != DMA_NONE) {
1654 WARN_ON(!buf);
1655 sg_init_one(&sg, buf, buflen);
1656 psg = &sg;
1657 n_elem++;
1658 }
1659
1660 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1661 timeout);
1662}
1663
1664/**
1665 * ata_pio_need_iordy - check if iordy needed
1666 * @adev: ATA device
1667 *
1668 * Check if the current speed of the device requires IORDY. Used
1669 * by various controllers for chip configuration.
1670 */
1671unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1672{
1673 /* Don't set IORDY if we're preparing for reset. IORDY may
1674 * lead to controller lock up on certain controllers if the
1675 * port is not occupied. See bko#11703 for details.
1676 */
1677 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1678 return 0;
1679 /* Controller doesn't support IORDY. Probably a pointless
1680 * check as the caller should know this.
1681 */
1682 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1683 return 0;
1684 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1685 if (ata_id_is_cfa(adev->id)
1686 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1687 return 0;
1688 /* PIO3 and higher it is mandatory */
1689 if (adev->pio_mode > XFER_PIO_2)
1690 return 1;
1691 /* We turn it on when possible */
1692 if (ata_id_has_iordy(adev->id))
1693 return 1;
1694 return 0;
1695}
1696EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
1697
1698/**
1699 * ata_pio_mask_no_iordy - Return the non IORDY mask
1700 * @adev: ATA device
1701 *
1702 * Compute the highest mode possible if we are not using iordy. Return
1703 * -1 if no iordy mode is available.
1704 */
1705static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1706{
1707 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1708 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1709 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1710 /* Is the speed faster than the drive allows non IORDY ? */
1711 if (pio) {
1712 /* This is cycle times not frequency - watch the logic! */
1713 if (pio > 240) /* PIO2 is 240nS per cycle */
1714 return 3 << ATA_SHIFT_PIO;
1715 return 7 << ATA_SHIFT_PIO;
1716 }
1717 }
1718 return 3 << ATA_SHIFT_PIO;
1719}
1720
1721/**
1722 * ata_do_dev_read_id - default ID read method
1723 * @dev: device
1724 * @tf: proposed taskfile
1725 * @id: data buffer
1726 *
1727 * Issue the identify taskfile and hand back the buffer containing
 * identify data. For some RAID controllers and for pre-ATA devices
 * this function is wrapped or replaced by the driver.
1730 */
1731unsigned int ata_do_dev_read_id(struct ata_device *dev,
1732 struct ata_taskfile *tf, __le16 *id)
1733{
1734 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1735 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1736}
1737EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1738
1739/**
1740 * ata_dev_read_id - Read ID data from the specified device
1741 * @dev: target device
1742 * @p_class: pointer to class of the target device (may be changed)
1743 * @flags: ATA_READID_* flags
1744 * @id: buffer to read IDENTIFY data into
1745 *
1746 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1747 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1748 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1749 * for pre-ATA4 drives.
1750 *
1751 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1752 * now we abort if we hit that case.
1753 *
1754 * LOCKING:
1755 * Kernel thread context (may sleep)
1756 *
1757 * RETURNS:
1758 * 0 on success, -errno otherwise.
1759 */
1760int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1761 unsigned int flags, u16 *id)
1762{
1763 struct ata_port *ap = dev->link->ap;
1764 unsigned int class = *p_class;
1765 struct ata_taskfile tf;
1766 unsigned int err_mask = 0;
1767 const char *reason;
1768 bool is_semb = class == ATA_DEV_SEMB;
1769 int may_fallback = 1, tried_spinup = 0;
1770 int rc;
1771
1772retry:
1773 ata_tf_init(dev, &tf);
1774
1775 switch (class) {
1776 case ATA_DEV_SEMB:
1777 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1778 fallthrough;
1779 case ATA_DEV_ATA:
1780 case ATA_DEV_ZAC:
1781 tf.command = ATA_CMD_ID_ATA;
1782 break;
1783 case ATA_DEV_ATAPI:
1784 tf.command = ATA_CMD_ID_ATAPI;
1785 break;
1786 default:
1787 rc = -ENODEV;
1788 reason = "unsupported class";
1789 goto err_out;
1790 }
1791
1792 tf.protocol = ATA_PROT_PIO;
1793
1794 /* Some devices choke if TF registers contain garbage. Make
1795 * sure those are properly initialized.
1796 */
1797 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1798
1799 /* Device presence detection is unreliable on some
1800 * controllers. Always poll IDENTIFY if available.
1801 */
1802 tf.flags |= ATA_TFLAG_POLLING;
1803
1804 if (ap->ops->read_id)
1805 err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
1806 else
1807 err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
1808
1809 if (err_mask) {
1810 if (err_mask & AC_ERR_NODEV_HINT) {
1811 ata_dev_dbg(dev, "NODEV after polling detection\n");
1812 return -ENOENT;
1813 }
1814
1815 if (is_semb) {
1816 ata_dev_info(dev,
1817 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1818 /* SEMB is not supported yet */
1819 *p_class = ATA_DEV_SEMB_UNSUP;
1820 return 0;
1821 }
1822
1823 if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
1824 /* Device or controller might have reported
1825 * the wrong device class. Give a shot at the
1826 * other IDENTIFY if the current one is
1827 * aborted by the device.
1828 */
1829 if (may_fallback) {
1830 may_fallback = 0;
1831
1832 if (class == ATA_DEV_ATA)
1833 class = ATA_DEV_ATAPI;
1834 else
1835 class = ATA_DEV_ATA;
1836 goto retry;
1837 }
1838
1839 /* Control reaches here iff the device aborted
1840 * both flavors of IDENTIFYs which happens
1841 * sometimes with phantom devices.
1842 */
1843 ata_dev_dbg(dev,
1844 "both IDENTIFYs aborted, assuming NODEV\n");
1845 return -ENOENT;
1846 }
1847
1848 rc = -EIO;
1849 reason = "I/O error";
1850 goto err_out;
1851 }
1852
1853 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1854 ata_dev_info(dev, "dumping IDENTIFY data, "
1855 "class=%d may_fallback=%d tried_spinup=%d\n",
1856 class, may_fallback, tried_spinup);
1857 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
1858 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1859 }
1860
1861 /* Falling back doesn't make sense if ID data was read
1862 * successfully at least once.
1863 */
1864 may_fallback = 0;
1865
1866 swap_buf_le16(id, ATA_ID_WORDS);
1867
1868 /* sanity check */
1869 rc = -EINVAL;
1870 reason = "device reports invalid type";
1871
1872 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1873 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1874 goto err_out;
1875 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1876 ata_id_is_ata(id)) {
1877 ata_dev_dbg(dev,
1878 "host indicates ignore ATA devices, ignored\n");
1879 return -ENOENT;
1880 }
1881 } else {
1882 if (ata_id_is_ata(id))
1883 goto err_out;
1884 }
1885
1886 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1887 tried_spinup = 1;
1888 /*
1889 * Drive powered-up in standby mode, and requires a specific
1890 * SET_FEATURES spin-up subcommand before it will accept
1891 * anything other than the original IDENTIFY command.
1892 */
1893 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1894 if (err_mask && id[2] != 0x738c) {
1895 rc = -EIO;
1896 reason = "SPINUP failed";
1897 goto err_out;
1898 }
1899 /*
1900 * If the drive initially returned incomplete IDENTIFY info,
1901 * we now must reissue the IDENTIFY command.
1902 */
1903 if (id[2] == 0x37c8)
1904 goto retry;
1905 }
1906
1907 if ((flags & ATA_READID_POSTRESET) &&
1908 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1909 /*
1910 * The exact sequence expected by certain pre-ATA4 drives is:
1911 * SRST RESET
1912 * IDENTIFY (optional in early ATA)
1913 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1914 * anything else..
1915 * Some drives were very specific about that exact sequence.
1916 *
1917 * Note that ATA4 says lba is mandatory so the second check
1918 * should never trigger.
1919 */
1920 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1921 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1922 if (err_mask) {
1923 rc = -EIO;
1924 reason = "INIT_DEV_PARAMS failed";
1925 goto err_out;
1926 }
1927
1928 /* current CHS translation info (id[53-58]) might be
1929 * changed. reread the identify device info.
1930 */
1931 flags &= ~ATA_READID_POSTRESET;
1932 goto retry;
1933 }
1934 }
1935
1936 *p_class = class;
1937
1938 return 0;
1939
1940 err_out:
1941 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1942 reason, err_mask);
1943 return rc;
1944}
1945
1946/**
1947 * ata_read_log_page - read a specific log page
1948 * @dev: target device
1949 * @log: log to read
1950 * @page: page to read
1951 * @buf: buffer to store read page
1952 * @sectors: number of sectors to read
1953 *
 * Read log page using the READ LOG EXT or READ LOG DMA EXT command.
1955 *
1956 * LOCKING:
1957 * Kernel thread context (may sleep).
1958 *
1959 * RETURNS:
1960 * 0 on success, AC_ERR_* mask otherwise.
1961 */
1962unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1963 u8 page, void *buf, unsigned int sectors)
1964{
1965 unsigned long ap_flags = dev->link->ap->flags;
1966 struct ata_taskfile tf;
1967 unsigned int err_mask;
1968 bool dma = false;
1969
1970 ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
1971
1972 /*
1973 * Return error without actually issuing the command on controllers
1974 * which e.g. lockup on a read log page.
1975 */
1976 if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
1977 return AC_ERR_DEV;
1978
1979retry:
1980 ata_tf_init(dev, &tf);
1981 if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
1982 !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
1983 tf.command = ATA_CMD_READ_LOG_DMA_EXT;
1984 tf.protocol = ATA_PROT_DMA;
1985 dma = true;
1986 } else {
1987 tf.command = ATA_CMD_READ_LOG_EXT;
1988 tf.protocol = ATA_PROT_PIO;
1989 dma = false;
1990 }
1991 tf.lbal = log;
1992 tf.lbam = page;
1993 tf.nsect = sectors;
1994 tf.hob_nsect = sectors >> 8;
1995 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1996
1997 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1998 buf, sectors * ATA_SECT_SIZE, 0);
1999
2000 if (err_mask) {
2001 if (dma) {
2002 dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2003 if (!ata_port_is_frozen(dev->link->ap))
2004 goto retry;
2005 }
2006 ata_dev_err(dev,
2007 "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
2008 (unsigned int)log, (unsigned int)page, err_mask);
2009 }
2010
2011 return err_mask;
2012}
2013
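/*
 * Return the number of log pages available for @log according to the
 * General Purpose Log Directory, or 0 if the log is not supported.
 */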
2014static int ata_log_supported(struct ata_device *dev, u8 log)
2015{
2016 struct ata_port *ap = dev->link->ap;
2017
2018 if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
2019 return 0;
2020
2021 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2022 return 0;
2023 return get_unaligned_le16(&ap->sector_buf[log * 2]);
2024}
2025
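/*
 * Check whether page @page of the IDENTIFY DEVICE data log is supported,
 * using the list of supported pages returned in page 0 of that log.
 */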
2026static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2027{
2028 struct ata_port *ap = dev->link->ap;
2029 unsigned int err, i;
2030
2031 if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
2032 return false;
2033
2034 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2035 /*
2036 * IDENTIFY DEVICE data log is defined as mandatory starting
2037 * with ACS-3 (ATA version 10). Warn about the missing log
2038 * for drives which implement this ATA level or above.
2039 */
2040 if (ata_id_major_version(dev->id) >= 10)
2041 ata_dev_warn(dev,
2042 "ATA Identify Device Log not supported\n");
2043 dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
2044 return false;
2045 }
2046
2047 /*
2048 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2049 * supported.
2050 */
2051 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2052 1);
2053 if (err)
2054 return false;
2055
2056 for (i = 0; i < ap->sector_buf[8]; i++) {
2057 if (ap->sector_buf[9 + i] == page)
2058 return true;
2059 }
2060
2061 return false;
2062}
2063
2064static int ata_do_link_spd_horkage(struct ata_device *dev)
2065{
2066 struct ata_link *plink = ata_dev_phys_link(dev);
2067 u32 target, target_limit;
2068
2069 if (!sata_scr_valid(plink))
2070 return 0;
2071
2072 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2073 target = 1;
2074 else
2075 return 0;
2076
2077 target_limit = (1 << target) - 1;
2078
2079 /* if already on stricter limit, no need to push further */
2080 if (plink->sata_spd_limit <= target_limit)
2081 return 0;
2082
2083 plink->sata_spd_limit = target_limit;
2084
2085 /* Request another EH round by returning -EAGAIN if link is
2086 * going faster than the target speed. Forward progress is
2087 * guaranteed by setting sata_spd_limit to target_limit above.
2088 */
2089 if (plink->sata_spd > target) {
2090 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2091 sata_spd_string(target));
2092 return -EAGAIN;
2093 }
2094 return 0;
2095}
2096
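/*
 * A "knobbled" device is a PATA drive sitting behind a SATA bridge: the
 * port reports a SATA cable but the device's IDENTIFY data does not
 * advertise SATA, so the bridge transfer limits below are applied.
 */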
2097static inline u8 ata_dev_knobble(struct ata_device *dev)
2098{
2099 struct ata_port *ap = dev->link->ap;
2100
2101 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2102 return 0;
2103
2104 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2105}
2106
2107static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2108{
2109 struct ata_port *ap = dev->link->ap;
2110 unsigned int err_mask;
2111
2112 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2113 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2114 return;
2115 }
2116 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2117 0, ap->sector_buf, 1);
2118 if (!err_mask) {
2119 u8 *cmds = dev->ncq_send_recv_cmds;
2120
2121 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2122 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2123
2124 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2125 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2126 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2127 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2128 }
2129 }
2130}
2131
2132static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2133{
2134 struct ata_port *ap = dev->link->ap;
2135 unsigned int err_mask;
2136
2137 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
		ata_dev_warn(dev,
			     "NCQ Non-Data Log not supported\n");
2140 return;
2141 }
2142 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2143 0, ap->sector_buf, 1);
2144 if (!err_mask) {
2145 u8 *cmds = dev->ncq_non_data_cmds;
2146
2147 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2148 }
2149}
2150
2151static void ata_dev_config_ncq_prio(struct ata_device *dev)
2152{
2153 struct ata_port *ap = dev->link->ap;
2154 unsigned int err_mask;
2155
2156 if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2157 return;
2158
2159 err_mask = ata_read_log_page(dev,
2160 ATA_LOG_IDENTIFY_DEVICE,
2161 ATA_LOG_SATA_SETTINGS,
2162 ap->sector_buf,
2163 1);
2164 if (err_mask)
2165 goto not_supported;
2166
2167 if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2168 goto not_supported;
2169
2170 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2171
2172 return;
2173
2174not_supported:
2175 dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
2176 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2177}
2178
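/*
 * Check whether the adapter that @dev is attached to is a PCI device with
 * the given vendor ID, by walking up the device tree from @dev.
 */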
2179static bool ata_dev_check_adapter(struct ata_device *dev,
2180 unsigned short vendor_id)
2181{
2182 struct pci_dev *pcidev = NULL;
2183 struct device *parent_dev = NULL;
2184
2185 for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2186 parent_dev = parent_dev->parent) {
2187 if (dev_is_pci(parent_dev)) {
2188 pcidev = to_pci_dev(parent_dev);
2189 if (pcidev->vendor == vendor_id)
2190 return true;
2191 break;
2192 }
2193 }
2194
2195 return false;
2196}
2197
2198static int ata_dev_config_ncq(struct ata_device *dev,
2199 char *desc, size_t desc_sz)
2200{
2201 struct ata_port *ap = dev->link->ap;
2202 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2203 unsigned int err_mask;
2204 char *aa_desc = "";
2205
2206 if (!ata_id_has_ncq(dev->id)) {
2207 desc[0] = '\0';
2208 return 0;
2209 }
2210 if (!IS_ENABLED(CONFIG_SATA_HOST))
2211 return 0;
2212 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2213 snprintf(desc, desc_sz, "NCQ (not used)");
2214 return 0;
2215 }
2216
2217 if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
2218 ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
2219 snprintf(desc, desc_sz, "NCQ (not used)");
2220 return 0;
2221 }
2222
2223 if (ap->flags & ATA_FLAG_NCQ) {
2224 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2225 dev->flags |= ATA_DFLAG_NCQ;
2226 }
2227
2228 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2229 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2230 ata_id_has_fpdma_aa(dev->id)) {
2231 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2232 SATA_FPDMA_AA);
2233 if (err_mask) {
2234 ata_dev_err(dev,
2235 "failed to enable AA (error_mask=0x%x)\n",
2236 err_mask);
2237 if (err_mask != AC_ERR_DEV) {
2238 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2239 return -EIO;
2240 }
2241 } else
2242 aa_desc = ", AA";
2243 }
2244
2245 if (hdepth >= ddepth)
2246 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2247 else
2248 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2249 ddepth, aa_desc);
2250
2251 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2252 if (ata_id_has_ncq_send_and_recv(dev->id))
2253 ata_dev_config_ncq_send_recv(dev);
2254 if (ata_id_has_ncq_non_data(dev->id))
2255 ata_dev_config_ncq_non_data(dev);
2256 if (ata_id_has_ncq_prio(dev->id))
2257 ata_dev_config_ncq_prio(dev);
2258 }
2259
2260 return 0;
2261}
2262
2263static void ata_dev_config_sense_reporting(struct ata_device *dev)
2264{
2265 unsigned int err_mask;
2266
2267 if (!ata_id_has_sense_reporting(dev->id))
2268 return;
2269
2270 if (ata_id_sense_reporting_enabled(dev->id))
2271 return;
2272
2273 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2274 if (err_mask) {
2275 ata_dev_dbg(dev,
2276 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2277 err_mask);
2278 }
2279}
2280
2281static void ata_dev_config_zac(struct ata_device *dev)
2282{
2283 struct ata_port *ap = dev->link->ap;
2284 unsigned int err_mask;
2285 u8 *identify_buf = ap->sector_buf;
2286
2287 dev->zac_zones_optimal_open = U32_MAX;
2288 dev->zac_zones_optimal_nonseq = U32_MAX;
2289 dev->zac_zones_max_open = U32_MAX;
2290
2291 /*
2292 * Always set the 'ZAC' flag for Host-managed devices.
2293 */
2294 if (dev->class == ATA_DEV_ZAC)
2295 dev->flags |= ATA_DFLAG_ZAC;
2296 else if (ata_id_zoned_cap(dev->id) == 0x01)
2297 /*
2298 * Check for host-aware devices.
2299 */
2300 dev->flags |= ATA_DFLAG_ZAC;
2301
2302 if (!(dev->flags & ATA_DFLAG_ZAC))
2303 return;
2304
2305 if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2306 ata_dev_warn(dev,
2307 "ATA Zoned Information Log not supported\n");
2308 return;
2309 }
2310
2311 /*
2312 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2313 */
2314 err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2315 ATA_LOG_ZONED_INFORMATION,
2316 identify_buf, 1);
2317 if (!err_mask) {
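		/* Bit 63 of each qword below indicates that the field
		 * contents are valid.
		 */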
2318 u64 zoned_cap, opt_open, opt_nonseq, max_open;
2319
2320 zoned_cap = get_unaligned_le64(&identify_buf[8]);
2321 if ((zoned_cap >> 63))
2322 dev->zac_zoned_cap = (zoned_cap & 1);
2323 opt_open = get_unaligned_le64(&identify_buf[24]);
2324 if ((opt_open >> 63))
2325 dev->zac_zones_optimal_open = (u32)opt_open;
2326 opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2327 if ((opt_nonseq >> 63))
2328 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2329 max_open = get_unaligned_le64(&identify_buf[40]);
2330 if ((max_open >> 63))
2331 dev->zac_zones_max_open = (u32)max_open;
2332 }
2333}
2334
2335static void ata_dev_config_trusted(struct ata_device *dev)
2336{
2337 struct ata_port *ap = dev->link->ap;
2338 u64 trusted_cap;
2339 unsigned int err;
2340
2341 if (!ata_id_has_trusted(dev->id))
2342 return;
2343
2344 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2345 ata_dev_warn(dev,
2346 "Security Log not supported\n");
2347 return;
2348 }
2349
2350 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2351 ap->sector_buf, 1);
2352 if (err)
2353 return;
2354
2355 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2356 if (!(trusted_cap & (1ULL << 63))) {
2357 ata_dev_dbg(dev,
2358 "Trusted Computing capability qword not valid!\n");
2359 return;
2360 }
2361
2362 if (trusted_cap & (1 << 0))
2363 dev->flags |= ATA_DFLAG_TRUSTED;
2364}
2365
2366static int ata_dev_config_lba(struct ata_device *dev)
2367{
2368 const u16 *id = dev->id;
2369 const char *lba_desc;
2370 char ncq_desc[24];
2371 int ret;
2372
2373 dev->flags |= ATA_DFLAG_LBA;
2374
2375 if (ata_id_has_lba48(id)) {
2376 lba_desc = "LBA48";
2377 dev->flags |= ATA_DFLAG_LBA48;
2378 if (dev->n_sectors >= (1UL << 28) &&
2379 ata_id_has_flush_ext(id))
2380 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2381 } else {
2382 lba_desc = "LBA";
2383 }
2384
2385 /* config NCQ */
2386 ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2387
2388 /* print device info to dmesg */
2389 if (ata_dev_print_info(dev))
2390 ata_dev_info(dev,
2391 "%llu sectors, multi %u: %s %s\n",
2392 (unsigned long long)dev->n_sectors,
2393 dev->multi_count, lba_desc, ncq_desc);
2394
2395 return ret;
2396}
2397
2398static void ata_dev_config_chs(struct ata_device *dev)
2399{
2400 const u16 *id = dev->id;
2401
2402 if (ata_id_current_chs_valid(id)) {
2403 /* Current CHS translation is valid. */
2404 dev->cylinders = id[54];
2405 dev->heads = id[55];
2406 dev->sectors = id[56];
2407 } else {
2408 /* Default translation */
2409 dev->cylinders = id[1];
2410 dev->heads = id[3];
2411 dev->sectors = id[6];
2412 }
2413
2414 /* print device info to dmesg */
2415 if (ata_dev_print_info(dev))
2416 ata_dev_info(dev,
2417 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2418 (unsigned long long)dev->n_sectors,
2419 dev->multi_count, dev->cylinders,
2420 dev->heads, dev->sectors);
2421}
2422
2423static void ata_dev_config_devslp(struct ata_device *dev)
2424{
2425 u8 *sata_setting = dev->link->ap->sector_buf;
2426 unsigned int err_mask;
2427 int i, j;
2428
2429 /*
2430 * Check device sleep capability. Get DevSlp timing variables
2431 * from SATA Settings page of Identify Device Data Log.
2432 */
2433 if (!ata_id_has_devslp(dev->id) ||
2434 !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2435 return;
2436
2437 err_mask = ata_read_log_page(dev,
2438 ATA_LOG_IDENTIFY_DEVICE,
2439 ATA_LOG_SATA_SETTINGS,
2440 sata_setting, 1);
2441 if (err_mask)
2442 return;
2443
2444 dev->flags |= ATA_DFLAG_DEVSLP;
2445 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2446 j = ATA_LOG_DEVSLP_OFFSET + i;
2447 dev->devslp_timing[i] = sata_setting[j];
2448 }
2449}
2450
2451static void ata_dev_config_cpr(struct ata_device *dev)
2452{
2453 unsigned int err_mask;
2454 size_t buf_len;
2455 int i, nr_cpr = 0;
2456 struct ata_cpr_log *cpr_log = NULL;
2457 u8 *desc, *buf = NULL;
2458
2459 if (ata_id_major_version(dev->id) < 11)
2460 goto out;
2461
2462 buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
2463 if (buf_len == 0)
2464 goto out;
2465
2466 /*
2467 * Read the concurrent positioning ranges log (0x47). We can have at
2468 * most 255 32B range descriptors plus a 64B header. This log varies in
2469 * size, so use the size reported in the GPL directory. Reading beyond
2470 * the supported length will result in an error.
2471 */
2472 buf_len <<= 9;
2473 buf = kzalloc(buf_len, GFP_KERNEL);
2474 if (!buf)
2475 goto out;
2476
2477 err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
2478 0, buf, buf_len >> 9);
2479 if (err_mask)
2480 goto out;
2481
2482 nr_cpr = buf[0];
2483 if (!nr_cpr)
2484 goto out;
2485
2486 cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
2487 if (!cpr_log)
2488 goto out;
2489
2490 cpr_log->nr_cpr = nr_cpr;
2491 desc = &buf[64];
2492 for (i = 0; i < nr_cpr; i++, desc += 32) {
2493 cpr_log->cpr[i].num = desc[0];
2494 cpr_log->cpr[i].num_storage_elements = desc[1];
2495 cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
2496 cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
2497 }
2498
2499out:
2500 swap(dev->cpr_log, cpr_log);
2501 kfree(cpr_log);
2502 kfree(buf);
2503}
2504
2505static void ata_dev_print_features(struct ata_device *dev)
2506{
2507 if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
2508 return;
2509
2510 ata_dev_info(dev,
2511 "Features:%s%s%s%s%s%s\n",
2512 dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
2513 dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
2514 dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
2515 dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
2516 dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
2517 dev->cpr_log ? " CPR" : "");
2518}
2519
2520/**
2521 * ata_dev_configure - Configure the specified ATA/ATAPI device
2522 * @dev: Target device to configure
2523 *
2524 * Configure @dev according to @dev->id. Generic and low-level
2525 * driver specific fixups are also applied.
2526 *
2527 * LOCKING:
2528 * Kernel thread context (may sleep)
2529 *
2530 * RETURNS:
2531 * 0 on success, -errno otherwise
2532 */
2533int ata_dev_configure(struct ata_device *dev)
2534{
2535 struct ata_port *ap = dev->link->ap;
2536 bool print_info = ata_dev_print_info(dev);
2537 const u16 *id = dev->id;
2538 unsigned int xfer_mask;
2539 unsigned int err_mask;
2540 char revbuf[7]; /* XYZ-99\0 */
2541 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2542 char modelbuf[ATA_ID_PROD_LEN+1];
2543 int rc;
2544
2545 if (!ata_dev_enabled(dev)) {
2546 ata_dev_dbg(dev, "no device\n");
2547 return 0;
2548 }
2549
2550 /* set horkage */
2551 dev->horkage |= ata_dev_blacklisted(dev);
2552 ata_force_horkage(dev);
2553
2554 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2555 ata_dev_info(dev, "unsupported device, disabling\n");
2556 ata_dev_disable(dev);
2557 return 0;
2558 }
2559
2560 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2561 dev->class == ATA_DEV_ATAPI) {
2562 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2563 atapi_enabled ? "not supported with this driver"
2564 : "disabled");
2565 ata_dev_disable(dev);
2566 return 0;
2567 }
2568
2569 rc = ata_do_link_spd_horkage(dev);
2570 if (rc)
2571 return rc;
2572
2573 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2574 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2575 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2576 dev->horkage |= ATA_HORKAGE_NOLPM;
2577
2578 if (ap->flags & ATA_FLAG_NO_LPM)
2579 dev->horkage |= ATA_HORKAGE_NOLPM;
2580
2581 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2582 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2583 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2584 }
2585
2586 /* let ACPI work its magic */
2587 rc = ata_acpi_on_devcfg(dev);
2588 if (rc)
2589 return rc;
2590
2591 /* massage HPA, do it early as it might change IDENTIFY data */
2592 rc = ata_hpa_resize(dev);
2593 if (rc)
2594 return rc;
2595
2596 /* print device capabilities */
2597 ata_dev_dbg(dev,
2598 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2599 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2600 __func__,
2601 id[49], id[82], id[83], id[84],
2602 id[85], id[86], id[87], id[88]);
2603
2604 /* initialize to-be-configured parameters */
2605 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2606 dev->max_sectors = 0;
2607 dev->cdb_len = 0;
2608 dev->n_sectors = 0;
2609 dev->cylinders = 0;
2610 dev->heads = 0;
2611 dev->sectors = 0;
2612 dev->multi_count = 0;
2613
2614 /*
2615 * common ATA, ATAPI feature tests
2616 */
2617
2618 /* find max transfer mode; for printk only */
2619 xfer_mask = ata_id_xfermask(id);
2620
2621 ata_dump_id(dev, id);
2622
2623 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2624 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2625 sizeof(fwrevbuf));
2626
2627 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2628 sizeof(modelbuf));
2629
2630 /* ATA-specific feature tests */
2631 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2632 if (ata_id_is_cfa(id)) {
2633 /* CPRM may make this media unusable */
2634 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2635 ata_dev_warn(dev,
2636 "supports DRM functions and may not be fully accessible\n");
2637 snprintf(revbuf, 7, "CFA");
2638 } else {
2639 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2640 /* Warn the user if the device has TPM extensions */
2641 if (ata_id_has_tpm(id))
2642 ata_dev_warn(dev,
2643 "supports DRM functions and may not be fully accessible\n");
2644 }
2645
2646 dev->n_sectors = ata_id_n_sectors(id);
2647
2648 /* get current R/W Multiple count setting */
2649 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2650 unsigned int max = dev->id[47] & 0xff;
2651 unsigned int cnt = dev->id[59] & 0xff;
2652 /* only recognize/allow powers of two here */
2653 if (is_power_of_2(max) && is_power_of_2(cnt))
2654 if (cnt <= max)
2655 dev->multi_count = cnt;
2656 }
2657
2658 /* print device info to dmesg */
2659 if (print_info)
2660 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2661 revbuf, modelbuf, fwrevbuf,
2662 ata_mode_string(xfer_mask));
2663
2664 if (ata_id_has_lba(id)) {
2665 rc = ata_dev_config_lba(dev);
2666 if (rc)
2667 return rc;
2668 } else {
2669 ata_dev_config_chs(dev);
2670 }
2671
2672 ata_dev_config_devslp(dev);
2673 ata_dev_config_sense_reporting(dev);
2674 ata_dev_config_zac(dev);
2675 ata_dev_config_trusted(dev);
2676 ata_dev_config_cpr(dev);
2677 dev->cdb_len = 32;
2678
2679 if (print_info)
2680 ata_dev_print_features(dev);
2681 }
2682
2683 /* ATAPI-specific feature tests */
2684 else if (dev->class == ATA_DEV_ATAPI) {
2685 const char *cdb_intr_string = "";
2686 const char *atapi_an_string = "";
2687 const char *dma_dir_string = "";
2688 u32 sntf;
2689
2690 rc = atapi_cdb_len(id);
2691 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2692 ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
2693 rc = -EINVAL;
2694 goto err_out_nosup;
2695 }
2696 dev->cdb_len = (unsigned int) rc;
2697
2698 /* Enable ATAPI AN if both the host and device have
2699 * the support. If PMP is attached, SNTF is required
2700 * to enable ATAPI AN to discern between PHY status
2701 * changed notifications and ATAPI ANs.
2702 */
2703 if (atapi_an &&
2704 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2705 (!sata_pmp_attached(ap) ||
2706 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2707 /* issue SET feature command to turn this on */
2708 err_mask = ata_dev_set_feature(dev,
2709 SETFEATURES_SATA_ENABLE, SATA_AN);
2710 if (err_mask)
2711 ata_dev_err(dev,
2712 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2713 err_mask);
2714 else {
2715 dev->flags |= ATA_DFLAG_AN;
2716 atapi_an_string = ", ATAPI AN";
2717 }
2718 }
2719
2720 if (ata_id_cdb_intr(dev->id)) {
2721 dev->flags |= ATA_DFLAG_CDB_INTR;
2722 cdb_intr_string = ", CDB intr";
2723 }
2724
2725 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2726 dev->flags |= ATA_DFLAG_DMADIR;
2727 dma_dir_string = ", DMADIR";
2728 }
2729
2730 if (ata_id_has_da(dev->id)) {
2731 dev->flags |= ATA_DFLAG_DA;
2732 zpodd_init(dev);
2733 }
2734
2735 /* print device info to dmesg */
2736 if (print_info)
2737 ata_dev_info(dev,
2738 "ATAPI: %s, %s, max %s%s%s%s\n",
2739 modelbuf, fwrevbuf,
2740 ata_mode_string(xfer_mask),
2741 cdb_intr_string, atapi_an_string,
2742 dma_dir_string);
2743 }
2744
2745 /* determine max_sectors */
2746 dev->max_sectors = ATA_MAX_SECTORS;
2747 if (dev->flags & ATA_DFLAG_LBA48)
2748 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2749
2750 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2751 200 sectors */
2752 if (ata_dev_knobble(dev)) {
2753 if (print_info)
2754 ata_dev_info(dev, "applying bridge limits\n");
2755 dev->udma_mask &= ATA_UDMA5;
2756 dev->max_sectors = ATA_MAX_SECTORS;
2757 }
2758
2759 if ((dev->class == ATA_DEV_ATAPI) &&
2760 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2761 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2762 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2763 }
2764
2765 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2766 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2767 dev->max_sectors);
2768
2769 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2770 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2771 dev->max_sectors);
2772
2773 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2774 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2775
2776 if (ap->ops->dev_config)
2777 ap->ops->dev_config(dev);
2778
2779 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2780 /* Let the user know. We don't want to disallow opens for
2781 rescue purposes, or in case the vendor is just a blithering
2782 idiot. Do this after the dev_config call as some controllers
2783 with buggy firmware may want to avoid reporting false device
2784 bugs */
2785
2786 if (print_info) {
2787 ata_dev_warn(dev,
2788"Drive reports diagnostics failure. This may indicate a drive\n");
2789 ata_dev_warn(dev,
2790"fault or invalid emulation. Contact drive vendor for information.\n");
2791 }
2792 }
2793
2794 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2795 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2796 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2797 }
2798
2799 return 0;
2800
2801err_out_nosup:
2802 return rc;
2803}
2804
2805/**
2806 * ata_cable_40wire - return 40 wire cable type
2807 * @ap: port
2808 *
2809 * Helper method for drivers which want to hardwire 40 wire cable
2810 * detection.
2811 */
2812
2813int ata_cable_40wire(struct ata_port *ap)
2814{
2815 return ATA_CBL_PATA40;
2816}
2817EXPORT_SYMBOL_GPL(ata_cable_40wire);
2818
2819/**
2820 * ata_cable_80wire - return 80 wire cable type
2821 * @ap: port
2822 *
2823 * Helper method for drivers which want to hardwire 80 wire cable
2824 * detection.
2825 */
2826
2827int ata_cable_80wire(struct ata_port *ap)
2828{
2829 return ATA_CBL_PATA80;
2830}
2831EXPORT_SYMBOL_GPL(ata_cable_80wire);
2832
2833/**
2834 * ata_cable_unknown - return unknown PATA cable.
2835 * @ap: port
2836 *
2837 * Helper method for drivers which have no PATA cable detection.
2838 */
2839
2840int ata_cable_unknown(struct ata_port *ap)
2841{
2842 return ATA_CBL_PATA_UNK;
2843}
2844EXPORT_SYMBOL_GPL(ata_cable_unknown);
2845
2846/**
2847 * ata_cable_ignore - return ignored PATA cable.
2848 * @ap: port
2849 *
2850 * Helper method for drivers which don't use cable type to limit
2851 * transfer mode.
2852 */
2853int ata_cable_ignore(struct ata_port *ap)
2854{
2855 return ATA_CBL_PATA_IGN;
2856}
2857EXPORT_SYMBOL_GPL(ata_cable_ignore);
2858
2859/**
2860 * ata_cable_sata - return SATA cable type
2861 * @ap: port
2862 *
2863 * Helper method for drivers which have SATA cables
2864 */
2865
2866int ata_cable_sata(struct ata_port *ap)
2867{
2868 return ATA_CBL_SATA;
2869}
2870EXPORT_SYMBOL_GPL(ata_cable_sata);
2871
2872/**
2873 * ata_bus_probe - Reset and probe ATA bus
2874 * @ap: Bus to probe
2875 *
2876 * Master ATA bus probing function. Initiates a hardware-dependent
2877 * bus reset, then attempts to identify any devices found on
2878 * the bus.
2879 *
2880 * LOCKING:
2881 * PCI/etc. bus probe sem.
2882 *
2883 * RETURNS:
2884 * Zero on success, negative errno otherwise.
2885 */
2886
2887int ata_bus_probe(struct ata_port *ap)
2888{
2889 unsigned int classes[ATA_MAX_DEVICES];
2890 int tries[ATA_MAX_DEVICES];
2891 int rc;
2892 struct ata_device *dev;
2893
2894 ata_for_each_dev(dev, &ap->link, ALL)
2895 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2896
2897 retry:
2898 ata_for_each_dev(dev, &ap->link, ALL) {
2899 /* If we issue an SRST then an ATA drive (not ATAPI)
2900 * may change configuration and be in PIO0 timing. If
2901 * we do a hard reset (or are coming from power on)
2902 * this is true for ATA or ATAPI. Until we've set a
2903 * suitable controller mode we should not touch the
2904 * bus as we may be talking too fast.
2905 */
2906 dev->pio_mode = XFER_PIO_0;
2907 dev->dma_mode = 0xff;
2908
2909 /* If the controller has a pio mode setup function
2910 * then use it to set the chipset to rights. Don't
2911 * touch the DMA setup as that will be dealt with when
2912 * configuring devices.
2913 */
2914 if (ap->ops->set_piomode)
2915 ap->ops->set_piomode(ap, dev);
2916 }
2917
2918 /* reset and determine device classes */
2919 ap->ops->phy_reset(ap);
2920
2921 ata_for_each_dev(dev, &ap->link, ALL) {
2922 if (dev->class != ATA_DEV_UNKNOWN)
2923 classes[dev->devno] = dev->class;
2924 else
2925 classes[dev->devno] = ATA_DEV_NONE;
2926
2927 dev->class = ATA_DEV_UNKNOWN;
2928 }
2929
2930 /* read IDENTIFY page and configure devices. We have to do the identify
2931 specific sequence bass-ackwards so that PDIAG- is released by
2932 the slave device */
2933
2934 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2935 if (tries[dev->devno])
2936 dev->class = classes[dev->devno];
2937
2938 if (!ata_dev_enabled(dev))
2939 continue;
2940
2941 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2942 dev->id);
2943 if (rc)
2944 goto fail;
2945 }
2946
2947 /* Now ask for the cable type as PDIAG- should have been released */
2948 if (ap->ops->cable_detect)
2949 ap->cbl = ap->ops->cable_detect(ap);
2950
2951 /* We may have SATA bridge glue hiding here irrespective of
2952 * the reported cable types and sensed types. When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is on, which is a problem.
2955 */
2956 ata_for_each_dev(dev, &ap->link, ENABLED)
2957 if (ata_id_is_sata(dev->id))
2958 ap->cbl = ATA_CBL_SATA;
2959
2960 /* After the identify sequence we can now set up the devices. We do
2961 this in the normal order so that the user doesn't get confused */
2962
2963 ata_for_each_dev(dev, &ap->link, ENABLED) {
2964 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2965 rc = ata_dev_configure(dev);
2966 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2967 if (rc)
2968 goto fail;
2969 }
2970
2971 /* configure transfer mode */
2972 rc = ata_set_mode(&ap->link, &dev);
2973 if (rc)
2974 goto fail;
2975
2976 ata_for_each_dev(dev, &ap->link, ENABLED)
2977 return 0;
2978
2979 return -ENODEV;
2980
2981 fail:
2982 tries[dev->devno]--;
2983
2984 switch (rc) {
2985 case -EINVAL:
2986 /* eeek, something went very wrong, give up */
2987 tries[dev->devno] = 0;
2988 break;
2989
2990 case -ENODEV:
2991 /* give it just one more chance */
2992 tries[dev->devno] = min(tries[dev->devno], 1);
2993 fallthrough;
2994 case -EIO:
2995 if (tries[dev->devno] == 1) {
2996 /* This is the last chance, better to slow
2997 * down than lose it.
2998 */
2999 sata_down_spd_limit(&ap->link, 0);
3000 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3001 }
3002 }
3003
3004 if (!tries[dev->devno])
3005 ata_dev_disable(dev);
3006
3007 goto retry;
3008}
3009
3010/**
3011 * sata_print_link_status - Print SATA link status
3012 * @link: SATA link to printk link status about
3013 *
3014 * This function prints link speed and status of a SATA link.
3015 *
3016 * LOCKING:
3017 * None.
3018 */
3019static void sata_print_link_status(struct ata_link *link)
3020{
3021 u32 sstatus, scontrol, tmp;
3022
3023 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3024 return;
3025 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3026 return;
3027
3028 if (ata_phys_link_online(link)) {
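		/* SStatus bits 7:4 hold the negotiated interface speed (SPD) */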
3029 tmp = (sstatus >> 4) & 0xf;
3030 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3031 sata_spd_string(tmp), sstatus, scontrol);
3032 } else {
3033 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3034 sstatus, scontrol);
3035 }
3036}
3037
3038/**
3039 * ata_dev_pair - return other device on cable
3040 * @adev: device
3041 *
 * Obtain the other device on the same cable, or NULL if none is
 * present.
3044 */
3045
3046struct ata_device *ata_dev_pair(struct ata_device *adev)
3047{
3048 struct ata_link *link = adev->link;
3049 struct ata_device *pair = &link->device[1 - adev->devno];
3050 if (!ata_dev_enabled(pair))
3051 return NULL;
3052 return pair;
3053}
3054EXPORT_SYMBOL_GPL(ata_dev_pair);
3055
3056/**
3057 * sata_down_spd_limit - adjust SATA spd limit downward
3058 * @link: Link to adjust SATA spd limit for
3059 * @spd_limit: Additional limit
3060 *
3061 * Adjust SATA spd limit of @link downward. Note that this
3062 * function only adjusts the limit. The change must be applied
3063 * using sata_set_spd().
3064 *
3065 * If @spd_limit is non-zero, the speed is limited to a value equal
3066 * to or lower than @spd_limit, if such a speed is supported. If
3067 * @spd_limit is slower than any supported speed, only the lowest
3068 * supported speed is allowed.
3069 *
3070 * LOCKING:
3071 * Inherited from caller.
3072 *
3073 * RETURNS:
3074 * 0 on success, negative errno on failure
3075 */
3076int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3077{
3078 u32 sstatus, spd, mask;
3079 int rc, bit;
3080
3081 if (!sata_scr_valid(link))
3082 return -EOPNOTSUPP;
3083
3084 /* If SCR can be read, use it to determine the current SPD.
3085 * If not, use cached value in link->sata_spd.
3086 */
3087 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3088 if (rc == 0 && ata_sstatus_online(sstatus))
3089 spd = (sstatus >> 4) & 0xf;
3090 else
3091 spd = link->sata_spd;
3092
3093 mask = link->sata_spd_limit;
3094 if (mask <= 1)
3095 return -EINVAL;
3096
3097 /* unconditionally mask off the highest bit */
3098 bit = fls(mask) - 1;
3099 mask &= ~(1 << bit);
3100
3101 /*
3102 * Mask off all speeds higher than or equal to the current one. At
3103 * this point, if current SPD is not available and we previously
3104 * recorded the link speed from SStatus, the driver has already
3105 * masked off the highest bit so mask should already be 1 or 0.
3106 * Otherwise, we should not force 1.5Gbps on a link where we have
3107 * not previously recorded speed from SStatus. Just return in this
3108 * case.
3109 */
3110 if (spd > 1)
3111 mask &= (1 << (spd - 1)) - 1;
3112 else if (link->sata_spd)
3113 return -EINVAL;
3114
3115 /* were we already at the bottom? */
3116 if (!mask)
3117 return -EINVAL;
3118
3119 if (spd_limit) {
3120 if (mask & ((1 << spd_limit) - 1))
3121 mask &= (1 << spd_limit) - 1;
3122 else {
3123 bit = ffs(mask) - 1;
3124 mask = 1 << bit;
3125 }
3126 }
3127
3128 link->sata_spd_limit = mask;
3129
3130 ata_link_warn(link, "limiting SATA link speed to %s\n",
3131 sata_spd_string(fls(mask)));
3132
3133 return 0;
3134}
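/*
 * Illustrative usage (a sketch, not a call site here): as noted above, the
 * new limit only takes effect once it is written to SControl, so callers
 * typically follow up with sata_set_spd(), e.g. in a speed-down path:
 *
 *	if (sata_down_spd_limit(link, 0) == 0)
 *		sata_set_spd(link);
 */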
3135
3136#ifdef CONFIG_ATA_ACPI
3137/**
3138 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3139 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3140 * @cycle: cycle duration in ns
3141 *
3142 * Return matching xfer mode for @cycle. The returned mode is of
3143 * the transfer type specified by @xfer_shift. If @cycle is too
3144 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3145 * than the fastest known mode, the fastest mode is returned.
3146 *
3147 * LOCKING:
3148 * None.
3149 *
3150 * RETURNS:
3151 * Matching xfer_mode, 0xff if no match found.
3152 */
3153u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3154{
3155 u8 base_mode = 0xff, last_mode = 0xff;
3156 const struct ata_xfer_ent *ent;
3157 const struct ata_timing *t;
3158
3159 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3160 if (ent->shift == xfer_shift)
3161 base_mode = ent->base;
3162
3163 for (t = ata_timing_find_mode(base_mode);
3164 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3165 unsigned short this_cycle;
3166
3167 switch (xfer_shift) {
3168 case ATA_SHIFT_PIO:
3169 case ATA_SHIFT_MWDMA:
3170 this_cycle = t->cycle;
3171 break;
3172 case ATA_SHIFT_UDMA:
3173 this_cycle = t->udma;
3174 break;
3175 default:
3176 return 0xff;
3177 }
3178
3179 if (cycle > this_cycle)
3180 break;
3181
3182 last_mode = t->mode;
3183 }
3184
3185 return last_mode;
3186}
3187#endif
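/*
 * Illustrative example of the helper above (only built with
 * CONFIG_ATA_ACPI here): mapping a firmware-reported cycle time to an
 * xfer mode.
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120);
 *
 * returns the fastest UDMA mode whose defined cycle time is not shorter
 * than 120 ns, or 0xff if even the slowest UDMA mode needs a shorter
 * cycle than that.
 */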
3188
3189/**
3190 * ata_down_xfermask_limit - adjust dev xfer masks downward
3191 * @dev: Device to adjust xfer masks
3192 * @sel: ATA_DNXFER_* selector
3193 *
3194 * Adjust xfer masks of @dev downward. Note that this function
3195 * does not apply the change. Invoking ata_set_mode() afterwards
3196 * will apply the limit.
3197 *
3198 * LOCKING:
3199 * Inherited from caller.
3200 *
3201 * RETURNS:
3202 * 0 on success, negative errno on failure
3203 */
3204int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3205{
3206 char buf[32];
3207 unsigned int orig_mask, xfer_mask;
3208 unsigned int pio_mask, mwdma_mask, udma_mask;
3209 int quiet, highbit;
3210
3211 quiet = !!(sel & ATA_DNXFER_QUIET);
3212 sel &= ~ATA_DNXFER_QUIET;
3213
3214 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3215 dev->mwdma_mask,
3216 dev->udma_mask);
3217 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3218
3219 switch (sel) {
3220 case ATA_DNXFER_PIO:
3221 highbit = fls(pio_mask) - 1;
3222 pio_mask &= ~(1 << highbit);
3223 break;
3224
3225 case ATA_DNXFER_DMA:
3226 if (udma_mask) {
3227 highbit = fls(udma_mask) - 1;
3228 udma_mask &= ~(1 << highbit);
3229 if (!udma_mask)
3230 return -ENOENT;
3231 } else if (mwdma_mask) {
3232 highbit = fls(mwdma_mask) - 1;
3233 mwdma_mask &= ~(1 << highbit);
3234 if (!mwdma_mask)
3235 return -ENOENT;
3236 }
3237 break;
3238
3239 case ATA_DNXFER_40C:
3240 udma_mask &= ATA_UDMA_MASK_40C;
3241 break;
3242
3243 case ATA_DNXFER_FORCE_PIO0:
3244 pio_mask &= 1;
3245 fallthrough;
3246 case ATA_DNXFER_FORCE_PIO:
3247 mwdma_mask = 0;
3248 udma_mask = 0;
3249 break;
3250
3251 default:
3252 BUG();
3253 }
3254
3255 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3256
3257 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3258 return -ENOENT;
3259
3260 if (!quiet) {
3261 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3262 snprintf(buf, sizeof(buf), "%s:%s",
3263 ata_mode_string(xfer_mask),
3264 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3265 else
3266 snprintf(buf, sizeof(buf), "%s",
3267 ata_mode_string(xfer_mask));
3268
3269 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3270 }
3271
3272 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3273 &dev->udma_mask);
3274
3275 return 0;
3276}
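/*
 * Illustrative usage (mirroring the probe retry path earlier in this
 * file): knock a misbehaving device down to PIO and let the next
 * ata_set_mode() pass apply the new limit:
 *
 *	ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
 */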
3277
3278static int ata_dev_set_mode(struct ata_device *dev)
3279{
3280 struct ata_port *ap = dev->link->ap;
3281 struct ata_eh_context *ehc = &dev->link->eh_context;
3282 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3283 const char *dev_err_whine = "";
3284 int ign_dev_err = 0;
3285 unsigned int err_mask = 0;
3286 int rc;
3287
3288 dev->flags &= ~ATA_DFLAG_PIO;
3289 if (dev->xfer_shift == ATA_SHIFT_PIO)
3290 dev->flags |= ATA_DFLAG_PIO;
3291
3292 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3293 dev_err_whine = " (SET_XFERMODE skipped)";
3294 else {
3295 if (nosetxfer)
3296 ata_dev_warn(dev,
3297 "NOSETXFER but PATA detected - can't "
3298 "skip SETXFER, might malfunction\n");
3299 err_mask = ata_dev_set_xfermode(dev);
3300 }
3301
3302 if (err_mask & ~AC_ERR_DEV)
3303 goto fail;
3304
3305 /* revalidate */
3306 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3307 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3308 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3309 if (rc)
3310 return rc;
3311
3312 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3313 /* Old CFA may refuse this command, which is just fine */
3314 if (ata_id_is_cfa(dev->id))
3315 ign_dev_err = 1;
3316 /* Catch several broken garbage emulations plus some
3317 pre-ATA devices */
3318 if (ata_id_major_version(dev->id) == 0 &&
3319 dev->pio_mode <= XFER_PIO_2)
3320 ign_dev_err = 1;
3321 /* Some very old devices and some bad newer ones fail
3322 any kind of SET_XFERMODE request but support PIO0-2
3323 timings and no IORDY */
3324 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3325 ign_dev_err = 1;
3326 }
3327 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3328 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3329 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3330 dev->dma_mode == XFER_MW_DMA_0 &&
3331 (dev->id[63] >> 8) & 1)
3332 ign_dev_err = 1;
3333
3334 /* if the device is actually configured correctly, ignore dev err */
3335 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3336 ign_dev_err = 1;
3337
3338 if (err_mask & AC_ERR_DEV) {
3339 if (!ign_dev_err)
3340 goto fail;
3341 else
3342 dev_err_whine = " (device error ignored)";
3343 }
3344
3345 ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3346 dev->xfer_shift, (int)dev->xfer_mode);
3347
3348 if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3349 ehc->i.flags & ATA_EHI_DID_HARDRESET)
3350 ata_dev_info(dev, "configured for %s%s\n",
3351 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3352 dev_err_whine);
3353
3354 return 0;
3355
3356 fail:
3357 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3358 return -EIO;
3359}
3360
3361/**
3362 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3363 * @link: link on which timings will be programmed
3364 * @r_failed_dev: out parameter for failed device
3365 *
3366 * Standard implementation of the function used to tune and set
3367 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3368 * ata_dev_set_mode() fails, a pointer to the failing device is
3369 * returned in @r_failed_dev.
3370 *
3371 * LOCKING:
3372 * PCI/etc. bus probe sem.
3373 *
3374 * RETURNS:
3375 * 0 on success, negative errno otherwise
3376 */
3377
3378int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3379{
3380 struct ata_port *ap = link->ap;
3381 struct ata_device *dev;
3382 int rc = 0, used_dma = 0, found = 0;
3383
3384 /* step 1: calculate xfer_mask */
3385 ata_for_each_dev(dev, link, ENABLED) {
3386 unsigned int pio_mask, dma_mask;
3387 unsigned int mode_mask;
3388
3389 mode_mask = ATA_DMA_MASK_ATA;
3390 if (dev->class == ATA_DEV_ATAPI)
3391 mode_mask = ATA_DMA_MASK_ATAPI;
3392 else if (ata_id_is_cfa(dev->id))
3393 mode_mask = ATA_DMA_MASK_CFA;
3394
3395 ata_dev_xfermask(dev);
3396 ata_force_xfermask(dev);
3397
3398 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3399
3400 if (libata_dma_mask & mode_mask)
3401 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3402 dev->udma_mask);
3403 else
3404 dma_mask = 0;
3405
3406 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3407 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3408
3409 found = 1;
3410 if (ata_dma_enabled(dev))
3411 used_dma = 1;
3412 }
3413 if (!found)
3414 goto out;
3415
3416 /* step 2: always set host PIO timings */
3417 ata_for_each_dev(dev, link, ENABLED) {
3418 if (dev->pio_mode == 0xff) {
3419 ata_dev_warn(dev, "no PIO support\n");
3420 rc = -EINVAL;
3421 goto out;
3422 }
3423
3424 dev->xfer_mode = dev->pio_mode;
3425 dev->xfer_shift = ATA_SHIFT_PIO;
3426 if (ap->ops->set_piomode)
3427 ap->ops->set_piomode(ap, dev);
3428 }
3429
3430 /* step 3: set host DMA timings */
3431 ata_for_each_dev(dev, link, ENABLED) {
3432 if (!ata_dma_enabled(dev))
3433 continue;
3434
3435 dev->xfer_mode = dev->dma_mode;
3436 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3437 if (ap->ops->set_dmamode)
3438 ap->ops->set_dmamode(ap, dev);
3439 }
3440
3441 /* step 4: update devices' xfer mode */
3442 ata_for_each_dev(dev, link, ENABLED) {
3443 rc = ata_dev_set_mode(dev);
3444 if (rc)
3445 goto out;
3446 }
3447
3448 /* Record simplex status. If we selected DMA then the other
3449 * host channels are not permitted to do so.
3450 */
3451 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3452 ap->host->simplex_claimed = ap;
3453
3454 out:
3455 if (rc)
3456 *r_failed_dev = dev;
3457 return rc;
3458}
3459EXPORT_SYMBOL_GPL(ata_do_set_mode);
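/*
 * Illustrative sketch, not taken from a real driver: an LLD that needs
 * controller-specific work around mode programming can wrap
 * ata_do_set_mode() from its own ->set_mode hook.  "example_set_mode"
 * is a made-up name.
 *
 *	static int example_set_mode(struct ata_link *link,
 *				    struct ata_device **r_failed)
 *	{
 *		// hypothetical controller preparation would go here
 *		return ata_do_set_mode(link, r_failed);
 *	}
 */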
3460
3461/**
3462 * ata_wait_ready - wait for link to become ready
3463 * @link: link to be waited on
3464 * @deadline: deadline jiffies for the operation
3465 * @check_ready: callback to check link readiness
3466 *
3467 * Wait for @link to become ready. @check_ready should return
3468 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3469 * link doesn't seem to be occupied, other errno for other error
3470 * conditions.
3471 *
3472 * Transient -ENODEV conditions are allowed for
3473 * ATA_TMOUT_FF_WAIT.
3474 *
3475 * LOCKING:
3476 * EH context.
3477 *
3478 * RETURNS:
3479 * 0 if @link is ready before @deadline; otherwise, -errno.
3480 */
3481int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3482 int (*check_ready)(struct ata_link *link))
3483{
3484 unsigned long start = jiffies;
3485 unsigned long nodev_deadline;
3486 int warned = 0;
3487
3488 /* choose which 0xff timeout to use, read comment in libata.h */
3489 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3490 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3491 else
3492 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3493
3494 /* Slave readiness can't be tested separately from the master.
3495 * In an M/S emulation configuration, this function should be called
3496 * only on the master and it will handle both master and slave.
3497 */
3498 WARN_ON(link == link->ap->slave_link);
3499
3500 if (time_after(nodev_deadline, deadline))
3501 nodev_deadline = deadline;
3502
3503 while (1) {
3504 unsigned long now = jiffies;
3505 int ready, tmp;
3506
3507 ready = tmp = check_ready(link);
3508 if (ready > 0)
3509 return 0;
3510
3511 /*
3512 * -ENODEV could be transient. Ignore -ENODEV if link
3513 * is online. Also, some SATA devices take a long
3514 * time to clear 0xff after reset. Wait for
3515 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3516 * offline.
3517 *
3518 * Note that some PATA controllers (pata_ali) explode
3519 * if status register is read more than once when
3520 * there's no device attached.
3521 */
3522 if (ready == -ENODEV) {
3523 if (ata_link_online(link))
3524 ready = 0;
3525 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3526 !ata_link_offline(link) &&
3527 time_before(now, nodev_deadline))
3528 ready = 0;
3529 }
3530
3531 if (ready)
3532 return ready;
3533 if (time_after(now, deadline))
3534 return -EBUSY;
3535
3536 if (!warned && time_after(now, start + 5 * HZ) &&
3537 (deadline - now > 3 * HZ)) {
3538 ata_link_warn(link,
3539 "link is slow to respond, please be patient "
3540 "(ready=%d)\n", tmp);
3541 warned = 1;
3542 }
3543
3544 ata_msleep(link->ap, 50);
3545 }
3546}
3547
3548/**
3549 * ata_wait_after_reset - wait for link to become ready after reset
3550 * @link: link to be waited on
3551 * @deadline: deadline jiffies for the operation
3552 * @check_ready: callback to check link readiness
3553 *
3554 * Wait for @link to become ready after reset.
3555 *
3556 * LOCKING:
3557 * EH context.
3558 *
3559 * RETURNS:
3560 * 0 if @link is ready before @deadline; otherwise, -errno.
3561 */
3562int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3563 int (*check_ready)(struct ata_link *link))
3564{
3565 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3566
3567 return ata_wait_ready(link, deadline, check_ready);
3568}
3569EXPORT_SYMBOL_GPL(ata_wait_after_reset);
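/*
 * Illustrative sketch of the usual pairing in an LLD ->hardreset method:
 * reset the link, then wait for readiness using a controller-specific
 * check.  "example_hardreset" and "example_check_ready" are made-up
 * names.
 *
 *	static int example_hardreset(struct ata_link *link, unsigned int *class,
 *				     unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
 *		if (online)
 *			rc = ata_wait_after_reset(link, deadline, example_check_ready);
 *		return rc;
 *	}
 */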
3570
3571/**
3572 * ata_std_prereset - prepare for reset
3573 * @link: ATA link to be reset
3574 * @deadline: deadline jiffies for the operation
3575 *
3576 * @link is about to be reset. Initialize it. Failure from
3577 * prereset makes libata abort the whole reset sequence and give up
3578 * that port, so prereset should be best-effort. It does its
3579 * best to prepare for reset sequence but if things go wrong, it
3580 * should just whine, not fail.
3581 *
3582 * LOCKING:
3583 * Kernel thread context (may sleep)
3584 *
3585 * RETURNS:
3586 * Always 0.
3587 */
3588int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3589{
3590 struct ata_port *ap = link->ap;
3591 struct ata_eh_context *ehc = &link->eh_context;
3592 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3593 int rc;
3594
3595 /* if we're about to do hardreset, nothing more to do */
3596 if (ehc->i.action & ATA_EH_HARDRESET)
3597 return 0;
3598
3599 /* if SATA, resume link */
3600 if (ap->flags & ATA_FLAG_SATA) {
3601 rc = sata_link_resume(link, timing, deadline);
3602 /* whine about phy resume failure but proceed */
3603 if (rc && rc != -EOPNOTSUPP)
3604 ata_link_warn(link,
3605 "failed to resume link for reset (errno=%d)\n",
3606 rc);
3607 }
3608
3609 /* no point in trying softreset on offline link */
3610 if (ata_phys_link_offline(link))
3611 ehc->i.action &= ~ATA_EH_SOFTRESET;
3612
3613 return 0;
3614}
3615EXPORT_SYMBOL_GPL(ata_std_prereset);
3616
3617/**
3618 * sata_std_hardreset - COMRESET w/o waiting or classification
3619 * @link: link to reset
3620 * @class: resulting class of attached device
3621 * @deadline: deadline jiffies for the operation
3622 *
3623 * Standard SATA COMRESET w/o waiting or classification.
3624 *
3625 * LOCKING:
3626 * Kernel thread context (may sleep)
3627 *
3628 * RETURNS:
3629 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3630 */
3631int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3632 unsigned long deadline)
3633{
3634 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3635 bool online;
3636 int rc;
3637
3638 /* do hardreset */
3639 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3640 return online ? -EAGAIN : rc;
3641}
3642EXPORT_SYMBOL_GPL(sata_std_hardreset);
3643
3644/**
3645 * ata_std_postreset - standard postreset callback
3646 * @link: the target ata_link
3647 * @classes: classes of attached devices
3648 *
3649 * This function is invoked after a successful reset. Note that
3650 * the device might have been reset more than once using
3651 * different reset methods before postreset is invoked.
3652 *
3653 * LOCKING:
3654 * Kernel thread context (may sleep)
3655 */
3656void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3657{
3658 u32 serror;
3659
3660 /* reset complete, clear SError */
3661 if (!sata_scr_read(link, SCR_ERROR, &serror))
3662 sata_scr_write(link, SCR_ERROR, serror);
3663
3664 /* print link status */
3665 sata_print_link_status(link);
3666}
3667EXPORT_SYMBOL_GPL(ata_std_postreset);
3668
3669/**
3670 * ata_dev_same_device - Determine whether new ID matches configured device
3671 * @dev: device to compare against
3672 * @new_class: class of the new device
3673 * @new_id: IDENTIFY page of the new device
3674 *
3675 * Compare @new_class and @new_id against @dev and determine
3676 * whether @dev is the device indicated by @new_class and
3677 * @new_id.
3678 *
3679 * LOCKING:
3680 * None.
3681 *
3682 * RETURNS:
3683 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3684 */
3685static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3686 const u16 *new_id)
3687{
3688 const u16 *old_id = dev->id;
3689 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3690 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3691
3692 if (dev->class != new_class) {
3693 ata_dev_info(dev, "class mismatch %d != %d\n",
3694 dev->class, new_class);
3695 return 0;
3696 }
3697
3698 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3699 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3700 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3701 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3702
3703 if (strcmp(model[0], model[1])) {
3704 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3705 model[0], model[1]);
3706 return 0;
3707 }
3708
3709 if (strcmp(serial[0], serial[1])) {
3710 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3711 serial[0], serial[1]);
3712 return 0;
3713 }
3714
3715 return 1;
3716}
3717
3718/**
3719 * ata_dev_reread_id - Re-read IDENTIFY data
3720 * @dev: target ATA device
3721 * @readid_flags: read ID flags
3722 *
3723 * Re-read IDENTIFY page and make sure @dev is still attached to
3724 * the port.
3725 *
3726 * LOCKING:
3727 * Kernel thread context (may sleep)
3728 *
3729 * RETURNS:
3730 * 0 on success, negative errno otherwise
3731 */
3732int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3733{
3734 unsigned int class = dev->class;
3735 u16 *id = (void *)dev->link->ap->sector_buf;
3736 int rc;
3737
3738 /* read ID data */
3739 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3740 if (rc)
3741 return rc;
3742
3743 /* is the device still there? */
3744 if (!ata_dev_same_device(dev, class, id))
3745 return -ENODEV;
3746
3747 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3748 return 0;
3749}
3750
3751/**
3752 * ata_dev_revalidate - Revalidate ATA device
3753 * @dev: device to revalidate
3754 * @new_class: new class code
3755 * @readid_flags: read ID flags
3756 *
3757 * Re-read IDENTIFY page, make sure @dev is still attached to the
3758 * port and reconfigure it according to the new IDENTIFY page.
3759 *
3760 * LOCKING:
3761 * Kernel thread context (may sleep)
3762 *
3763 * RETURNS:
3764 * 0 on success, negative errno otherwise
3765 */
3766int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3767 unsigned int readid_flags)
3768{
3769 u64 n_sectors = dev->n_sectors;
3770 u64 n_native_sectors = dev->n_native_sectors;
3771 int rc;
3772
3773 if (!ata_dev_enabled(dev))
3774 return -ENODEV;
3775
3776 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3777 if (ata_class_enabled(new_class) &&
3778 new_class != ATA_DEV_ATA &&
3779 new_class != ATA_DEV_ATAPI &&
3780 new_class != ATA_DEV_ZAC &&
3781 new_class != ATA_DEV_SEMB) {
3782 ata_dev_info(dev, "class mismatch %u != %u\n",
3783 dev->class, new_class);
3784 rc = -ENODEV;
3785 goto fail;
3786 }
3787
3788 /* re-read ID */
3789 rc = ata_dev_reread_id(dev, readid_flags);
3790 if (rc)
3791 goto fail;
3792
3793 /* configure device according to the new ID */
3794 rc = ata_dev_configure(dev);
3795 if (rc)
3796 goto fail;
3797
3798 /* verify n_sectors hasn't changed */
3799 if (dev->class != ATA_DEV_ATA || !n_sectors ||
3800 dev->n_sectors == n_sectors)
3801 return 0;
3802
3803 /* n_sectors has changed */
3804 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3805 (unsigned long long)n_sectors,
3806 (unsigned long long)dev->n_sectors);
3807
3808 /*
3809 * Something could have caused HPA to be unlocked
3810 * involuntarily. If n_native_sectors hasn't changed and the
3811 * new size matches it, keep the device.
3812 */
3813 if (dev->n_native_sectors == n_native_sectors &&
3814 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3815 ata_dev_warn(dev,
3816 "new n_sectors matches native, probably "
3817 "late HPA unlock, n_sectors updated\n");
3818 /* use the larger n_sectors */
3819 return 0;
3820 }
3821
3822 /*
3823 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
3824 * unlocking HPA in those cases.
3825 *
3826 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3827 */
3828 if (dev->n_native_sectors == n_native_sectors &&
3829 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3830 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
3831 ata_dev_warn(dev,
3832 "old n_sectors matches native, probably "
3833 "late HPA lock, will try to unlock HPA\n");
3834 /* try unlocking HPA */
3835 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
3836 rc = -EIO;
3837 } else
3838 rc = -ENODEV;
3839
3840 /* restore original n_[native_]sectors and fail */
3841 dev->n_native_sectors = n_native_sectors;
3842 dev->n_sectors = n_sectors;
3843 fail:
3844 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
3845 return rc;
3846}
3847
3848struct ata_blacklist_entry {
3849 const char *model_num;
3850 const char *model_rev;
3851 unsigned long horkage;
3852};
3853
3854static const struct ata_blacklist_entry ata_device_blacklist [] = {
3855 /* Devices with DMA related problems under Linux */
3856 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3857 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3858 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3859 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3860 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3861 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3862 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3863 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3864 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3865 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
3866 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3867 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3868 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3869 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3870 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3871 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
3872 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3873 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3874 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3875 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3876 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3877 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3878 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3879 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3880 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3881 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3882 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3883 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3884 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
3885 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
3886 /* Odd clown on sil3726/4726 PMPs */
3887 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
3888 /* Similar story with ASMedia 1092 */
3889 { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
3890
3891 /* Weird ATAPI devices */
3892 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3893 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
3894 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3895 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3896
3897 /*
3898 * Causes silent data corruption with higher max sects.
3899 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
3900 */
3901 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
3902
3903 /*
3904 * These devices time out with higher max sects.
3905 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
3906 */
3907 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3908 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3909
3910 /* Devices we expect to fail diagnostics */
3911
3912 /* Devices where NCQ should be avoided */
3913 /* NCQ is slow */
3914 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3915 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
3916 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3917 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3918 /* NCQ is broken */
3919 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3920 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3921 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3922 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
3923 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
3924
3925 /* Seagate NCQ + FLUSH CACHE firmware bug */
3926 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3927 ATA_HORKAGE_FIRMWARE_WARN },
3928
3929 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3930 ATA_HORKAGE_FIRMWARE_WARN },
3931
3932 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3933 ATA_HORKAGE_FIRMWARE_WARN },
3934
3935 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3936 ATA_HORKAGE_FIRMWARE_WARN },
3937
3938 /* drives which fail FPDMA_AA activation (some may freeze afterwards);
3939 the ST disks also have LPM issues */
3940 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
3941 ATA_HORKAGE_NOLPM },
3942 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
3943
3944 /* Blacklist entries taken from Silicon Image 3124/3132
3945 Windows driver .inf file - also several Linux problem reports */
3946 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
3947 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
3948 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
3949
3950 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
3951 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
3952
3953 /* Sandisk SD7/8/9s lock up hard on large trims */
3954 { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
3955
3956 /* devices which puke on READ_NATIVE_MAX */
3957 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
3958 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3959 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3960 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3961
3962 /* this one allows HPA unlocking but fails IOs on the area */
3963 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
3964
3965 /* Devices which report 1 sector over size HPA */
3966 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
3967 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
3968 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
3969
3970 /* Devices which get the IVB wrong */
3971 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
3972 /* Maybe we should just blacklist TSSTcorp... */
3973 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
3974
3975 /* Devices that do not need bridging limits applied */
3976 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
3977 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
3978
3979 /* Devices which aren't very happy with higher link speeds */
3980 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
3981 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
3982
3983 /*
3984 * Devices which choke on SETXFER. Applies only if both the
3985 * device and controller are SATA.
3986 */
3987 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
3988 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
3989 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
3990 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
3991 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
3992
3993 /* These specific Pioneer models have LPM issues */
3994 { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
3995 { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
3996
3997 /* Crucial BX100 SSD 500GB has broken LPM support */
3998 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
3999
4000 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4001 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4002 ATA_HORKAGE_ZERO_AFTER_TRIM |
4003 ATA_HORKAGE_NOLPM },
4004 /* 512GB MX100 with newer firmware has only LPM issues */
4005 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4006 ATA_HORKAGE_NOLPM },
4007
4008 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4009 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4010 ATA_HORKAGE_ZERO_AFTER_TRIM |
4011 ATA_HORKAGE_NOLPM },
4012 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4013 ATA_HORKAGE_ZERO_AFTER_TRIM |
4014 ATA_HORKAGE_NOLPM },
4015
4016 /* These specific Samsung models/firmware-revs do not handle LPM well */
4017 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
4018 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
4019 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
4020 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
4021
4022 /* devices that don't properly handle queued TRIM commands */
4023 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4024 ATA_HORKAGE_ZERO_AFTER_TRIM },
4025 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4026 ATA_HORKAGE_ZERO_AFTER_TRIM },
4027 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4028 ATA_HORKAGE_ZERO_AFTER_TRIM },
4029 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4030 ATA_HORKAGE_ZERO_AFTER_TRIM },
4031 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4032 ATA_HORKAGE_ZERO_AFTER_TRIM },
4033 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4034 ATA_HORKAGE_ZERO_AFTER_TRIM },
4035 { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4036 ATA_HORKAGE_NO_DMA_LOG |
4037 ATA_HORKAGE_ZERO_AFTER_TRIM },
4038 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4039 ATA_HORKAGE_ZERO_AFTER_TRIM },
4040 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4041 ATA_HORKAGE_ZERO_AFTER_TRIM },
4042 { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4043 ATA_HORKAGE_ZERO_AFTER_TRIM |
4044 ATA_HORKAGE_NO_NCQ_ON_ATI },
4045 { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4046 ATA_HORKAGE_ZERO_AFTER_TRIM |
4047 ATA_HORKAGE_NO_NCQ_ON_ATI },
4048 { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4049 ATA_HORKAGE_ZERO_AFTER_TRIM |
4050 ATA_HORKAGE_NO_NCQ_ON_ATI, },
4051 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4052 ATA_HORKAGE_ZERO_AFTER_TRIM },
4053
4054 /* devices that don't properly handle TRIM commands */
4055 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
4056 { "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
4057
4058 /*
4059 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4060 * (Return Zero After Trim) flags in the ATA Command Set are
4061 * unreliable in the sense that they only define what happens if
4062 * the device successfully executed the DSM TRIM command. TRIM
4063 * is only advisory, however, and the device is free to silently
4064 * ignore all or parts of the request.
4065 *
4066 * Whitelist drives that are known to reliably return zeroes
4067 * after TRIM.
4068 */
4069
4070 /*
4071 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4072 * that model before whitelisting all other intel SSDs.
4073 */
4074 { "INTEL*SSDSC2MH*", NULL, 0 },
4075
4076 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4077 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4078 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4079 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4080 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4081 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4082 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4083 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4084
4085 /*
4086 * Some WD SATA-I drives spin up and down erratically when the link
4087 * is put into the slumber mode. We don't have full list of the
4088 * affected devices. Disable LPM if the device matches one of the
4089 * known prefixes and is SATA-1. As a side effect LPM partial is
4090 * lost too.
4091 *
4092 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4093 */
4094 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4095 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4096 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4097 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4098 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4099 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4100 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4101
4102 /*
4103 * This SATA DOM device goes on a walkabout when the ATA_LOG_DIRECTORY
4104 * log page is accessed. Ensure we never ask for this log page with
4105 * these devices.
4106 */
4107 { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
4108
4109 /* End Marker */
4110 { }
4111};
4112
4113static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4114{
4115 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4116 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4117 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4118
4119 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4120 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4121
4122 while (ad->model_num) {
4123 if (glob_match(ad->model_num, model_num)) {
4124 if (ad->model_rev == NULL)
4125 return ad->horkage;
4126 if (glob_match(ad->model_rev, model_rev))
4127 return ad->horkage;
4128 }
4129 ad++;
4130 }
4131 return 0;
4132}
4133
4134static int ata_dma_blacklisted(const struct ata_device *dev)
4135{
4136 /* We don't support polling DMA.
4137 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
4138 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4139 */
4140 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4141 (dev->flags & ATA_DFLAG_CDB_INTR))
4142 return 1;
4143 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4144}
4145
4146/**
4147 * ata_is_40wire - check drive side detection
4148 * @dev: device
4149 *
4150 * Perform drive side detection decoding, allowing for device vendors
4151 * who can't follow the documentation.
4152 */
4153
4154static int ata_is_40wire(struct ata_device *dev)
4155{
4156 if (dev->horkage & ATA_HORKAGE_IVB)
4157 return ata_drive_40wire_relaxed(dev->id);
4158 return ata_drive_40wire(dev->id);
4159}
4160
4161/**
4162 * cable_is_40wire - 40/80/SATA decider
4163 * @ap: port to consider
4164 *
4165 * This function encapsulates the policy for speed management
4166 * in one place. At the moment we don't cache the result but
4167 * there is a good case for setting ap->cbl to the result when
4168 * we are called with unknown cables (and figuring out if it
4169 * impacts hotplug at all).
4170 *
4171 * Return 1 if the cable appears to be 40 wire.
4172 */
4173
4174static int cable_is_40wire(struct ata_port *ap)
4175{
4176 struct ata_link *link;
4177 struct ata_device *dev;
4178
4179 /* If the controller thinks we are 40 wire, we are. */
4180 if (ap->cbl == ATA_CBL_PATA40)
4181 return 1;
4182
4183 /* If the controller thinks we are 80 wire, we are. */
4184 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4185 return 0;
4186
4187 /* If the system is known to use a short 40-wire cable (e.g.
4188 * a laptop), then we allow 80-wire modes even if the drive
4189 * isn't sure.
4190 */
4191 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4192 return 0;
4193
4194 /* If the controller doesn't know, we scan.
4195 *
4196 * Note: We look for all 40 wire detects at this point. Any
4197 * 80 wire detect is taken to be 80 wire cable because
4198 * - in many setups only the one drive (slave if present) will
4199 * give a valid detect
4200 * - if you have a drive incapable of cable detection you don't want it
4201 * to colour the choice
4202 */
4203 ata_for_each_link(link, ap, EDGE) {
4204 ata_for_each_dev(dev, link, ENABLED) {
4205 if (!ata_is_40wire(dev))
4206 return 0;
4207 }
4208 }
4209 return 1;
4210}
4211
4212/**
4213 * ata_dev_xfermask - Compute supported xfermask of the given device
4214 * @dev: Device to compute xfermask for
4215 *
4216 * Compute supported xfermask of @dev and store it in
4217 * dev->*_mask. This function is responsible for applying all
4218 * known limits including host controller limits, device
4219 * blacklist, etc...
4220 *
4221 * LOCKING:
4222 * None.
4223 */
4224static void ata_dev_xfermask(struct ata_device *dev)
4225{
4226 struct ata_link *link = dev->link;
4227 struct ata_port *ap = link->ap;
4228 struct ata_host *host = ap->host;
4229 unsigned int xfer_mask;
4230
4231 /* controller modes available */
4232 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4233 ap->mwdma_mask, ap->udma_mask);
4234
4235 /* drive modes available */
4236 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4237 dev->mwdma_mask, dev->udma_mask);
4238 xfer_mask &= ata_id_xfermask(dev->id);
4239
4240 /*
4241 * CFA Advanced TrueIDE timings are not allowed on a shared
4242 * cable
4243 */
4244 if (ata_dev_pair(dev)) {
4245 /* No PIO5 or PIO6 */
4246 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4247 /* No MWDMA3 or MWDMA4 */
4248 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4249 }
4250
4251 if (ata_dma_blacklisted(dev)) {
4252 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4253 ata_dev_warn(dev,
4254 "device is on DMA blacklist, disabling DMA\n");
4255 }
4256
4257 if ((host->flags & ATA_HOST_SIMPLEX) &&
4258 host->simplex_claimed && host->simplex_claimed != ap) {
4259 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4260 ata_dev_warn(dev,
4261 "simplex DMA is claimed by other device, disabling DMA\n");
4262 }
4263
4264 if (ap->flags & ATA_FLAG_NO_IORDY)
4265 xfer_mask &= ata_pio_mask_no_iordy(dev);
4266
4267 if (ap->ops->mode_filter)
4268 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4269
4270 /* Apply cable rule here. Don't apply it early because when
4271 * we handle hot plug the cable type can itself change.
4272 * Check this last so that we know if the transfer rate was
4273 * solely limited by the cable.
4274 * Unknown or 80 wire cables reported host side are checked
4275 * drive side as well. Cases where we know a 40wire cable
4276 * is used safely for 80 are not checked here.
4277 */
4278 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4279 /* UDMA/44 or higher would be available */
4280 if (cable_is_40wire(ap)) {
4281 ata_dev_warn(dev,
4282 "limited to UDMA/33 due to 40-wire cable\n");
4283 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4284 }
4285
4286 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4287 &dev->mwdma_mask, &dev->udma_mask);
4288}
4289
4290/**
4291 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4292 * @dev: Device to which command will be sent
4293 *
4294 * Issue SET FEATURES - XFER MODE command to device @dev
4295 * on port @ap.
4296 *
4297 * LOCKING:
4298 * PCI/etc. bus probe sem.
4299 *
4300 * RETURNS:
4301 * 0 on success, AC_ERR_* mask otherwise.
4302 */
4303
4304static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4305{
4306 struct ata_taskfile tf;
4307
4308 /* set up set-features taskfile */
4309 ata_dev_dbg(dev, "set features - xfer mode\n");
4310
4311 /* Some controllers and ATAPI devices show flaky interrupt
4312 * behavior after setting xfer mode. Use polling instead.
4313 */
4314 ata_tf_init(dev, &tf);
4315 tf.command = ATA_CMD_SET_FEATURES;
4316 tf.feature = SETFEATURES_XFER;
4317 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4318 tf.protocol = ATA_PROT_NODATA;
4319 /* If we are using IORDY we must send the mode setting command */
4320 if (ata_pio_need_iordy(dev))
4321 tf.nsect = dev->xfer_mode;
4322 /* If the device has IORDY and the controller does not - turn it off */
4323 else if (ata_id_has_iordy(dev->id))
4324 tf.nsect = 0x01;
4325 else /* In the ancient relic department - skip all of this */
4326 return 0;
4327
4328 /*
4329 * On some disks, this command causes spin-up, so we need longer
4330 * timeout.
4331 */
4332 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4333}
4334
4335/**
4336 * ata_dev_set_feature - Issue SET FEATURES
4337 * @dev: Device to which command will be sent
4338 * @subcmd: The SET FEATURES subcommand to be sent
4339 * @action: The sector count represents a subcommand-specific action
4340 *
4341 * Issue SET FEATURES command to device @dev on port @ap with the sector count set to @action.
4342 *
4343 * LOCKING:
4344 * PCI/etc. bus probe sem.
4345 *
4346 * RETURNS:
4347 * 0 on success, AC_ERR_* mask otherwise.
4348 */
4349unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
4350{
4351 struct ata_taskfile tf;
4352 unsigned int timeout = 0;
4353
4354 /* set up set-features taskfile */
4355 ata_dev_dbg(dev, "set features\n");
4356
4357 ata_tf_init(dev, &tf);
4358 tf.command = ATA_CMD_SET_FEATURES;
4359 tf.feature = subcmd;
4360 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4361 tf.protocol = ATA_PROT_NODATA;
4362 tf.nsect = action;
4363
4364 if (subcmd == SETFEATURES_SPINUP)
4365 timeout = ata_probe_timeout ?
4366 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4367
4368 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4369}
4370EXPORT_SYMBOL_GPL(ata_dev_set_feature);
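/*
 * Illustrative example of the helper above (a sketch, not a call site in
 * this file): enabling the volatile write cache from EH context and only
 * whining on failure:
 *
 *	unsigned int err_mask = ata_dev_set_feature(dev, SETFEATURES_WC_ON, 0);
 *
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable write cache (err_mask=0x%x)\n",
 *			     err_mask);
 */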
4371
4372/**
4373 * ata_dev_init_params - Issue INIT DEV PARAMS command
4374 * @dev: Device to which command will be sent
4375 * @heads: Number of heads (taskfile parameter)
4376 * @sectors: Number of sectors (taskfile parameter)
4377 *
4378 * LOCKING:
4379 * Kernel thread context (may sleep)
4380 *
4381 * RETURNS:
4382 * 0 on success, AC_ERR_* mask otherwise.
4383 */
4384static unsigned int ata_dev_init_params(struct ata_device *dev,
4385 u16 heads, u16 sectors)
4386{
4387 struct ata_taskfile tf;
4388 unsigned int err_mask;
4389
4390 /* Number of sectors per track 1-255. Number of heads 1-16 */
4391 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4392 return AC_ERR_INVALID;
4393
4394 /* set up init dev params taskfile */
4395 ata_dev_dbg(dev, "init dev params\n");
4396
4397 ata_tf_init(dev, &tf);
4398 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4399 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4400 tf.protocol = ATA_PROT_NODATA;
4401 tf.nsect = sectors;
4402 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4403
4404 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4405 /* A clean abort indicates an original or just-out-of-spec drive
4406 and we should continue, as we issue the setup based on the
4407 drive-reported working geometry */
4408 if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
4409 err_mask = 0;
4410
4411 return err_mask;
4412}
4413
4414/**
4415 * atapi_check_dma - Check whether ATAPI DMA can be supported
4416 * @qc: Metadata associated with taskfile to check
4417 *
4418 * Allow low-level driver to filter ATA PACKET commands, returning
4419 * a status indicating whether or not it is OK to use DMA for the
4420 * supplied PACKET command.
4421 *
4422 * LOCKING:
4423 * spin_lock_irqsave(host lock)
4424 *
4425 * RETURNS: 0 when ATAPI DMA can be used
4426 * nonzero otherwise
4427 */
4428int atapi_check_dma(struct ata_queued_cmd *qc)
4429{
4430 struct ata_port *ap = qc->ap;
4431
4432 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4433 * few ATAPI devices choke on such DMA requests.
4434 */
4435 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4436 unlikely(qc->nbytes & 15))
4437 return 1;
4438
4439 if (ap->ops->check_atapi_dma)
4440 return ap->ops->check_atapi_dma(qc);
4441
4442 return 0;
4443}
4444
4445/**
4446 * ata_std_qc_defer - Check whether a qc needs to be deferred
4447 * @qc: ATA command in question
4448 *
4449 * Non-NCQ commands cannot run with any other command, NCQ or
4450 * not. As the upper layer only knows the queue depth, we are
4451 * responsible for maintaining exclusion. This function checks
4452 * whether a new command @qc can be issued.
4453 *
4454 * LOCKING:
4455 * spin_lock_irqsave(host lock)
4456 *
4457 * RETURNS:
4458 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4459 */
4460int ata_std_qc_defer(struct ata_queued_cmd *qc)
4461{
4462 struct ata_link *link = qc->dev->link;
4463
4464 if (ata_is_ncq(qc->tf.protocol)) {
4465 if (!ata_tag_valid(link->active_tag))
4466 return 0;
4467 } else {
4468 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4469 return 0;
4470 }
4471
4472 return ATA_DEFER_LINK;
4473}
4474EXPORT_SYMBOL_GPL(ata_std_qc_defer);
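/*
 * Illustrative sketch: a driver with extra queueing restrictions can layer
 * its own checks on top of the standard helper from its ->qc_defer hook.
 * "example_qc_defer" and "controller_busy" are made-up names.
 *
 *	static int example_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (controller_busy(qc->ap))
 *			return ATA_DEFER_PORT;
 *		return ata_std_qc_defer(qc);
 *	}
 */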
4475
4476enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4477{
4478 return AC_ERR_OK;
4479}
4480EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4481
4482/**
4483 * ata_sg_init - Associate command with scatter-gather table.
4484 * @qc: Command to be associated
4485 * @sg: Scatter-gather table.
4486 * @n_elem: Number of elements in s/g table.
4487 *
4488 * Initialize the data-related elements of queued_cmd @qc
4489 * to point to a scatter-gather table @sg, containing @n_elem
4490 * elements.
4491 *
4492 * LOCKING:
4493 * spin_lock_irqsave(host lock)
4494 */
4495void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4496 unsigned int n_elem)
4497{
4498 qc->sg = sg;
4499 qc->n_elem = n_elem;
4500 qc->cursg = qc->sg;
4501}
4502
4503#ifdef CONFIG_HAS_DMA
4504
4505/**
4506 * ata_sg_clean - Unmap DMA memory associated with command
4507 * @qc: Command containing DMA memory to be released
4508 *
4509 * Unmap all mapped DMA memory associated with this command.
4510 *
4511 * LOCKING:
4512 * spin_lock_irqsave(host lock)
4513 */
4514static void ata_sg_clean(struct ata_queued_cmd *qc)
4515{
4516 struct ata_port *ap = qc->ap;
4517 struct scatterlist *sg = qc->sg;
4518 int dir = qc->dma_dir;
4519
4520 WARN_ON_ONCE(sg == NULL);
4521
4522 if (qc->n_elem)
4523 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4524
4525 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4526 qc->sg = NULL;
4527}
4528
4529/**
4530 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4531 * @qc: Command with scatter-gather table to be mapped.
4532 *
4533 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4534 *
4535 * LOCKING:
4536 * spin_lock_irqsave(host lock)
4537 *
4538 * RETURNS:
4539 * Zero on success, negative on error.
4540 *
4541 */
4542static int ata_sg_setup(struct ata_queued_cmd *qc)
4543{
4544 struct ata_port *ap = qc->ap;
4545 unsigned int n_elem;
4546
4547 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4548 if (n_elem < 1)
4549 return -1;
4550
4551 qc->orig_n_elem = qc->n_elem;
4552 qc->n_elem = n_elem;
4553 qc->flags |= ATA_QCFLAG_DMAMAP;
4554
4555 return 0;
4556}
4557
4558#else /* !CONFIG_HAS_DMA */
4559
4560static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4561static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4562
4563#endif /* !CONFIG_HAS_DMA */
4564
4565/**
4566 * swap_buf_le16 - swap halves of 16-bit words in place
4567 * @buf: Buffer to swap
4568 * @buf_words: Number of 16-bit words in buffer.
4569 *
4570 * Swap halves of 16-bit words if needed to convert from
4571 * little-endian byte order to native cpu byte order, or
4572 * vice-versa.
4573 *
4574 * LOCKING:
4575 * Inherited from caller.
4576 */
4577void swap_buf_le16(u16 *buf, unsigned int buf_words)
4578{
4579#ifdef __BIG_ENDIAN
4580 unsigned int i;
4581
4582 for (i = 0; i < buf_words; i++)
4583 buf[i] = le16_to_cpu(buf[i]);
4584#endif /* __BIG_ENDIAN */
4585}
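/*
 * Example (the pattern used when libata reads IDENTIFY data via PIO): the
 * identify buffer is little-endian on the wire, so it is converted before
 * any ata_id_*() accessor looks at it:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */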
4586
4587/**
4588 * ata_qc_free - free unused ata_queued_cmd
4589 * @qc: Command to complete
4590 *
4591 * Designed to free an unused ata_queued_cmd object
4592 * in case something prevents using it.
4593 *
4594 * LOCKING:
4595 * spin_lock_irqsave(host lock)
4596 */
4597void ata_qc_free(struct ata_queued_cmd *qc)
4598{
4599 qc->flags = 0;
4600 if (ata_tag_valid(qc->tag))
4601 qc->tag = ATA_TAG_POISON;
4602}
4603
4604void __ata_qc_complete(struct ata_queued_cmd *qc)
4605{
4606 struct ata_port *ap;
4607 struct ata_link *link;
4608
4609 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4610 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4611 ap = qc->ap;
4612 link = qc->dev->link;
4613
4614 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4615 ata_sg_clean(qc);
4616
4617 /* command should be marked inactive atomically with qc completion */
4618 if (ata_is_ncq(qc->tf.protocol)) {
4619 link->sactive &= ~(1 << qc->hw_tag);
4620 if (!link->sactive)
4621 ap->nr_active_links--;
4622 } else {
4623 link->active_tag = ATA_TAG_POISON;
4624 ap->nr_active_links--;
4625 }
4626
4627 /* clear exclusive status */
4628 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4629 ap->excl_link == link))
4630 ap->excl_link = NULL;
4631
4632 /* atapi: mark qc as inactive to prevent the interrupt handler
4633 * from completing the command twice later, before the error handler
4634 * is called. (when rc != 0 and atapi request sense is needed)
4635 */
4636 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4637 ap->qc_active &= ~(1ULL << qc->tag);
4638
4639 /* call completion callback */
4640 qc->complete_fn(qc);
4641}
4642
4643static void fill_result_tf(struct ata_queued_cmd *qc)
4644{
4645 struct ata_port *ap = qc->ap;
4646
4647 qc->result_tf.flags = qc->tf.flags;
4648 ap->ops->qc_fill_rtf(qc);
4649}
4650
4651static void ata_verify_xfer(struct ata_queued_cmd *qc)
4652{
4653 struct ata_device *dev = qc->dev;
4654
4655 if (!ata_is_data(qc->tf.protocol))
4656 return;
4657
4658 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4659 return;
4660
4661 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4662}
4663
4664/**
4665 * ata_qc_complete - Complete an active ATA command
4666 * @qc: Command to complete
4667 *
4668 * Indicate to the mid and upper layers that an ATA command has
4669 * completed, with either an ok or not-ok status.
4670 *
4671 * Refrain from calling this function multiple times when
4672 * successfully completing multiple NCQ commands.
4673 * ata_qc_complete_multiple() should be used instead, which will
4674 * properly update IRQ expect state.
4675 *
4676 * LOCKING:
4677 * spin_lock_irqsave(host lock)
4678 */
4679void ata_qc_complete(struct ata_queued_cmd *qc)
4680{
4681 struct ata_port *ap = qc->ap;
4682
4683 /* Trigger the LED (if available) */
4684 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
4685
4686 /* XXX: New EH and old EH use different mechanisms to
4687 * synchronize EH with regular execution path.
4688 *
4689 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4690 * Normal execution path is responsible for not accessing a
4691 * failed qc. libata core enforces the rule by returning NULL
4692 * from ata_qc_from_tag() for failed qcs.
4693 *
4694 * Old EH depends on ata_qc_complete() nullifying completion
4695 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4696 * not synchronize with interrupt handler. Only PIO task is
4697 * taken care of.
4698 */
4699 if (ap->ops->error_handler) {
4700 struct ata_device *dev = qc->dev;
4701 struct ata_eh_info *ehi = &dev->link->eh_info;
4702
4703 if (unlikely(qc->err_mask))
4704 qc->flags |= ATA_QCFLAG_FAILED;
4705
4706 /*
4707 * Finish internal commands without any further processing
4708 * and always with the result TF filled.
4709 */
4710 if (unlikely(ata_tag_internal(qc->tag))) {
4711 fill_result_tf(qc);
4712 trace_ata_qc_complete_internal(qc);
4713 __ata_qc_complete(qc);
4714 return;
4715 }
4716
4717 /*
4718 * Non-internal qc has failed. Fill the result TF and
4719 * summon EH.
4720 */
4721 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4722 fill_result_tf(qc);
4723 trace_ata_qc_complete_failed(qc);
4724 ata_qc_schedule_eh(qc);
4725 return;
4726 }
4727
4728 WARN_ON_ONCE(ata_port_is_frozen(ap));
4729
4730 /* read result TF if requested */
4731 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4732 fill_result_tf(qc);
4733
4734 trace_ata_qc_complete_done(qc);
4735 /* Some commands need post-processing after successful
4736 * completion.
4737 */
4738 switch (qc->tf.command) {
4739 case ATA_CMD_SET_FEATURES:
4740 if (qc->tf.feature != SETFEATURES_WC_ON &&
4741 qc->tf.feature != SETFEATURES_WC_OFF &&
4742 qc->tf.feature != SETFEATURES_RA_ON &&
4743 qc->tf.feature != SETFEATURES_RA_OFF)
4744 break;
4745 fallthrough;
4746 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4747 case ATA_CMD_SET_MULTI: /* multi_count changed */
4748 /* revalidate device */
4749 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4750 ata_port_schedule_eh(ap);
4751 break;
4752
4753 case ATA_CMD_SLEEP:
4754 dev->flags |= ATA_DFLAG_SLEEPING;
4755 break;
4756 }
4757
4758 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4759 ata_verify_xfer(qc);
4760
4761 __ata_qc_complete(qc);
4762 } else {
4763 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4764 return;
4765
4766 /* read result TF if failed or requested */
4767 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4768 fill_result_tf(qc);
4769
4770 __ata_qc_complete(qc);
4771 }
4772}
4773EXPORT_SYMBOL_GPL(ata_qc_complete);
4774
4775/**
4776 * ata_qc_get_active - get bitmask of active qcs
4777 * @ap: port in question
4778 *
4779 * LOCKING:
4780 * spin_lock_irqsave(host lock)
4781 *
4782 * RETURNS:
4783 * Bitmask of active qcs
4784 */
4785u64 ata_qc_get_active(struct ata_port *ap)
4786{
4787 u64 qc_active = ap->qc_active;
4788
4789 /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4790 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4791 qc_active |= (1 << 0);
4792 qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4793 }
4794
4795 return qc_active;
4796}
4797EXPORT_SYMBOL_GPL(ata_qc_get_active);
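/*
 * Illustrative sketch of the usual pairing with ata_qc_complete_multiple()
 * in an NCQ-capable LLD's interrupt handler.  "hw_still_active" is a
 * made-up name for the tag mask the controller reports as still pending:
 *
 *	u64 still_active = ata_qc_get_active(ap) & hw_still_active;
 *
 *	ata_qc_complete_multiple(ap, still_active);
 */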
4798
4799/**
4800 * ata_qc_issue - issue taskfile to device
4801 * @qc: command to issue to device
4802 *
4803 * Prepare an ATA command for submission to the device.
4804 * This includes mapping the data into a DMA-able
4805 * area, filling in the S/G table, and finally
4806 * writing the taskfile to hardware, starting the command.
4807 *
4808 * LOCKING:
4809 * spin_lock_irqsave(host lock)
4810 */
4811void ata_qc_issue(struct ata_queued_cmd *qc)
4812{
4813 struct ata_port *ap = qc->ap;
4814 struct ata_link *link = qc->dev->link;
4815 u8 prot = qc->tf.protocol;
4816
4817 /* Make sure only one non-NCQ command is outstanding. The
4818 * check is skipped for old EH because it reuses active qc to
4819 * request ATAPI sense.
4820 */
4821 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4822
4823 if (ata_is_ncq(prot)) {
4824 WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
4825
4826 if (!link->sactive)
4827 ap->nr_active_links++;
4828 link->sactive |= 1 << qc->hw_tag;
4829 } else {
4830 WARN_ON_ONCE(link->sactive);
4831
4832 ap->nr_active_links++;
4833 link->active_tag = qc->tag;
4834 }
4835
4836 qc->flags |= ATA_QCFLAG_ACTIVE;
4837 ap->qc_active |= 1ULL << qc->tag;
4838
4839 /*
4840 * We guarantee to LLDs that they will have at least one
4841 * non-zero sg if the command is a data command.
4842 */
4843 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
4844 goto sys_err;
4845
4846 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4847 (ap->flags & ATA_FLAG_PIO_DMA)))
4848 if (ata_sg_setup(qc))
4849 goto sys_err;
4850
4851 /* if device is sleeping, schedule reset and abort the link */
4852 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4853 link->eh_info.action |= ATA_EH_RESET;
4854 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4855 ata_link_abort(link);
4856 return;
4857 }
4858
4859 trace_ata_qc_prep(qc);
4860 qc->err_mask |= ap->ops->qc_prep(qc);
4861 if (unlikely(qc->err_mask))
4862 goto err;
4863 trace_ata_qc_issue(qc);
4864 qc->err_mask |= ap->ops->qc_issue(qc);
4865 if (unlikely(qc->err_mask))
4866 goto err;
4867 return;
4868
4869sys_err:
4870 qc->err_mask |= AC_ERR_SYSTEM;
4871err:
4872 ata_qc_complete(qc);
4873}
4874
4875/**
4876 * ata_phys_link_online - test whether the given link is online
4877 * @link: ATA link to test
4878 *
4879 * Test whether @link is online. Note that this function returns
4880 * false if the online status of @link cannot be obtained, so
4881 * ata_phys_link_online(link) != !ata_phys_link_offline(link).
4882 *
4883 * LOCKING:
4884 * None.
4885 *
4886 * RETURNS:
4887 * True if the port online status is available and online.
4888 */
4889bool ata_phys_link_online(struct ata_link *link)
4890{
4891 u32 sstatus;
4892
4893 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4894 ata_sstatus_online(sstatus))
4895 return true;
4896 return false;
4897}
4898
4899/**
4900 * ata_phys_link_offline - test whether the given link is offline
4901 * @link: ATA link to test
4902 *
4903 * Test whether @link is offline. Note that this function
4904 * returns false if the offline status of @link cannot be obtained, so
4905 * ata_phys_link_offline(link) != !ata_phys_link_online(link).
4906 *
4907 * LOCKING:
4908 * None.
4909 *
4910 * RETURNS:
4911 * True if the port offline status is available and offline.
4912 */
4913bool ata_phys_link_offline(struct ata_link *link)
4914{
4915 u32 sstatus;
4916
4917 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4918 !ata_sstatus_online(sstatus))
4919 return true;
4920 return false;
4921}
4922
4923/**
4924 * ata_link_online - test whether the given link is online
4925 * @link: ATA link to test
4926 *
4927 * Test whether @link is online. This is identical to
4928 * ata_phys_link_online() when there's no slave link. When
4929 * there's a slave link, this function should only be called on
4930 * the master link and will return true if any of M/S links is
4931 * online.
4932 *
4933 * LOCKING:
4934 * None.
4935 *
4936 * RETURNS:
4937 * True if the port online status is available and online.
4938 */
4939bool ata_link_online(struct ata_link *link)
4940{
4941 struct ata_link *slave = link->ap->slave_link;
4942
4943 WARN_ON(link == slave); /* shouldn't be called on slave link */
4944
4945 return ata_phys_link_online(link) ||
4946 (slave && ata_phys_link_online(slave));
4947}
4948EXPORT_SYMBOL_GPL(ata_link_online);
4949
4950/**
4951 * ata_link_offline - test whether the given link is offline
4952 * @link: ATA link to test
4953 *
4954 * Test whether @link is offline. This is identical to
4955 * ata_phys_link_offline() when there's no slave link. When
4956 * there's a slave link, this function should only be called on
4957 * the master link and will return true if both M/S links are
4958 * offline.
4959 *
4960 * LOCKING:
4961 * None.
4962 *
4963 * RETURNS:
4964 * True if the port offline status is available and offline.
4965 */
4966bool ata_link_offline(struct ata_link *link)
4967{
4968 struct ata_link *slave = link->ap->slave_link;
4969
4970 WARN_ON(link == slave); /* shouldn't be called on slave link */
4971
4972 return ata_phys_link_offline(link) &&
4973 (!slave || ata_phys_link_offline(slave));
4974}
4975EXPORT_SYMBOL_GPL(ata_link_offline);
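/*
 * Note with example (illustrative, not from the original source): because
 * both helpers return false when SStatus cannot be read, "not online" does
 * not imply "offline".  A caller that needs to distinguish the three cases
 * has to check both, e.g.:
 *
 *	if (ata_link_online(link))
 *		rc = 0;		// PHY up, device presence detected
 *	else if (ata_link_offline(link))
 *		rc = -ENODEV;	// PHY reports no attached device
 *	else
 *		rc = -EIO;	// SStatus unreadable, state unknown
 *
 * The rc values are arbitrary and only illustrate the three-way outcome.
 */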
4976
4977#ifdef CONFIG_PM
4978static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
4979 unsigned int action, unsigned int ehi_flags,
4980 bool async)
4981{
4982 struct ata_link *link;
4983 unsigned long flags;
4984
4985 /* Previous resume operation might still be in
4986 * progress. Wait for PM_PENDING to clear.
4987 */
4988 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4989 ata_port_wait_eh(ap);
4990 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4991 }
4992
4993 /* request PM ops to EH */
4994 spin_lock_irqsave(ap->lock, flags);
4995
4996 ap->pm_mesg = mesg;
4997 ap->pflags |= ATA_PFLAG_PM_PENDING;
4998 ata_for_each_link(link, ap, HOST_FIRST) {
4999 link->eh_info.action |= action;
5000 link->eh_info.flags |= ehi_flags;
5001 }
5002
5003 ata_port_schedule_eh(ap);
5004
5005 spin_unlock_irqrestore(ap->lock, flags);
5006
5007 if (!async) {
5008 ata_port_wait_eh(ap);
5009 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5010 }
5011}
5012
5013/*
5014 * On some hardware, a device fails to respond after being spun down for suspend. As
5015 * the device won't be used before being resumed, we don't need to touch the
5016 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
5017 *
5018 * http://thread.gmane.org/gmane.linux.ide/46764
5019 */
5020static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5021 | ATA_EHI_NO_AUTOPSY
5022 | ATA_EHI_NO_RECOVERY;
5023
5024static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5025{
5026 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5027}
5028
5029static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5030{
5031 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5032}
5033
5034static int ata_port_pm_suspend(struct device *dev)
5035{
5036 struct ata_port *ap = to_ata_port(dev);
5037
5038 if (pm_runtime_suspended(dev))
5039 return 0;
5040
5041 ata_port_suspend(ap, PMSG_SUSPEND);
5042 return 0;
5043}
5044
5045static int ata_port_pm_freeze(struct device *dev)
5046{
5047 struct ata_port *ap = to_ata_port(dev);
5048
5049 if (pm_runtime_suspended(dev))
5050 return 0;
5051
5052 ata_port_suspend(ap, PMSG_FREEZE);
5053 return 0;
5054}
5055
5056static int ata_port_pm_poweroff(struct device *dev)
5057{
5058 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5059 return 0;
5060}
5061
5062static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5063 | ATA_EHI_QUIET;
5064
5065static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5066{
5067 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5068}
5069
5070static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5071{
5072 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5073}
5074
5075static int ata_port_pm_resume(struct device *dev)
5076{
5077 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5078 pm_runtime_disable(dev);
5079 pm_runtime_set_active(dev);
5080 pm_runtime_enable(dev);
5081 return 0;
5082}
5083
5084/*
5085 * For ODDs, the upper layer will poll for media change every few seconds,
5086 * which makes the device enter and leave suspend state just as often. Since
5087 * each suspend triggers a hard/soft reset, runtime suspend gains very little
5088 * and the ODD may malfunction after constantly being reset. So the idle
5089 * callback here will not proceed to suspend if a non-ZPODD capable ODD is
5090 * attached to the port.
5091 */
5092static int ata_port_runtime_idle(struct device *dev)
5093{
5094 struct ata_port *ap = to_ata_port(dev);
5095 struct ata_link *link;
5096 struct ata_device *adev;
5097
5098 ata_for_each_link(link, ap, HOST_FIRST) {
5099 ata_for_each_dev(adev, link, ENABLED)
5100 if (adev->class == ATA_DEV_ATAPI &&
5101 !zpodd_dev_enabled(adev))
5102 return -EBUSY;
5103 }
5104
5105 return 0;
5106}
5107
5108static int ata_port_runtime_suspend(struct device *dev)
5109{
5110 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5111 return 0;
5112}
5113
5114static int ata_port_runtime_resume(struct device *dev)
5115{
5116 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5117 return 0;
5118}
5119
5120static const struct dev_pm_ops ata_port_pm_ops = {
5121 .suspend = ata_port_pm_suspend,
5122 .resume = ata_port_pm_resume,
5123 .freeze = ata_port_pm_freeze,
5124 .thaw = ata_port_pm_resume,
5125 .poweroff = ata_port_pm_poweroff,
5126 .restore = ata_port_pm_resume,
5127
5128 .runtime_suspend = ata_port_runtime_suspend,
5129 .runtime_resume = ata_port_runtime_resume,
5130 .runtime_idle = ata_port_runtime_idle,
5131};
5132
5133/* sas ports don't participate in pm runtime management of ata_ports,
5134 * and need to resume ata devices at the domain level, not the per-port
5135 * level. sas suspend/resume is async to allow parallel port recovery
5136 * since sas has multiple ata_port instances per Scsi_Host.
5137 */
5138void ata_sas_port_suspend(struct ata_port *ap)
5139{
5140 ata_port_suspend_async(ap, PMSG_SUSPEND);
5141}
5142EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5143
5144void ata_sas_port_resume(struct ata_port *ap)
5145{
5146 ata_port_resume_async(ap, PMSG_RESUME);
5147}
5148EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5149
5150/**
5151 * ata_host_suspend - suspend host
5152 * @host: host to suspend
5153 * @mesg: PM message
5154 *
5155 * Suspend @host. Actual operation is performed by port suspend.
5156 */
5157void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5158{
5159 host->dev->power.power_state = mesg;
5160}
5161EXPORT_SYMBOL_GPL(ata_host_suspend);
5162
5163/**
5164 * ata_host_resume - resume host
5165 * @host: host to resume
5166 *
5167 * Resume @host. Actual operation is performed by port resume.
5168 */
5169void ata_host_resume(struct ata_host *host)
5170{
5171 host->dev->power.power_state = PMSG_ON;
5172}
5173EXPORT_SYMBOL_GPL(ata_host_resume);
5174#endif
5175
5176const struct device_type ata_port_type = {
5177 .name = "ata_port",
5178#ifdef CONFIG_PM
5179 .pm = &ata_port_pm_ops,
5180#endif
5181};
5182
5183/**
5184 * ata_dev_init - Initialize an ata_device structure
5185 * @dev: Device structure to initialize
5186 *
5187 * Initialize @dev in preparation for probing.
5188 *
5189 * LOCKING:
5190 * Inherited from caller.
5191 */
5192void ata_dev_init(struct ata_device *dev)
5193{
5194 struct ata_link *link = ata_dev_phys_link(dev);
5195 struct ata_port *ap = link->ap;
5196 unsigned long flags;
5197
5198 /* SATA spd limit is bound to the attached device, reset together */
5199 link->sata_spd_limit = link->hw_sata_spd_limit;
5200 link->sata_spd = 0;
5201
5202 /* High bits of dev->flags are used to record warm plug
5203 * requests which occur asynchronously. Synchronize using
5204 * host lock.
5205 */
5206 spin_lock_irqsave(ap->lock, flags);
5207 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5208 dev->horkage = 0;
5209 spin_unlock_irqrestore(ap->lock, flags);
5210
5211 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5212 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5213 dev->pio_mask = UINT_MAX;
5214 dev->mwdma_mask = UINT_MAX;
5215 dev->udma_mask = UINT_MAX;
5216}
5217
5218/**
5219 * ata_link_init - Initialize an ata_link structure
5220 * @ap: ATA port link is attached to
5221 * @link: Link structure to initialize
5222 * @pmp: Port multiplier port number
5223 *
5224 * Initialize @link.
5225 *
5226 * LOCKING:
5227 * Kernel thread context (may sleep)
5228 */
5229void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5230{
5231 int i;
5232
5233 /* clear everything except for devices */
5234 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5235 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5236
5237 link->ap = ap;
5238 link->pmp = pmp;
5239 link->active_tag = ATA_TAG_POISON;
5240 link->hw_sata_spd_limit = UINT_MAX;
5241
5242 /* can't use iterator, ap isn't initialized yet */
5243 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5244 struct ata_device *dev = &link->device[i];
5245
5246 dev->link = link;
5247 dev->devno = dev - link->device;
5248#ifdef CONFIG_ATA_ACPI
5249 dev->gtf_filter = ata_acpi_gtf_filter;
5250#endif
5251 ata_dev_init(dev);
5252 }
5253}
5254
5255/**
5256 * sata_link_init_spd - Initialize link->sata_spd_limit
5257 * @link: Link to configure sata_spd_limit for
5258 *
5259 * Initialize ``link->[hw_]sata_spd_limit`` to the currently
5260 * configured value.
5261 *
5262 * LOCKING:
5263 * Kernel thread context (may sleep).
5264 *
5265 * RETURNS:
5266 * 0 on success, -errno on failure.
5267 */
5268int sata_link_init_spd(struct ata_link *link)
5269{
5270 u8 spd;
5271 int rc;
5272
5273 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5274 if (rc)
5275 return rc;
5276
5277 spd = (link->saved_scontrol >> 4) & 0xf;
5278 if (spd)
5279 link->hw_sata_spd_limit &= (1 << spd) - 1;
5280
5281 ata_force_link_limits(link);
5282
5283 link->sata_spd_limit = link->hw_sata_spd_limit;
5284
5285 return 0;
5286}
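/*
 * Worked example (illustrative): if SControl reads 0x320, the SPD field
 * ((0x320 >> 4) & 0xf) is 2, i.e. the link is administratively limited to
 * 3.0 Gbps.  hw_sata_spd_limit is then masked with (1 << 2) - 1 == 0x3,
 * leaving only the 1.5 Gbps and 3.0 Gbps bits set.  An SPD field of 0
 * means "no restriction" and the mask is left untouched.
 */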
5287
5288/**
5289 * ata_port_alloc - allocate and initialize basic ATA port resources
5290 * @host: ATA host this allocated port belongs to
5291 *
5292 * Allocate and initialize basic ATA port resources.
5293 *
5294 * RETURNS:
5295 * Allocated ATA port on success, NULL on failure.
5296 *
5297 * LOCKING:
5298 * Inherited from calling layer (may sleep).
5299 */
5300struct ata_port *ata_port_alloc(struct ata_host *host)
5301{
5302 struct ata_port *ap;
5303
5304 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5305 if (!ap)
5306 return NULL;
5307
5308 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5309 ap->lock = &host->lock;
5310 ap->print_id = -1;
5311 ap->local_port_no = -1;
5312 ap->host = host;
5313 ap->dev = host->dev;
5314
5315 mutex_init(&ap->scsi_scan_mutex);
5316 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5317 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5318 INIT_LIST_HEAD(&ap->eh_done_q);
5319 init_waitqueue_head(&ap->eh_wait_q);
5320 init_completion(&ap->park_req_pending);
5321 timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5322 TIMER_DEFERRABLE);
5323
5324 ap->cbl = ATA_CBL_NONE;
5325
5326 ata_link_init(ap, &ap->link, 0);
5327
5328#ifdef ATA_IRQ_TRAP
5329 ap->stats.unhandled_irq = 1;
5330 ap->stats.idle_irq = 1;
5331#endif
5332 ata_sff_port_init(ap);
5333
5334 return ap;
5335}
5336
5337static void ata_devres_release(struct device *gendev, void *res)
5338{
5339 struct ata_host *host = dev_get_drvdata(gendev);
5340 int i;
5341
5342 for (i = 0; i < host->n_ports; i++) {
5343 struct ata_port *ap = host->ports[i];
5344
5345 if (!ap)
5346 continue;
5347
5348 if (ap->scsi_host)
5349 scsi_host_put(ap->scsi_host);
5350
5351 }
5352
5353 dev_set_drvdata(gendev, NULL);
5354 ata_host_put(host);
5355}
5356
5357static void ata_host_release(struct kref *kref)
5358{
5359 struct ata_host *host = container_of(kref, struct ata_host, kref);
5360 int i;
5361
5362 for (i = 0; i < host->n_ports; i++) {
5363 struct ata_port *ap = host->ports[i];
5364
5365 kfree(ap->pmp_link);
5366 kfree(ap->slave_link);
5367 kfree(ap);
5368 host->ports[i] = NULL;
5369 }
5370 kfree(host);
5371}
5372
5373void ata_host_get(struct ata_host *host)
5374{
5375 kref_get(&host->kref);
5376}
5377
5378void ata_host_put(struct ata_host *host)
5379{
5380 kref_put(&host->kref, ata_host_release);
5381}
5382EXPORT_SYMBOL_GPL(ata_host_put);
5383
5384/**
5385 * ata_host_alloc - allocate and init basic ATA host resources
5386 * @dev: generic device this host is associated with
5387 * @max_ports: maximum number of ATA ports associated with this host
5388 *
5389 * Allocate and initialize basic ATA host resources. An LLD calls
5390 * this function to allocate a host, then initializes it fully and
5391 * attaches it using ata_host_register().
5392 *
5393 * @max_ports ports are allocated and host->n_ports is
5394 * initialized to @max_ports. The caller is allowed to decrease
5395 * host->n_ports before calling ata_host_register(). The unused
5396 * ports will be automatically freed on registration.
5397 *
5398 * RETURNS:
5399 * Allocated ATA host on success, NULL on failure.
5400 *
5401 * LOCKING:
5402 * Inherited from calling layer (may sleep).
5403 */
5404struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5405{
5406 struct ata_host *host;
5407 size_t sz;
5408 int i;
5409 void *dr;
5410
5411 /* alloc a container for our list of ATA ports (buses) */
5412 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5413 host = kzalloc(sz, GFP_KERNEL);
5414 if (!host)
5415 return NULL;
5416
5417 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5418 goto err_free;
5419
5420 dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
5421 if (!dr)
5422 goto err_out;
5423
5424 devres_add(dev, dr);
5425 dev_set_drvdata(dev, host);
5426
5427 spin_lock_init(&host->lock);
5428 mutex_init(&host->eh_mutex);
5429 host->dev = dev;
5430 host->n_ports = max_ports;
5431 kref_init(&host->kref);
5432
5433 /* allocate ports bound to this host */
5434 for (i = 0; i < max_ports; i++) {
5435 struct ata_port *ap;
5436
5437 ap = ata_port_alloc(host);
5438 if (!ap)
5439 goto err_out;
5440
5441 ap->port_no = i;
5442 host->ports[i] = ap;
5443 }
5444
5445 devres_remove_group(dev, NULL);
5446 return host;
5447
5448 err_out:
5449 devres_release_group(dev, NULL);
5450 err_free:
5451 kfree(host);
5452 return NULL;
5453}
5454EXPORT_SYMBOL_GPL(ata_host_alloc);
5455
5456/**
5457 * ata_host_alloc_pinfo - alloc host and init with port_info array
5458 * @dev: generic device this host is associated with
5459 * @ppi: array of ATA port_info to initialize host with
5460 * @n_ports: number of ATA ports attached to this host
5461 *
5462 * Allocate an ATA host and initialize it with info from @ppi. If
5463 * @ppi is NULL terminated, it may contain fewer entries than @n_ports;
5464 * the last entry is then used for the remaining ports.
5465 *
5466 * RETURNS:
5467 * Allocated ATA host on success, NULL on failure.
5468 *
5469 * LOCKING:
5470 * Inherited from calling layer (may sleep).
5471 */
5472struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5473 const struct ata_port_info * const * ppi,
5474 int n_ports)
5475{
5476 const struct ata_port_info *pi = &ata_dummy_port_info;
5477 struct ata_host *host;
5478 int i, j;
5479
5480 host = ata_host_alloc(dev, n_ports);
5481 if (!host)
5482 return NULL;
5483
5484 for (i = 0, j = 0; i < host->n_ports; i++) {
5485 struct ata_port *ap = host->ports[i];
5486
5487 if (ppi[j])
5488 pi = ppi[j++];
5489
5490 ap->pio_mask = pi->pio_mask;
5491 ap->mwdma_mask = pi->mwdma_mask;
5492 ap->udma_mask = pi->udma_mask;
5493 ap->flags |= pi->flags;
5494 ap->link.flags |= pi->link_flags;
5495 ap->ops = pi->port_ops;
5496
5497 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5498 host->ops = pi->port_ops;
5499 }
5500
5501 return host;
5502}
5503EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
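/*
 * Example (hypothetical driver, illustrative only): a controller whose two
 * ports are identical can pass a single port_info and rely on the NULL
 * terminator; the last (and only) entry is reused for the second port:
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 * foo_port_info, foo_port_ops and dev are placeholder names.
 */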
5504
5505static void ata_host_stop(struct device *gendev, void *res)
5506{
5507 struct ata_host *host = dev_get_drvdata(gendev);
5508 int i;
5509
5510 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5511
5512 for (i = 0; i < host->n_ports; i++) {
5513 struct ata_port *ap = host->ports[i];
5514
5515 if (ap->ops->port_stop)
5516 ap->ops->port_stop(ap);
5517 }
5518
5519 if (host->ops->host_stop)
5520 host->ops->host_stop(host);
5521}
5522
5523/**
5524 * ata_finalize_port_ops - finalize ata_port_operations
5525 * @ops: ata_port_operations to finalize
5526 *
5527 * An ata_port_operations can inherit from another ops and that
5528 * ops can again inherit from another. This can go on as many
5529 * times as necessary as long as there is no loop in the
5530 * inheritance chain.
5531 *
5532 * Ops tables are finalized when the host is started. NULL or
5533 * unspecified entries are inherited from the closest ancestor
5534 * which has the method and the entry is populated with it.
5535 * After finalization, the ops table directly points to all the
5536 * methods and ->inherits is no longer necessary and cleared.
5537 *
5538 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5539 *
5540 * LOCKING:
5541 * None.
5542 */
5543static void ata_finalize_port_ops(struct ata_port_operations *ops)
5544{
5545 static DEFINE_SPINLOCK(lock);
5546 const struct ata_port_operations *cur;
5547 void **begin = (void **)ops;
5548 void **end = (void **)&ops->inherits;
5549 void **pp;
5550
5551 if (!ops || !ops->inherits)
5552 return;
5553
5554 spin_lock(&lock);
5555
5556 for (cur = ops->inherits; cur; cur = cur->inherits) {
5557 void **inherit = (void **)cur;
5558
5559 for (pp = begin; pp < end; pp++, inherit++)
5560 if (!*pp)
5561 *pp = *inherit;
5562 }
5563
5564 for (pp = begin; pp < end; pp++)
5565 if (IS_ERR(*pp))
5566 *pp = NULL;
5567
5568 ops->inherits = NULL;
5569
5570 spin_unlock(&lock);
5571}
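/*
 * Example (illustrative, hypothetical LLD): an ops table usually inherits
 * from an existing libata table and overrides or suppresses individual
 * methods.  After ata_finalize_port_ops() runs (from ata_host_start()),
 * every slot points at the resolved method and ->inherits is cleared:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= foo_cable_detect,	// override
 *		.mode_filter	= ATA_OP_NULL,		// force method to NULL
 *	};
 *
 * ata_bmdma_port_ops and ATA_OP_NULL are real libata symbols;
 * foo_cable_detect is a placeholder.
 */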
5572
5573/**
5574 * ata_host_start - start and freeze ports of an ATA host
5575 * @host: ATA host to start ports for
5576 *
5577 * Start and then freeze ports of @host. Started status is
5578 * recorded in host->flags, so this function can be called
5579 * multiple times. Ports are guaranteed to get started only
5580 * once. If host->ops is not initialized yet, it is set to the
5581 * first non-dummy port ops.
5582 *
5583 * LOCKING:
5584 * Inherited from calling layer (may sleep).
5585 *
5586 * RETURNS:
5587 * 0 if all ports are started successfully, -errno otherwise.
5588 */
5589int ata_host_start(struct ata_host *host)
5590{
5591 int have_stop = 0;
5592 void *start_dr = NULL;
5593 int i, rc;
5594
5595 if (host->flags & ATA_HOST_STARTED)
5596 return 0;
5597
5598 ata_finalize_port_ops(host->ops);
5599
5600 for (i = 0; i < host->n_ports; i++) {
5601 struct ata_port *ap = host->ports[i];
5602
5603 ata_finalize_port_ops(ap->ops);
5604
5605 if (!host->ops && !ata_port_is_dummy(ap))
5606 host->ops = ap->ops;
5607
5608 if (ap->ops->port_stop)
5609 have_stop = 1;
5610 }
5611
5612 if (host->ops && host->ops->host_stop)
5613 have_stop = 1;
5614
5615 if (have_stop) {
5616 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5617 if (!start_dr)
5618 return -ENOMEM;
5619 }
5620
5621 for (i = 0; i < host->n_ports; i++) {
5622 struct ata_port *ap = host->ports[i];
5623
5624 if (ap->ops->port_start) {
5625 rc = ap->ops->port_start(ap);
5626 if (rc) {
5627 if (rc != -ENODEV)
5628 dev_err(host->dev,
5629 "failed to start port %d (errno=%d)\n",
5630 i, rc);
5631 goto err_out;
5632 }
5633 }
5634 ata_eh_freeze_port(ap);
5635 }
5636
5637 if (start_dr)
5638 devres_add(host->dev, start_dr);
5639 host->flags |= ATA_HOST_STARTED;
5640 return 0;
5641
5642 err_out:
5643 while (--i >= 0) {
5644 struct ata_port *ap = host->ports[i];
5645
5646 if (ap->ops->port_stop)
5647 ap->ops->port_stop(ap);
5648 }
5649 devres_free(start_dr);
5650 return rc;
5651}
5652EXPORT_SYMBOL_GPL(ata_host_start);
5653
5654/**
5655 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
5656 * @host: host to initialize
5657 * @dev: device host is attached to
5658 * @ops: port_ops
5659 *
5660 */
5661void ata_host_init(struct ata_host *host, struct device *dev,
5662 struct ata_port_operations *ops)
5663{
5664 spin_lock_init(&host->lock);
5665 mutex_init(&host->eh_mutex);
5666 host->n_tags = ATA_MAX_QUEUE;
5667 host->dev = dev;
5668 host->ops = ops;
5669 kref_init(&host->kref);
5670}
5671EXPORT_SYMBOL_GPL(ata_host_init);
5672
5673void __ata_port_probe(struct ata_port *ap)
5674{
5675 struct ata_eh_info *ehi = &ap->link.eh_info;
5676 unsigned long flags;
5677
5678 /* kick EH for boot probing */
5679 spin_lock_irqsave(ap->lock, flags);
5680
5681 ehi->probe_mask |= ATA_ALL_DEVICES;
5682 ehi->action |= ATA_EH_RESET;
5683 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5684
5685 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5686 ap->pflags |= ATA_PFLAG_LOADING;
5687 ata_port_schedule_eh(ap);
5688
5689 spin_unlock_irqrestore(ap->lock, flags);
5690}
5691
5692int ata_port_probe(struct ata_port *ap)
5693{
5694 int rc = 0;
5695
5696 if (ap->ops->error_handler) {
5697 __ata_port_probe(ap);
5698 ata_port_wait_eh(ap);
5699 } else {
5700 rc = ata_bus_probe(ap);
5701 }
5702 return rc;
5703}
5704
5705
5706static void async_port_probe(void *data, async_cookie_t cookie)
5707{
5708 struct ata_port *ap = data;
5709
5710 /*
5711 * If we're not allowed to scan this host in parallel,
5712 * we need to wait until all previous scans have completed
5713 * before going further.
5714 * Jeff Garzik says this is only within a controller, so we
5715 * don't need to wait for port 0, only for later ports.
5716 */
5717 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5718 async_synchronize_cookie(cookie);
5719
5720 (void)ata_port_probe(ap);
5721
5722 /* in order to keep device order, we need to synchronize at this point */
5723 async_synchronize_cookie(cookie);
5724
5725 ata_scsi_scan_host(ap, 1);
5726}
5727
5728/**
5729 * ata_host_register - register initialized ATA host
5730 * @host: ATA host to register
5731 * @sht: template for SCSI host
5732 *
5733 * Register initialized ATA host. @host is allocated using
5734 * ata_host_alloc() and fully initialized by LLD. This function
5735 * starts ports, registers @host with ATA and SCSI layers and
5736 * probes registered devices.
5737 *
5738 * LOCKING:
5739 * Inherited from calling layer (may sleep).
5740 *
5741 * RETURNS:
5742 * 0 on success, -errno otherwise.
5743 */
5744int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5745{
5746 int i, rc;
5747
5748 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
5749
5750 /* host must have been started */
5751 if (!(host->flags & ATA_HOST_STARTED)) {
5752 dev_err(host->dev, "BUG: trying to register unstarted host\n");
5753 WARN_ON(1);
5754 return -EINVAL;
5755 }
5756
5757 /* Blow away unused ports. This happens when LLD can't
5758 * determine the exact number of ports to allocate at
5759 * allocation time.
5760 */
5761 for (i = host->n_ports; host->ports[i]; i++)
5762 kfree(host->ports[i]);
5763
5764 /* give ports names and add SCSI hosts */
5765 for (i = 0; i < host->n_ports; i++) {
5766 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
5767 host->ports[i]->local_port_no = i + 1;
5768 }
5769
5770 /* Create associated sysfs transport objects */
5771 for (i = 0; i < host->n_ports; i++) {
5772 rc = ata_tport_add(host->dev, host->ports[i]);
5773 if (rc)
5774 goto err_tadd;
5776 }
5777
5778 rc = ata_scsi_add_hosts(host, sht);
5779 if (rc)
5780 goto err_tadd;
5781
5782 /* set cable, sata_spd_limit and report */
5783 for (i = 0; i < host->n_ports; i++) {
5784 struct ata_port *ap = host->ports[i];
5785 unsigned int xfer_mask;
5786
5787 /* set SATA cable type if still unset */
5788 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5789 ap->cbl = ATA_CBL_SATA;
5790
5791 /* init sata_spd_limit to the current value */
5792 sata_link_init_spd(&ap->link);
5793 if (ap->slave_link)
5794 sata_link_init_spd(ap->slave_link);
5795
5796 /* print per-port info to dmesg */
5797 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5798 ap->udma_mask);
5799
5800 if (!ata_port_is_dummy(ap)) {
5801 ata_port_info(ap, "%cATA max %s %s\n",
5802 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5803 ata_mode_string(xfer_mask),
5804 ap->link.eh_info.desc);
5805 ata_ehi_clear_desc(&ap->link.eh_info);
5806 } else
5807 ata_port_info(ap, "DUMMY\n");
5808 }
5809
5810 /* perform each probe asynchronously */
5811 for (i = 0; i < host->n_ports; i++) {
5812 struct ata_port *ap = host->ports[i];
5813 ap->cookie = async_schedule(async_port_probe, ap);
5814 }
5815
5816 return 0;
5817
5818 err_tadd:
5819 while (--i >= 0) {
5820 ata_tport_delete(host->ports[i]);
5821 }
5822 return rc;
5823
5824}
5825EXPORT_SYMBOL_GPL(ata_host_register);
5826
5827/**
5828 * ata_host_activate - start host, request IRQ and register it
5829 * @host: target ATA host
5830 * @irq: IRQ to request
5831 * @irq_handler: irq_handler used when requesting IRQ
5832 * @irq_flags: irq_flags used when requesting IRQ
5833 * @sht: scsi_host_template to use when registering the host
5834 *
5835 * After allocating an ATA host and initializing it, most libata
5836 * LLDs perform three steps to activate the host - start host,
5837 * request IRQ and register it. This helper takes necessary
5838 * arguments and performs the three steps in one go.
5839 *
5840 * An invalid IRQ skips the IRQ registration and expects the host to
5841 * have set polling mode on the port. In this case, @irq_handler
5842 * should be NULL.
5843 *
5844 * LOCKING:
5845 * Inherited from calling layer (may sleep).
5846 *
5847 * RETURNS:
5848 * 0 on success, -errno otherwise.
5849 */
5850int ata_host_activate(struct ata_host *host, int irq,
5851 irq_handler_t irq_handler, unsigned long irq_flags,
5852 struct scsi_host_template *sht)
5853{
5854 int i, rc;
5855 char *irq_desc;
5856
5857 rc = ata_host_start(host);
5858 if (rc)
5859 return rc;
5860
5861 /* Special case for polling mode */
5862 if (!irq) {
5863 WARN_ON(irq_handler);
5864 return ata_host_register(host, sht);
5865 }
5866
5867 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
5868 dev_driver_string(host->dev),
5869 dev_name(host->dev));
5870 if (!irq_desc)
5871 return -ENOMEM;
5872
5873 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5874 irq_desc, host);
5875 if (rc)
5876 return rc;
5877
5878 for (i = 0; i < host->n_ports; i++)
5879 ata_port_desc(host->ports[i], "irq %d", irq);
5880
5881 rc = ata_host_register(host, sht);
5882 /* if failed, just free the IRQ and leave ports alone */
5883 if (rc)
5884 devm_free_irq(host->dev, irq, host);
5885
5886 return rc;
5887}
5888EXPORT_SYMBOL_GPL(ata_host_activate);
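/*
 * Example (sketch of a typical LLD probe path, hypothetical names): the
 * alloc/activate pairing used by most PCI drivers looks roughly like:
 *
 *	static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		// ... ioremap BARs and fill in each port's ap->ioaddr ...
 *
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 *
 * foo_port_info, foo_interrupt and foo_sht are placeholders.
 */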
5889
5890/**
5891 * ata_port_detach - Detach ATA port in preparation of device removal
5892 * @ap: ATA port to be detached
5893 *
5894 * Detach all ATA devices and the associated SCSI devices of @ap;
5895 * then, remove the associated SCSI host. @ap is guaranteed to
5896 * be quiescent on return from this function.
5897 *
5898 * LOCKING:
5899 * Kernel thread context (may sleep).
5900 */
5901static void ata_port_detach(struct ata_port *ap)
5902{
5903 unsigned long flags;
5904 struct ata_link *link;
5905 struct ata_device *dev;
5906
5907 if (!ap->ops->error_handler)
5908 goto skip_eh;
5909
5910 /* tell EH we're leaving & flush EH */
5911 spin_lock_irqsave(ap->lock, flags);
5912 ap->pflags |= ATA_PFLAG_UNLOADING;
5913 ata_port_schedule_eh(ap);
5914 spin_unlock_irqrestore(ap->lock, flags);
5915
5916 /* wait till EH commits suicide */
5917 ata_port_wait_eh(ap);
5918
5919 /* it better be dead now */
5920 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
5921
5922 cancel_delayed_work_sync(&ap->hotplug_task);
5923
5924 skip_eh:
5925 /* clean up zpodd on port removal */
5926 ata_for_each_link(link, ap, HOST_FIRST) {
5927 ata_for_each_dev(dev, link, ALL) {
5928 if (zpodd_dev_enabled(dev))
5929 zpodd_exit(dev);
5930 }
5931 }
5932 if (ap->pmp_link) {
5933 int i;
5934 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
5935 ata_tlink_delete(&ap->pmp_link[i]);
5936 }
5937 /* remove the associated SCSI host */
5938 scsi_remove_host(ap->scsi_host);
5939 ata_tport_delete(ap);
5940}
5941
5942/**
5943 * ata_host_detach - Detach all ports of an ATA host
5944 * @host: Host to detach
5945 *
5946 * Detach all ports of @host.
5947 *
5948 * LOCKING:
5949 * Kernel thread context (may sleep).
5950 */
5951void ata_host_detach(struct ata_host *host)
5952{
5953 int i;
5954
5955 for (i = 0; i < host->n_ports; i++) {
5956 /* Ensure ata_port probe has completed */
5957 async_synchronize_cookie(host->ports[i]->cookie + 1);
5958 ata_port_detach(host->ports[i]);
5959 }
5960
5961 /* the host is dead now, dissociate ACPI */
5962 ata_acpi_dissociate(host);
5963}
5964EXPORT_SYMBOL_GPL(ata_host_detach);
5965
5966#ifdef CONFIG_PCI
5967
5968/**
5969 * ata_pci_remove_one - PCI layer callback for device removal
5970 * @pdev: PCI device that was removed
5971 *
5972 * PCI layer indicates to libata via this hook that a hot-unplug or
5973 * module unload event has occurred. Detach all ports. Resource
5974 * release is handled via devres.
5975 *
5976 * LOCKING:
5977 * Inherited from PCI layer (may sleep).
5978 */
5979void ata_pci_remove_one(struct pci_dev *pdev)
5980{
5981 struct ata_host *host = pci_get_drvdata(pdev);
5982
5983 ata_host_detach(host);
5984}
5985EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5986
5987void ata_pci_shutdown_one(struct pci_dev *pdev)
5988{
5989 struct ata_host *host = pci_get_drvdata(pdev);
5990 int i;
5991
5992 for (i = 0; i < host->n_ports; i++) {
5993 struct ata_port *ap = host->ports[i];
5994
5995 ap->pflags |= ATA_PFLAG_FROZEN;
5996
5997 /* Disable port interrupts */
5998 if (ap->ops->freeze)
5999 ap->ops->freeze(ap);
6000
6001 /* Stop the port DMA engines */
6002 if (ap->ops->port_stop)
6003 ap->ops->port_stop(ap);
6004 }
6005}
6006EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6007
6008/* move to PCI subsystem */
6009int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6010{
6011 unsigned long tmp = 0;
6012
6013 switch (bits->width) {
6014 case 1: {
6015 u8 tmp8 = 0;
6016 pci_read_config_byte(pdev, bits->reg, &tmp8);
6017 tmp = tmp8;
6018 break;
6019 }
6020 case 2: {
6021 u16 tmp16 = 0;
6022 pci_read_config_word(pdev, bits->reg, &tmp16);
6023 tmp = tmp16;
6024 break;
6025 }
6026 case 4: {
6027 u32 tmp32 = 0;
6028 pci_read_config_dword(pdev, bits->reg, &tmp32);
6029 tmp = tmp32;
6030 break;
6031 }
6032
6033 default:
6034 return -EINVAL;
6035 }
6036
6037 tmp &= bits->mask;
6038
6039 return (tmp == bits->val) ? 1 : 0;
6040}
6041EXPORT_SYMBOL_GPL(pci_test_config_bits);
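/*
 * Example (illustrative): struct pci_bits names a config register, an
 * access width (1, 2 or 4 bytes), a mask and the expected value.  A PATA
 * driver checking an "enable" bit for one of its channels might do:
 *
 *	static const struct pci_bits foo_enable_bits = {
 *		.reg = 0x41, .width = 1, .mask = 0x80, .val = 0x80,
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;		// channel disabled by firmware
 *
 * The register offset and bit here are assumptions for illustration; real
 * drivers (ata_piix, for example) carry chipset-specific tables.
 */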
6042
6043#ifdef CONFIG_PM
6044void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6045{
6046 pci_save_state(pdev);
6047 pci_disable_device(pdev);
6048
6049 if (mesg.event & PM_EVENT_SLEEP)
6050 pci_set_power_state(pdev, PCI_D3hot);
6051}
6052EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6053
6054int ata_pci_device_do_resume(struct pci_dev *pdev)
6055{
6056 int rc;
6057
6058 pci_set_power_state(pdev, PCI_D0);
6059 pci_restore_state(pdev);
6060
6061 rc = pcim_enable_device(pdev);
6062 if (rc) {
6063 dev_err(&pdev->dev,
6064 "failed to enable device after resume (%d)\n", rc);
6065 return rc;
6066 }
6067
6068 pci_set_master(pdev);
6069 return 0;
6070}
6071EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6072
6073int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6074{
6075 struct ata_host *host = pci_get_drvdata(pdev);
6076
6077 ata_host_suspend(host, mesg);
6078
6079 ata_pci_device_do_suspend(pdev, mesg);
6080
6081 return 0;
6082}
6083EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6084
6085int ata_pci_device_resume(struct pci_dev *pdev)
6086{
6087 struct ata_host *host = pci_get_drvdata(pdev);
6088 int rc;
6089
6090 rc = ata_pci_device_do_resume(pdev);
6091 if (rc == 0)
6092 ata_host_resume(host);
6093 return rc;
6094}
6095EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6096#endif /* CONFIG_PM */
6097#endif /* CONFIG_PCI */
6098
6099/**
6100 * ata_platform_remove_one - Platform layer callback for device removal
6101 * @pdev: Platform device that was removed
6102 *
6103 * Platform layer indicates to libata via this hook that a hot-unplug or
6104 * module unload event has occurred. Detach all ports. Resource
6105 * release is handled via devres.
6106 *
6107 * LOCKING:
6108 * Inherited from platform layer (may sleep).
6109 */
6110int ata_platform_remove_one(struct platform_device *pdev)
6111{
6112 struct ata_host *host = platform_get_drvdata(pdev);
6113
6114 ata_host_detach(host);
6115
6116 return 0;
6117}
6118EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6119
6120#ifdef CONFIG_ATA_FORCE
6121
6122#define force_cbl(name, flag) \
6123 { #name, .cbl = (flag) }
6124
6125#define force_spd_limit(spd, val) \
6126 { #spd, .spd_limit = (val) }
6127
6128#define force_xfer(mode, shift) \
6129 { #mode, .xfer_mask = (1UL << (shift)) }
6130
6131#define force_lflag_on(name, flags) \
6132 { #name, .lflags_on = (flags) }
6133
6134#define force_lflag_onoff(name, flags) \
6135 { "no" #name, .lflags_on = (flags) }, \
6136 { #name, .lflags_off = (flags) }
6137
6138#define force_horkage_on(name, flag) \
6139 { #name, .horkage_on = (flag) }
6140
6141#define force_horkage_onoff(name, flag) \
6142 { "no" #name, .horkage_on = (flag) }, \
6143 { #name, .horkage_off = (flag) }
6144
6145static const struct ata_force_param force_tbl[] __initconst = {
6146 force_cbl(40c, ATA_CBL_PATA40),
6147 force_cbl(80c, ATA_CBL_PATA80),
6148 force_cbl(short40c, ATA_CBL_PATA40_SHORT),
6149 force_cbl(unk, ATA_CBL_PATA_UNK),
6150 force_cbl(ign, ATA_CBL_PATA_IGN),
6151 force_cbl(sata, ATA_CBL_SATA),
6152
6153 force_spd_limit(1.5Gbps, 1),
6154 force_spd_limit(3.0Gbps, 2),
6155
6156 force_xfer(pio0, ATA_SHIFT_PIO + 0),
6157 force_xfer(pio1, ATA_SHIFT_PIO + 1),
6158 force_xfer(pio2, ATA_SHIFT_PIO + 2),
6159 force_xfer(pio3, ATA_SHIFT_PIO + 3),
6160 force_xfer(pio4, ATA_SHIFT_PIO + 4),
6161 force_xfer(pio5, ATA_SHIFT_PIO + 5),
6162 force_xfer(pio6, ATA_SHIFT_PIO + 6),
6163 force_xfer(mwdma0, ATA_SHIFT_MWDMA + 0),
6164 force_xfer(mwdma1, ATA_SHIFT_MWDMA + 1),
6165 force_xfer(mwdma2, ATA_SHIFT_MWDMA + 2),
6166 force_xfer(mwdma3, ATA_SHIFT_MWDMA + 3),
6167 force_xfer(mwdma4, ATA_SHIFT_MWDMA + 4),
6168 force_xfer(udma0, ATA_SHIFT_UDMA + 0),
6169 force_xfer(udma16, ATA_SHIFT_UDMA + 0),
6170 force_xfer(udma/16, ATA_SHIFT_UDMA + 0),
6171 force_xfer(udma1, ATA_SHIFT_UDMA + 1),
6172 force_xfer(udma25, ATA_SHIFT_UDMA + 1),
6173 force_xfer(udma/25, ATA_SHIFT_UDMA + 1),
6174 force_xfer(udma2, ATA_SHIFT_UDMA + 2),
6175 force_xfer(udma33, ATA_SHIFT_UDMA + 2),
6176 force_xfer(udma/33, ATA_SHIFT_UDMA + 2),
6177 force_xfer(udma3, ATA_SHIFT_UDMA + 3),
6178 force_xfer(udma44, ATA_SHIFT_UDMA + 3),
6179 force_xfer(udma/44, ATA_SHIFT_UDMA + 3),
6180 force_xfer(udma4, ATA_SHIFT_UDMA + 4),
6181 force_xfer(udma66, ATA_SHIFT_UDMA + 4),
6182 force_xfer(udma/66, ATA_SHIFT_UDMA + 4),
6183 force_xfer(udma5, ATA_SHIFT_UDMA + 5),
6184 force_xfer(udma100, ATA_SHIFT_UDMA + 5),
6185 force_xfer(udma/100, ATA_SHIFT_UDMA + 5),
6186 force_xfer(udma6, ATA_SHIFT_UDMA + 6),
6187 force_xfer(udma133, ATA_SHIFT_UDMA + 6),
6188 force_xfer(udma/133, ATA_SHIFT_UDMA + 6),
6189 force_xfer(udma7, ATA_SHIFT_UDMA + 7),
6190
6191 force_lflag_on(nohrst, ATA_LFLAG_NO_HRST),
6192 force_lflag_on(nosrst, ATA_LFLAG_NO_SRST),
6193 force_lflag_on(norst, ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
6194 force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
6195 force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
6196
6197 force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
6198 force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
6199 force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),
6200
6201 force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
6202 force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
6203 force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
6204
6205 force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
6206 force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
6207 force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
6208
6209 force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
6210 force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
6211 force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),
6212
6213 force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
6214 force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
6215 force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),
6216
6217 force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
6218 force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
6219 force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
6220
6221 force_horkage_on(disable, ATA_HORKAGE_DISABLE),
6222};
6223
6224static int __init ata_parse_force_one(char **cur,
6225 struct ata_force_ent *force_ent,
6226 const char **reason)
6227{
6228 char *start = *cur, *p = *cur;
6229 char *id, *val, *endp;
6230 const struct ata_force_param *match_fp = NULL;
6231 int nr_matches = 0, i;
6232
6233 /* find where this param ends and update *cur */
6234 while (*p != '\0' && *p != ',')
6235 p++;
6236
6237 if (*p == '\0')
6238 *cur = p;
6239 else
6240 *cur = p + 1;
6241
6242 *p = '\0';
6243
6244 /* parse */
6245 p = strchr(start, ':');
6246 if (!p) {
6247 val = strstrip(start);
6248 goto parse_val;
6249 }
6250 *p = '\0';
6251
6252 id = strstrip(start);
6253 val = strstrip(p + 1);
6254
6255 /* parse id */
6256 p = strchr(id, '.');
6257 if (p) {
6258 *p++ = '\0';
6259 force_ent->device = simple_strtoul(p, &endp, 10);
6260 if (p == endp || *endp != '\0') {
6261 *reason = "invalid device";
6262 return -EINVAL;
6263 }
6264 }
6265
6266 force_ent->port = simple_strtoul(id, &endp, 10);
6267 if (id == endp || *endp != '\0') {
6268 *reason = "invalid port/link";
6269 return -EINVAL;
6270 }
6271
6272 parse_val:
6273 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6274 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6275 const struct ata_force_param *fp = &force_tbl[i];
6276
6277 if (strncasecmp(val, fp->name, strlen(val)))
6278 continue;
6279
6280 nr_matches++;
6281 match_fp = fp;
6282
6283 if (strcasecmp(val, fp->name) == 0) {
6284 nr_matches = 1;
6285 break;
6286 }
6287 }
6288
6289 if (!nr_matches) {
6290 *reason = "unknown value";
6291 return -EINVAL;
6292 }
6293 if (nr_matches > 1) {
6294 *reason = "ambiguous value";
6295 return -EINVAL;
6296 }
6297
6298 force_ent->param = *match_fp;
6299
6300 return 0;
6301}
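/*
 * Example (illustrative): each string parsed here is one comma-separated
 * entry of the libata.force= kernel/module parameter, of the form
 * [ID:]VAL where ID is "port" or "port.device".  For instance,
 *
 *	libata.force=1:noncq,2.00:udma/33,nohrst
 *
 * disables NCQ on every device of port 1, limits device 0 of port 2 to
 * UDMA/33 and, because an entry without an ID inherits the previous one,
 * also applies nohrst to that same port 2 target.
 */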
6302
6303static void __init ata_parse_force_param(void)
6304{
6305 int idx = 0, size = 1;
6306 int last_port = -1, last_device = -1;
6307 char *p, *cur, *next;
6308
6309 /* Calculate maximum number of params and allocate ata_force_tbl */
6310 for (p = ata_force_param_buf; *p; p++)
6311 if (*p == ',')
6312 size++;
6313
6314 ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6315 if (!ata_force_tbl) {
6316 printk(KERN_WARNING "ata: failed to extend force table, "
6317 "libata.force ignored\n");
6318 return;
6319 }
6320
6321 /* parse and populate the table */
6322 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6323 const char *reason = "";
6324 struct ata_force_ent te = { .port = -1, .device = -1 };
6325
6326 next = cur;
6327 if (ata_parse_force_one(&next, &te, &reason)) {
6328 printk(KERN_WARNING "ata: failed to parse force "
6329 "parameter \"%s\" (%s)\n",
6330 cur, reason);
6331 continue;
6332 }
6333
6334 if (te.port == -1) {
6335 te.port = last_port;
6336 te.device = last_device;
6337 }
6338
6339 ata_force_tbl[idx++] = te;
6340
6341 last_port = te.port;
6342 last_device = te.device;
6343 }
6344
6345 ata_force_tbl_size = idx;
6346}
6347
6348static void ata_free_force_param(void)
6349{
6350 kfree(ata_force_tbl);
6351}
6352#else
6353static inline void ata_parse_force_param(void) { }
6354static inline void ata_free_force_param(void) { }
6355#endif
6356
6357static int __init ata_init(void)
6358{
6359 int rc;
6360
6361 ata_parse_force_param();
6362
6363 rc = ata_sff_init();
6364 if (rc) {
6365 ata_free_force_param();
6366 return rc;
6367 }
6368
6369 libata_transport_init();
6370 ata_scsi_transport_template = ata_attach_transport();
6371 if (!ata_scsi_transport_template) {
6372 ata_sff_exit();
6373 rc = -ENOMEM;
6374 goto err_out;
6375 }
6376
6377 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6378 return 0;
6379
6380err_out:
6381 return rc;
6382}
6383
6384static void __exit ata_exit(void)
6385{
6386 ata_release_transport(ata_scsi_transport_template);
6387 libata_transport_exit();
6388 ata_sff_exit();
6389 ata_free_force_param();
6390}
6391
6392subsys_initcall(ata_init);
6393module_exit(ata_exit);
6394
6395static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6396
6397int ata_ratelimit(void)
6398{
6399 return __ratelimit(&ratelimit);
6400}
6401EXPORT_SYMBOL_GPL(ata_ratelimit);
6402
6403/**
6404 * ata_msleep - ATA EH owner aware msleep
6405 * @ap: ATA port to attribute the sleep to
6406 * @msecs: duration to sleep in milliseconds
6407 *
6408 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6409 * ownership is released before going to sleep and reacquired
6410 * after the sleep is complete. IOW, other ports sharing the
6411 * @ap->host will be allowed to own the EH while this task is
6412 * sleeping.
6413 *
6414 * LOCKING:
6415 * Might sleep.
6416 */
6417void ata_msleep(struct ata_port *ap, unsigned int msecs)
6418{
6419 bool owns_eh = ap && ap->host->eh_owner == current;
6420
6421 if (owns_eh)
6422 ata_eh_release(ap);
6423
6424 if (msecs < 20) {
6425 unsigned long usecs = msecs * USEC_PER_MSEC;
6426 usleep_range(usecs, usecs + 50);
6427 } else {
6428 msleep(msecs);
6429 }
6430
6431 if (owns_eh)
6432 ata_eh_acquire(ap);
6433}
6434EXPORT_SYMBOL_GPL(ata_msleep);
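/*
 * Example (illustrative): wait loops in reset/ready paths should prefer
 * ata_msleep() over plain msleep() so that other ports sharing the host
 * can run their EH while this task waits, e.g.:
 *
 *	while (time_before(jiffies, deadline)) {
 *		if (foo_link_ready(link))
 *			return 0;
 *		ata_msleep(link->ap, 20);
 *	}
 *	return -EBUSY;
 *
 * foo_link_ready() and deadline are placeholders for a driver-specific
 * readiness check and an ata_deadline()-style timeout.
 */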
6435
6436/**
6437 * ata_wait_register - wait until register value changes
6438 * @ap: ATA port to wait register for, can be NULL
6439 * @reg: IO-mapped register
6440 * @mask: Mask to apply to read register value
6441 * @val: Wait condition
6442 * @interval: polling interval in milliseconds
6443 * @timeout: timeout in milliseconds
6444 *
6445 * Waiting for some bits of register to change is a common
6446 * operation for ATA controllers. This function reads 32bit LE
6447 * IO-mapped register @reg and tests for the following condition.
6448 *
6449 * (*@reg & mask) != val
6450 *
6451 * If the condition is met, it returns; otherwise, the process is
6452 * repeated after @interval until timeout.
6453 *
6454 * LOCKING:
6455 * Kernel thread context (may sleep)
6456 *
6457 * RETURNS:
6458 * The final register value.
6459 */
6460u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6461 unsigned long interval, unsigned long timeout)
6462{
6463 unsigned long deadline;
6464 u32 tmp;
6465
6466 tmp = ioread32(reg);
6467
6468 /* Calculate timeout _after_ the first read to make sure
6469 * preceding writes reach the controller before starting to
6470 * eat away the timeout.
6471 */
6472 deadline = ata_deadline(jiffies, timeout);
6473
6474 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6475 ata_msleep(ap, interval);
6476 tmp = ioread32(reg);
6477 }
6478
6479 return tmp;
6480}
6481EXPORT_SYMBOL_GPL(ata_wait_register);
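/*
 * Example (illustrative): waiting up to a second for a controller "busy"
 * bit to clear in a memory-mapped status register, polling every 10 ms.
 * The loop above keeps polling while (value & mask) == val, so passing the
 * busy bit as both mask and val waits for it to drop:
 *
 *	u32 status = ata_wait_register(ap, mmio + FOO_STATUS,
 *				       FOO_BUSY, FOO_BUSY, 10, 1000);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;		// still busy after timeout
 *
 * mmio, FOO_STATUS and FOO_BUSY are placeholder names.
 */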
6482
6483/*
6484 * Dummy port_ops
6485 */
6486static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6487{
6488 return AC_ERR_SYSTEM;
6489}
6490
6491static void ata_dummy_error_handler(struct ata_port *ap)
6492{
6493 /* truly dummy */
6494}
6495
6496struct ata_port_operations ata_dummy_port_ops = {
6497 .qc_prep = ata_noop_qc_prep,
6498 .qc_issue = ata_dummy_qc_issue,
6499 .error_handler = ata_dummy_error_handler,
6500 .sched_eh = ata_std_sched_eh,
6501 .end_eh = ata_std_end_eh,
6502};
6503EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6504
6505const struct ata_port_info ata_dummy_port_info = {
6506 .port_ops = &ata_dummy_port_ops,
6507};
6508EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6509
6510void ata_print_version(const struct device *dev, const char *version)
6511{
6512 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6513}
6514EXPORT_SYMBOL(ata_print_version);
6515
6516EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6517EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6518EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6519EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6520EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);