v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  libata-core.c - helper library for ATA
   4 *
   5 *  Maintained by:  Tejun Heo <tj@kernel.org>
   6 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
   7 *		    on emails.
   8 *
   9 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
  10 *  Copyright 2003-2004 Jeff Garzik
  11 *
  12 *  libata documentation is available via 'make {ps|pdf}docs',
  13 *  as Documentation/driver-api/libata.rst
  14 *
  15 *  Hardware documentation available from http://www.t13.org/ and
  16 *  http://www.sata-io.org/
  17 *
  18 *  Standards documents from:
  19 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
  20 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
  21 *	http://www.sata-io.org (SATA)
  22 *	http://www.compactflash.org (CF)
  23 *	http://www.qic.org (QIC157 - Tape and DSC)
  24 *	http://www.ce-ata.org (CE-ATA: not supported)
  25 */
  26
  27#include <linux/kernel.h>
  28#include <linux/module.h>
  29#include <linux/pci.h>
  30#include <linux/init.h>
  31#include <linux/list.h>
  32#include <linux/mm.h>
  33#include <linux/spinlock.h>
  34#include <linux/blkdev.h>
  35#include <linux/delay.h>
  36#include <linux/timer.h>
  37#include <linux/time.h>
  38#include <linux/interrupt.h>
  39#include <linux/completion.h>
  40#include <linux/suspend.h>
  41#include <linux/workqueue.h>
  42#include <linux/scatterlist.h>
  43#include <linux/io.h>
  44#include <linux/async.h>
  45#include <linux/log2.h>
  46#include <linux/slab.h>
  47#include <linux/glob.h>
  48#include <scsi/scsi.h>
  49#include <scsi/scsi_cmnd.h>
  50#include <scsi/scsi_host.h>
  51#include <linux/libata.h>
  52#include <asm/byteorder.h>
  53#include <asm/unaligned.h>
  54#include <linux/cdrom.h>
  55#include <linux/ratelimit.h>
  56#include <linux/leds.h>
  57#include <linux/pm_runtime.h>
  58#include <linux/platform_device.h>
  59
  60#define CREATE_TRACE_POINTS
  61#include <trace/events/libata.h>
  62
  63#include "libata.h"
  64#include "libata-transport.h"
  65
  66/* debounce timing parameters in msecs { interval, duration, timeout } */
  67const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
  68const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
  69const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
  70
  71const struct ata_port_operations ata_base_port_ops = {
  72	.prereset		= ata_std_prereset,
  73	.postreset		= ata_std_postreset,
  74	.error_handler		= ata_std_error_handler,
  75	.sched_eh		= ata_std_sched_eh,
  76	.end_eh			= ata_std_end_eh,
  77};
  78
  79const struct ata_port_operations sata_port_ops = {
  80	.inherits		= &ata_base_port_ops,
  81
  82	.qc_defer		= ata_std_qc_defer,
  83	.hardreset		= sata_std_hardreset,
  84};
  85
  86static unsigned int ata_dev_init_params(struct ata_device *dev,
  87					u16 heads, u16 sectors);
  88static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
  89static void ata_dev_xfermask(struct ata_device *dev);
  90static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
  91
  92atomic_t ata_print_id = ATOMIC_INIT(0);
  93
  94struct ata_force_param {
  95	const char	*name;
  96	unsigned int	cbl;
  97	int		spd_limit;
  98	unsigned long	xfer_mask;
  99	unsigned int	horkage_on;
 100	unsigned int	horkage_off;
 101	unsigned int	lflags;
 102};
 103
 104struct ata_force_ent {
 105	int			port;
 106	int			device;
 107	struct ata_force_param	param;
 108};
 109
 110static struct ata_force_ent *ata_force_tbl;
 111static int ata_force_tbl_size;
 112
 113static char ata_force_param_buf[PAGE_SIZE] __initdata;
 114/* param_buf is thrown away after initialization, disallow read */
 115module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
 116MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
 117
 118static int atapi_enabled = 1;
 119module_param(atapi_enabled, int, 0444);
 120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
 121
 122static int atapi_dmadir = 0;
 123module_param(atapi_dmadir, int, 0444);
 124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
 125
 126int atapi_passthru16 = 1;
 127module_param(atapi_passthru16, int, 0444);
 128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
 129
 130int libata_fua = 0;
 131module_param_named(fua, libata_fua, int, 0444);
 132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
 133
 134static int ata_ignore_hpa;
 135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
 136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
 137
 138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
 139module_param_named(dma, libata_dma_mask, int, 0444);
 140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
 141
 142static int ata_probe_timeout;
 143module_param(ata_probe_timeout, int, 0444);
 144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
 145
 146int libata_noacpi = 0;
 147module_param_named(noacpi, libata_noacpi, int, 0444);
 148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
 149
 150int libata_allow_tpm = 0;
 151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
 152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
 153
 154static int atapi_an;
 155module_param(atapi_an, int, 0444);
 156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
 157
 158MODULE_AUTHOR("Jeff Garzik");
 159MODULE_DESCRIPTION("Library module for ATA devices");
 160MODULE_LICENSE("GPL");
 161MODULE_VERSION(DRV_VERSION);
 162
 163
 164static bool ata_sstatus_online(u32 sstatus)
 165{
 166	return (sstatus & 0xf) == 0x3;
 167}
 168
 169/**
 170 *	ata_link_next - link iteration helper
 171 *	@link: the previous link, NULL to start
 172 *	@ap: ATA port containing links to iterate
 173 *	@mode: iteration mode, one of ATA_LITER_*
 174 *
 175 *	LOCKING:
 176 *	Host lock or EH context.
 177 *
 178 *	RETURNS:
 179 *	Pointer to the next link.
 180 */
 181struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
 182			       enum ata_link_iter_mode mode)
 183{
 184	BUG_ON(mode != ATA_LITER_EDGE &&
 185	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
 186
 187	/* NULL link indicates start of iteration */
 188	if (!link)
 189		switch (mode) {
 190		case ATA_LITER_EDGE:
 191		case ATA_LITER_PMP_FIRST:
 192			if (sata_pmp_attached(ap))
 193				return ap->pmp_link;
 194			/* fall through */
 195		case ATA_LITER_HOST_FIRST:
 196			return &ap->link;
 197		}
 198
 199	/* we just iterated over the host link, what's next? */
 200	if (link == &ap->link)
 201		switch (mode) {
 202		case ATA_LITER_HOST_FIRST:
 203			if (sata_pmp_attached(ap))
 204				return ap->pmp_link;
 205			/* fall through */
 206		case ATA_LITER_PMP_FIRST:
 207			if (unlikely(ap->slave_link))
 208				return ap->slave_link;
 209			/* fall through */
 210		case ATA_LITER_EDGE:
 211			return NULL;
 212		}
 213
 214	/* slave_link excludes PMP */
 215	if (unlikely(link == ap->slave_link))
 216		return NULL;
 217
 218	/* we were over a PMP link */
 219	if (++link < ap->pmp_link + ap->nr_pmp_links)
 220		return link;
 221
 222	if (mode == ATA_LITER_PMP_FIRST)
 223		return &ap->link;
 224
 225	return NULL;
 226}
 227
 228/**
 229 *	ata_dev_next - device iteration helper
 230 *	@dev: the previous device, NULL to start
 231 *	@link: ATA link containing devices to iterate
 232 *	@mode: iteration mode, one of ATA_DITER_*
 233 *
 234 *	LOCKING:
 235 *	Host lock or EH context.
 236 *
 237 *	RETURNS:
 238 *	Pointer to the next device.
 239 */
 240struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
 241				enum ata_dev_iter_mode mode)
 242{
 243	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
 244	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
 245
 246	/* NULL dev indicates start of iteration */
 247	if (!dev)
 248		switch (mode) {
 249		case ATA_DITER_ENABLED:
 250		case ATA_DITER_ALL:
 251			dev = link->device;
 252			goto check;
 253		case ATA_DITER_ENABLED_REVERSE:
 254		case ATA_DITER_ALL_REVERSE:
 255			dev = link->device + ata_link_max_devices(link) - 1;
 256			goto check;
 257		}
 258
 259 next:
 260	/* move to the next one */
 261	switch (mode) {
 262	case ATA_DITER_ENABLED:
 263	case ATA_DITER_ALL:
 264		if (++dev < link->device + ata_link_max_devices(link))
 265			goto check;
 266		return NULL;
 267	case ATA_DITER_ENABLED_REVERSE:
 268	case ATA_DITER_ALL_REVERSE:
 269		if (--dev >= link->device)
 270			goto check;
 271		return NULL;
 272	}
 273
 274 check:
 275	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
 276	    !ata_dev_enabled(dev))
 277		goto next;
 278	return dev;
 279}
 280
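/*
 * Illustrative sketch, not part of libata: the two iterators above are
 * normally used through the ata_for_each_link()/ata_for_each_dev()
 * wrappers from <linux/libata.h>.  The helper below is hypothetical
 * and simply logs every enabled device on a port.
 */
static void __maybe_unused example_log_enabled_devices(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	ata_for_each_link(link, ap, EDGE)
		ata_for_each_dev(dev, link, ENABLED)
			ata_dev_info(dev, "device is enabled\n");
}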
 281/**
 282 *	ata_dev_phys_link - find physical link for a device
 283 *	@dev: ATA device to look up physical link for
 284 *
 285 *	Look up physical link which @dev is attached to.  Note that
 286 *	this is different from @dev->link only when @dev is on slave
 287 *	link.  For all other cases, it's the same as @dev->link.
 288 *
 289 *	LOCKING:
 290 *	Don't care.
 291 *
 292 *	RETURNS:
 293 *	Pointer to the found physical link.
 294 */
 295struct ata_link *ata_dev_phys_link(struct ata_device *dev)
 296{
 297	struct ata_port *ap = dev->link->ap;
 298
 299	if (!ap->slave_link)
 300		return dev->link;
 301	if (!dev->devno)
 302		return &ap->link;
 303	return ap->slave_link;
 304}
 305
 306/**
 307 *	ata_force_cbl - force cable type according to libata.force
 308 *	@ap: ATA port of interest
 309 *
 310 *	Force cable type according to libata.force and whine about it.
 311 *	The last entry which has matching port number is used, so it
 312 *	can be specified as part of device force parameters.  For
 313 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 314 *	same effect.
 315 *
 316 *	LOCKING:
 317 *	EH context.
 318 */
 319void ata_force_cbl(struct ata_port *ap)
 320{
 321	int i;
 322
 323	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 324		const struct ata_force_ent *fe = &ata_force_tbl[i];
 325
 326		if (fe->port != -1 && fe->port != ap->print_id)
 327			continue;
 328
 329		if (fe->param.cbl == ATA_CBL_NONE)
 330			continue;
 331
 332		ap->cbl = fe->param.cbl;
 333		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
 334		return;
 335	}
 336}
 337
 338/**
 339 *	ata_force_link_limits - force link limits according to libata.force
 340 *	@link: ATA link of interest
 341 *
 342 *	Force link flags and SATA spd limit according to libata.force
 343 *	and whine about it.  When only the port part is specified
 344 *	(e.g. 1:), the limit applies to all links connected to both
 345 *	the host link and all fan-out ports connected via PMP.  If the
 346 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 347 *	first fan-out link not the host link.  Device number 15 always
 348 *	first fan-out link, not the host link.  Device number 15 always
 349 *	points to the host link whether PMP is attached or not.  If the
 350 *	controller has a slave link, device number 16 points to it.
 351 *	LOCKING:
 352 *	EH context.
 353 */
 354static void ata_force_link_limits(struct ata_link *link)
 355{
 356	bool did_spd = false;
 357	int linkno = link->pmp;
 358	int i;
 359
 360	if (ata_is_host_link(link))
 361		linkno += 15;
 362
 363	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 364		const struct ata_force_ent *fe = &ata_force_tbl[i];
 365
 366		if (fe->port != -1 && fe->port != link->ap->print_id)
 367			continue;
 368
 369		if (fe->device != -1 && fe->device != linkno)
 370			continue;
 371
 372		/* only honor the first spd limit */
 373		if (!did_spd && fe->param.spd_limit) {
 374			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
 375			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
 376					fe->param.name);
 377			did_spd = true;
 378		}
 379
 380		/* let lflags stack */
 381		if (fe->param.lflags) {
 382			link->flags |= fe->param.lflags;
 383			ata_link_notice(link,
 384					"FORCE: link flag 0x%x forced -> 0x%x\n",
 385					fe->param.lflags, link->flags);
 386		}
 387	}
 388}
 389
 390/**
 391 *	ata_force_xfermask - force xfermask according to libata.force
 392 *	@dev: ATA device of interest
 393 *
 394 *	Force xfer_mask according to libata.force and whine about it.
 395 *	For consistency with link selection, device number 15 selects
 396 *	the first device connected to the host link.
 397 *
 398 *	LOCKING:
 399 *	EH context.
 400 */
 401static void ata_force_xfermask(struct ata_device *dev)
 402{
 403	int devno = dev->link->pmp + dev->devno;
 404	int alt_devno = devno;
 405	int i;
 406
 407	/* allow n.15/16 for devices attached to host port */
 408	if (ata_is_host_link(dev->link))
 409		alt_devno += 15;
 410
 411	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 412		const struct ata_force_ent *fe = &ata_force_tbl[i];
 413		unsigned long pio_mask, mwdma_mask, udma_mask;
 414
 415		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 416			continue;
 417
 418		if (fe->device != -1 && fe->device != devno &&
 419		    fe->device != alt_devno)
 420			continue;
 421
 422		if (!fe->param.xfer_mask)
 423			continue;
 424
 425		ata_unpack_xfermask(fe->param.xfer_mask,
 426				    &pio_mask, &mwdma_mask, &udma_mask);
 427		if (udma_mask)
 428			dev->udma_mask = udma_mask;
 429		else if (mwdma_mask) {
 430			dev->udma_mask = 0;
 431			dev->mwdma_mask = mwdma_mask;
 432		} else {
 433			dev->udma_mask = 0;
 434			dev->mwdma_mask = 0;
 435			dev->pio_mask = pio_mask;
 436		}
 437
 438		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
 439			       fe->param.name);
 440		return;
 441	}
 442}
 443
 444/**
 445 *	ata_force_horkage - force horkage according to libata.force
 446 *	@dev: ATA device of interest
 447 *
 448 *	Force horkage according to libata.force and whine about it.
 449 *	For consistency with link selection, device number 15 selects
 450 *	the first device connected to the host link.
 451 *
 452 *	LOCKING:
 453 *	EH context.
 454 */
 455static void ata_force_horkage(struct ata_device *dev)
 456{
 457	int devno = dev->link->pmp + dev->devno;
 458	int alt_devno = devno;
 459	int i;
 460
 461	/* allow n.15/16 for devices attached to host port */
 462	if (ata_is_host_link(dev->link))
 463		alt_devno += 15;
 464
 465	for (i = 0; i < ata_force_tbl_size; i++) {
 466		const struct ata_force_ent *fe = &ata_force_tbl[i];
 467
 468		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 469			continue;
 470
 471		if (fe->device != -1 && fe->device != devno &&
 472		    fe->device != alt_devno)
 473			continue;
 474
 475		if (!(~dev->horkage & fe->param.horkage_on) &&
 476		    !(dev->horkage & fe->param.horkage_off))
 477			continue;
 478
 479		dev->horkage |= fe->param.horkage_on;
 480		dev->horkage &= ~fe->param.horkage_off;
 481
 482		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
 483			       fe->param.name);
 484	}
 485}
 486
 487/**
 488 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 489 *	@opcode: SCSI opcode
 490 *
 491 *	Determine ATAPI command type from @opcode.
 492 *
 493 *	LOCKING:
 494 *	None.
 495 *
 496 *	RETURNS:
 497 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 498 */
 499int atapi_cmd_type(u8 opcode)
 500{
 501	switch (opcode) {
 502	case GPCMD_READ_10:
 503	case GPCMD_READ_12:
 504		return ATAPI_READ;
 505
 506	case GPCMD_WRITE_10:
 507	case GPCMD_WRITE_12:
 508	case GPCMD_WRITE_AND_VERIFY_10:
 509		return ATAPI_WRITE;
 510
 511	case GPCMD_READ_CD:
 512	case GPCMD_READ_CD_MSF:
 513		return ATAPI_READ_CD;
 514
 515	case ATA_16:
 516	case ATA_12:
 517		if (atapi_passthru16)
 518			return ATAPI_PASS_THRU;
 519		/* fall thru */
 520	default:
 521		return ATAPI_MISC;
 522	}
 523}
 524
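/*
 * Illustrative sketch, not part of libata: a hypothetical caller using
 * atapi_cmd_type() to decide whether a SCSI CDB is a bulk media
 * read/write before setting up the ATAPI transfer.
 */
static bool __maybe_unused example_cdb_is_media_rw(const u8 *cdb)
{
	int type = atapi_cmd_type(cdb[0]);

	return type == ATAPI_READ || type == ATAPI_WRITE ||
	       type == ATAPI_READ_CD;
}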
 525/**
 526 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 527 *	@tf: Taskfile to convert
 528 *	@pmp: Port multiplier port
 529 *	@is_cmd: This FIS is for command
 530 *	@fis: Buffer into which data will be output
 531 *
 532 *	Converts a standard ATA taskfile to a Serial ATA
 533 *	FIS structure (Register - Host to Device).
 534 *
 535 *	LOCKING:
 536 *	Inherited from caller.
 537 */
 538void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
 539{
 540	fis[0] = 0x27;			/* Register - Host to Device FIS */
 541	fis[1] = pmp & 0xf;		/* Port multiplier number*/
 542	if (is_cmd)
 543		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
 544
 545	fis[2] = tf->command;
 546	fis[3] = tf->feature;
 547
 548	fis[4] = tf->lbal;
 549	fis[5] = tf->lbam;
 550	fis[6] = tf->lbah;
 551	fis[7] = tf->device;
 552
 553	fis[8] = tf->hob_lbal;
 554	fis[9] = tf->hob_lbam;
 555	fis[10] = tf->hob_lbah;
 556	fis[11] = tf->hob_feature;
 557
 558	fis[12] = tf->nsect;
 559	fis[13] = tf->hob_nsect;
 560	fis[14] = 0;
 561	fis[15] = tf->ctl;
 562
 563	fis[16] = tf->auxiliary & 0xff;
 564	fis[17] = (tf->auxiliary >> 8) & 0xff;
 565	fis[18] = (tf->auxiliary >> 16) & 0xff;
 566	fis[19] = (tf->auxiliary >> 24) & 0xff;
 567}
 568
 569/**
 570 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 571 *	@fis: Buffer from which data will be input
 572 *	@tf: Taskfile to output
 573 *
 574 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 575 *
 576 *	LOCKING:
 577 *	Inherited from caller.
 578 */
 579
 580void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
 581{
 582	tf->command	= fis[2];	/* status */
 583	tf->feature	= fis[3];	/* error */
 584
 585	tf->lbal	= fis[4];
 586	tf->lbam	= fis[5];
 587	tf->lbah	= fis[6];
 588	tf->device	= fis[7];
 589
 590	tf->hob_lbal	= fis[8];
 591	tf->hob_lbam	= fis[9];
 592	tf->hob_lbah	= fis[10];
 593
 594	tf->nsect	= fis[12];
 595	tf->hob_nsect	= fis[13];
 596}
 597
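/*
 * Illustrative sketch, not part of libata: how an AHCI-style driver
 * might use ata_tf_to_fis() to fill the command FIS area of its
 * (hypothetical) command table before issuing a queued command.
 */
static void __maybe_unused example_fill_cmd_fis(struct ata_queued_cmd *qc,
						u8 *cmd_tbl)
{
	/* is_cmd=1 sets bit 7 of FIS byte 1, marking this a Command FIS */
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
}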
 598static const u8 ata_rw_cmds[] = {
 599	/* pio multi */
 600	ATA_CMD_READ_MULTI,
 601	ATA_CMD_WRITE_MULTI,
 602	ATA_CMD_READ_MULTI_EXT,
 603	ATA_CMD_WRITE_MULTI_EXT,
 604	0,
 605	0,
 606	0,
 607	ATA_CMD_WRITE_MULTI_FUA_EXT,
 608	/* pio */
 609	ATA_CMD_PIO_READ,
 610	ATA_CMD_PIO_WRITE,
 611	ATA_CMD_PIO_READ_EXT,
 612	ATA_CMD_PIO_WRITE_EXT,
 613	0,
 614	0,
 615	0,
 616	0,
 617	/* dma */
 618	ATA_CMD_READ,
 619	ATA_CMD_WRITE,
 620	ATA_CMD_READ_EXT,
 621	ATA_CMD_WRITE_EXT,
 622	0,
 623	0,
 624	0,
 625	ATA_CMD_WRITE_FUA_EXT
 626};
 627
 628/**
 629 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 630 *	@tf: command to examine and configure
 631 *	@dev: device tf belongs to
 632 *
 633 *	Examine the device configuration and tf->flags to calculate
 634 *	the proper read/write commands and protocol to use.
 635 *
 636 *	LOCKING:
 637 *	caller.
 638 */
 639static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
 640{
 641	u8 cmd;
 642
 643	int index, fua, lba48, write;
 644
 645	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
 646	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
 647	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
 648
 649	if (dev->flags & ATA_DFLAG_PIO) {
 650		tf->protocol = ATA_PROT_PIO;
 651		index = dev->multi_count ? 0 : 8;
 652	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
 653		/* Unable to use DMA due to host limitation */
 654		tf->protocol = ATA_PROT_PIO;
 655		index = dev->multi_count ? 0 : 8;
 656	} else {
 657		tf->protocol = ATA_PROT_DMA;
 658		index = 16;
 659	}
 660
 661	cmd = ata_rw_cmds[index + fua + lba48 + write];
 662	if (cmd) {
 663		tf->command = cmd;
 664		return 0;
 665	}
 666	return -1;
 667}
 668
 669/**
 670 *	ata_tf_read_block - Read block address from ATA taskfile
 671 *	@tf: ATA taskfile of interest
 672 *	@dev: ATA device @tf belongs to
 673 *
 674 *	LOCKING:
 675 *	None.
 676 *
 677 *	Read block address from @tf.  This function can handle all
 678 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 679 *	flags select the address format to use.
 680 *
 681 *	RETURNS:
 682 *	Block address read from @tf.
 683 */
 684u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
 685{
 686	u64 block = 0;
 687
 688	if (tf->flags & ATA_TFLAG_LBA) {
 689		if (tf->flags & ATA_TFLAG_LBA48) {
 690			block |= (u64)tf->hob_lbah << 40;
 691			block |= (u64)tf->hob_lbam << 32;
 692			block |= (u64)tf->hob_lbal << 24;
 693		} else
 694			block |= (tf->device & 0xf) << 24;
 695
 696		block |= tf->lbah << 16;
 697		block |= tf->lbam << 8;
 698		block |= tf->lbal;
 699	} else {
 700		u32 cyl, head, sect;
 701
 702		cyl = tf->lbam | (tf->lbah << 8);
 703		head = tf->device & 0xf;
 704		sect = tf->lbal;
 705
 706		if (!sect) {
 707			ata_dev_warn(dev,
 708				     "device reported invalid CHS sector 0\n");
 709			return U64_MAX;
 710		}
 711
 712		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
 713	}
 714
 715	return block;
 716}
 717
 718/**
 719 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 720 *	@tf: Target ATA taskfile
 721 *	@dev: ATA device @tf belongs to
 722 *	@block: Block address
 723 *	@n_block: Number of blocks
 724 *	@tf_flags: RW/FUA etc...
 725 *	@tag: tag
 726 *	@class: IO priority class
 727 *
 728 *	LOCKING:
 729 *	None.
 730 *
 731 *	Build ATA taskfile @tf for read/write request described by
 732 *	@block, @n_block, @tf_flags and @tag on @dev.
 733 *
 734 *	RETURNS:
 735 *
 736 *	0 on success, -ERANGE if the request is too large for @dev,
 737 *	-EINVAL if the request is invalid.
 738 */
 739int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 740		    u64 block, u32 n_block, unsigned int tf_flags,
 741		    unsigned int tag, int class)
 742{
 743	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 744	tf->flags |= tf_flags;
 745
 746	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
 747		/* yay, NCQ */
 748		if (!lba_48_ok(block, n_block))
 749			return -ERANGE;
 750
 751		tf->protocol = ATA_PROT_NCQ;
 752		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 753
 754		if (tf->flags & ATA_TFLAG_WRITE)
 755			tf->command = ATA_CMD_FPDMA_WRITE;
 756		else
 757			tf->command = ATA_CMD_FPDMA_READ;
 758
 759		tf->nsect = tag << 3;
 760		tf->hob_feature = (n_block >> 8) & 0xff;
 761		tf->feature = n_block & 0xff;
 762
 763		tf->hob_lbah = (block >> 40) & 0xff;
 764		tf->hob_lbam = (block >> 32) & 0xff;
 765		tf->hob_lbal = (block >> 24) & 0xff;
 766		tf->lbah = (block >> 16) & 0xff;
 767		tf->lbam = (block >> 8) & 0xff;
 768		tf->lbal = block & 0xff;
 769
 770		tf->device = ATA_LBA;
 771		if (tf->flags & ATA_TFLAG_FUA)
 772			tf->device |= 1 << 7;
 773
 774		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
 775			if (class == IOPRIO_CLASS_RT)
 776				tf->hob_nsect |= ATA_PRIO_HIGH <<
 777						 ATA_SHIFT_PRIO;
 778		}
 779	} else if (dev->flags & ATA_DFLAG_LBA) {
 780		tf->flags |= ATA_TFLAG_LBA;
 781
 782		if (lba_28_ok(block, n_block)) {
 783			/* use LBA28 */
 784			tf->device |= (block >> 24) & 0xf;
 785		} else if (lba_48_ok(block, n_block)) {
 786			if (!(dev->flags & ATA_DFLAG_LBA48))
 787				return -ERANGE;
 788
 789			/* use LBA48 */
 790			tf->flags |= ATA_TFLAG_LBA48;
 791
 792			tf->hob_nsect = (n_block >> 8) & 0xff;
 793
 794			tf->hob_lbah = (block >> 40) & 0xff;
 795			tf->hob_lbam = (block >> 32) & 0xff;
 796			tf->hob_lbal = (block >> 24) & 0xff;
 797		} else
 798			/* request too large even for LBA48 */
 799			return -ERANGE;
 800
 801		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
 802			return -EINVAL;
 803
 804		tf->nsect = n_block & 0xff;
 805
 806		tf->lbah = (block >> 16) & 0xff;
 807		tf->lbam = (block >> 8) & 0xff;
 808		tf->lbal = block & 0xff;
 809
 810		tf->device |= ATA_LBA;
 811	} else {
 812		/* CHS */
 813		u32 sect, head, cyl, track;
 814
 815		/* The request -may- be too large for CHS addressing. */
 816		if (!lba_28_ok(block, n_block))
 817			return -ERANGE;
 818
 819		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
 820			return -EINVAL;
 821
 822		/* Convert LBA to CHS */
 823		track = (u32)block / dev->sectors;
 824		cyl   = track / dev->heads;
 825		head  = track % dev->heads;
 826		sect  = (u32)block % dev->sectors + 1;
 827
 828		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
 829			(u32)block, track, cyl, head, sect);
 830
 831		/* Check whether the converted CHS can fit.
 832		   Cylinder: 0-65535
 833		   Head: 0-15
 834		   Sector: 1-255 */
 835		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
 836			return -ERANGE;
 837
 838		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
 839		tf->lbal = sect;
 840		tf->lbam = cyl;
 841		tf->lbah = cyl >> 8;
 842		tf->device |= head;
 843	}
 844
 845	return 0;
 846}
 847
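/*
 * Illustrative sketch, not part of libata: building the taskfile for a
 * small read, roughly what the SCSI translation layer does for a
 * READ(10) command.  The LBA and sector count are made up.
 */
static int __maybe_unused example_build_read_tf(struct ata_queued_cmd *qc)
{
	/* read 8 sectors starting at LBA 0x1000; tf_flags=0 means a read */
	return ata_build_rw_tf(&qc->tf, qc->dev, 0x1000, 8, 0,
			       qc->hw_tag, IOPRIO_CLASS_NONE);
}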
 848/**
 849 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 850 *	@pio_mask: pio_mask
 851 *	@mwdma_mask: mwdma_mask
 852 *	@udma_mask: udma_mask
 853 *
 854 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 855 *	unsigned int xfer_mask.
 856 *
 857 *	LOCKING:
 858 *	None.
 859 *
 860 *	RETURNS:
 861 *	Packed xfer_mask.
 862 */
 863unsigned long ata_pack_xfermask(unsigned long pio_mask,
 864				unsigned long mwdma_mask,
 865				unsigned long udma_mask)
 866{
 867	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
 868		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
 869		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 870}
 871
 872/**
 873 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 874 *	@xfer_mask: xfer_mask to unpack
 875 *	@pio_mask: resulting pio_mask
 876 *	@mwdma_mask: resulting mwdma_mask
 877 *	@udma_mask: resulting udma_mask
 878 *
 879 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 880 *	Any NULL destination masks will be ignored.
 881 */
 882void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
 883			 unsigned long *mwdma_mask, unsigned long *udma_mask)
 884{
 885	if (pio_mask)
 886		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
 887	if (mwdma_mask)
 888		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
 889	if (udma_mask)
 890		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 891}
 892
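/*
 * Illustrative sketch, not part of libata: packing the three per-type
 * masks into a single xfer_mask and splitting it up again.  The
 * ATA_PIO4/ATA_MWDMA2/ATA_UDMA6 mask constants come from <linux/ata.h>.
 */
static void __maybe_unused example_xfermask_roundtrip(void)
{
	unsigned long xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == ATA_PIO4, mwdma == ATA_MWDMA2, udma == ATA_UDMA6 */
}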
 893static const struct ata_xfer_ent {
 894	int shift, bits;
 895	u8 base;
 896} ata_xfer_tbl[] = {
 897	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
 898	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
 899	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
 900	{ -1, },
 901};
 902
 903/**
 904 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 905 *	@xfer_mask: xfer_mask of interest
 906 *
 907 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 908 *	bit of @xfer_mask is considered.
 909 *
 910 *	LOCKING:
 911 *	None.
 912 *
 913 *	RETURNS:
 914 *	Matching XFER_* value, 0xff if no match found.
 915 */
 916u8 ata_xfer_mask2mode(unsigned long xfer_mask)
 917{
 918	int highbit = fls(xfer_mask) - 1;
 919	const struct ata_xfer_ent *ent;
 920
 921	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 922		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
 923			return ent->base + highbit - ent->shift;
 924	return 0xff;
 925}
 926
 927/**
 928 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 929 *	@xfer_mode: XFER_* of interest
 930 *
 931 *	Return matching xfer_mask for @xfer_mode.
 932 *
 933 *	LOCKING:
 934 *	None.
 935 *
 936 *	RETURNS:
 937 *	Matching xfer_mask, 0 if no match found.
 938 */
 939unsigned long ata_xfer_mode2mask(u8 xfer_mode)
 940{
 941	const struct ata_xfer_ent *ent;
 942
 943	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 944		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 945			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
 946				& ~((1 << ent->shift) - 1);
 947	return 0;
 948}
 949
 950/**
 951 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 952 *	@xfer_mode: XFER_* of interest
 953 *
 954 *	Return matching xfer_shift for @xfer_mode.
 955 *
 956 *	LOCKING:
 957 *	None.
 958 *
 959 *	RETURNS:
 960 *	Matching xfer_shift, -1 if no match found.
 961 */
 962int ata_xfer_mode2shift(unsigned long xfer_mode)
 963{
 964	const struct ata_xfer_ent *ent;
 965
 966	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 967		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 968			return ent->shift;
 969	return -1;
 970}
 971
 972/**
 973 *	ata_mode_string - convert xfer_mask to string
 974 *	@xfer_mask: mask of bits supported; only highest bit counts.
 975 *
 976 *	Determine string which represents the highest speed
 977 *	(highest bit in @xfer_mask).
 978 *
 979 *	LOCKING:
 980 *	None.
 981 *
 982 *	RETURNS:
 983 *	Constant C string representing highest speed listed in
 984 *	@xfer_mask, or the constant C string "<n/a>".
 985 */
 986const char *ata_mode_string(unsigned long xfer_mask)
 987{
 988	static const char * const xfer_mode_str[] = {
 989		"PIO0",
 990		"PIO1",
 991		"PIO2",
 992		"PIO3",
 993		"PIO4",
 994		"PIO5",
 995		"PIO6",
 996		"MWDMA0",
 997		"MWDMA1",
 998		"MWDMA2",
 999		"MWDMA3",
1000		"MWDMA4",
1001		"UDMA/16",
1002		"UDMA/25",
1003		"UDMA/33",
1004		"UDMA/44",
1005		"UDMA/66",
1006		"UDMA/100",
1007		"UDMA/133",
1008		"UDMA7",
1009	};
1010	int highbit;
1011
1012	highbit = fls(xfer_mask) - 1;
1013	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1014		return xfer_mode_str[highbit];
1015	return "<n/a>";
1016}
1017
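/*
 * Illustrative sketch, not part of libata: reporting the fastest UDMA
 * mode a device claims, using ata_mode_string() and
 * ata_xfer_mask2mode() on the device's UDMA mask.
 */
static void __maybe_unused example_report_max_udma(struct ata_device *dev)
{
	unsigned long xfer_mask = dev->udma_mask << ATA_SHIFT_UDMA;

	ata_dev_info(dev, "max UDMA mode %s (XFER 0x%02x)\n",
		     ata_mode_string(xfer_mask),
		     ata_xfer_mask2mode(xfer_mask));
}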
1018const char *sata_spd_string(unsigned int spd)
1019{
1020	static const char * const spd_str[] = {
1021		"1.5 Gbps",
1022		"3.0 Gbps",
1023		"6.0 Gbps",
1024	};
1025
1026	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1027		return "<unknown>";
1028	return spd_str[spd - 1];
1029}
1030
1031/**
1032 *	ata_dev_classify - determine device type based on ATA-spec signature
1033 *	@tf: ATA taskfile register set for device to be identified
1034 *
1035 *	Determine from taskfile register contents whether a device is
1036 *	ATA or ATAPI, as per "Signature and persistence" section
1037 *	of ATA/PI spec (volume 1, sect 5.14).
1038 *
1039 *	LOCKING:
1040 *	None.
1041 *
1042 *	RETURNS:
1043 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1044 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1045 */
1046unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1047{
1048	/* Apple's open source Darwin code hints that some devices only
1049	 * put a proper signature into the LBA mid/high registers,
1050	 * so we only check those.  It's sufficient for uniqueness.
1051	 *
1052	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1053	 * signatures for ATA and ATAPI devices attached on SerialATA,
1054	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1055	 * spec has never mentioned using different signatures
1056	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1057	 * Multiplier specification began to use 0x69/0x96 to identify
1058	 * port multipliers and 0x3c/0xc3 to identify a SEMB device.
1059	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1060	 * 0x69/0x96 shortly and described them as reserved for
1061	 * SerialATA.
1062	 *
1063	 * We follow the current spec and consider that 0x69/0x96
1064	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1065	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1066	 * SEMB signature.  This is worked around in
1067	 * ata_dev_read_id().
1068	 */
1069	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1070		DPRINTK("found ATA device by sig\n");
1071		return ATA_DEV_ATA;
1072	}
1073
1074	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1075		DPRINTK("found ATAPI device by sig\n");
1076		return ATA_DEV_ATAPI;
1077	}
1078
1079	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1080		DPRINTK("found PMP device by sig\n");
1081		return ATA_DEV_PMP;
1082	}
1083
1084	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1085		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1086		return ATA_DEV_SEMB;
1087	}
1088
1089	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1090		DPRINTK("found ZAC device by sig\n");
1091		return ATA_DEV_ZAC;
1092	}
1093
1094	DPRINTK("unknown device\n");
1095	return ATA_DEV_UNKNOWN;
1096}
1097
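/*
 * Illustrative sketch, not part of libata: classifying a device from
 * the signature left by a reset, the way SATA drivers typically do it
 * with the D2H Register FIS received from the device.
 */
static unsigned int __maybe_unused example_classify_from_sig_fis(const u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_from_fis(fis, &tf);
	return ata_dev_classify(&tf);
}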
1098/**
1099 *	ata_id_string - Convert IDENTIFY DEVICE page into string
1100 *	@id: IDENTIFY DEVICE results we will examine
1101 *	@s: string into which data is output
1102 *	@ofs: offset into identify device page
1103 *	@len: length of string to return. must be an even number.
1104 *
1105 *	The strings in the IDENTIFY DEVICE page are broken up into
1106 *	16-bit chunks.  Run through the string, and output each
1107 *	8-bit chunk linearly, regardless of platform.
1108 *
1109 *	LOCKING:
1110 *	caller.
1111 */
1112
1113void ata_id_string(const u16 *id, unsigned char *s,
1114		   unsigned int ofs, unsigned int len)
1115{
1116	unsigned int c;
1117
1118	BUG_ON(len & 1);
1119
1120	while (len > 0) {
1121		c = id[ofs] >> 8;
1122		*s = c;
1123		s++;
1124
1125		c = id[ofs] & 0xff;
1126		*s = c;
1127		s++;
1128
1129		ofs++;
1130		len -= 2;
1131	}
1132}
1133
1134/**
1135 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1136 *	@id: IDENTIFY DEVICE results we will examine
1137 *	@s: string into which data is output
1138 *	@ofs: offset into identify device page
1139 *	@len: length of string to return. must be an odd number.
1140 *
1141 *	This function is identical to ata_id_string except that it
1142 *	trims trailing spaces and terminates the resulting string with
1143 *	null.  @len must be actual maximum length (even number) + 1.
1144 *
1145 *	LOCKING:
1146 *	caller.
1147 */
1148void ata_id_c_string(const u16 *id, unsigned char *s,
1149		     unsigned int ofs, unsigned int len)
1150{
1151	unsigned char *p;
1152
1153	ata_id_string(id, s, ofs, len - 1);
1154
1155	p = s + strnlen(s, len - 1);
1156	while (p > s && p[-1] == ' ')
1157		p--;
1158	*p = '\0';
1159}
1160
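/*
 * Illustrative sketch, not part of libata: pulling the model and
 * firmware revision strings out of IDENTIFY data as NUL-terminated
 * C strings, much as the EH and SCSI glue code does.
 */
static void __maybe_unused example_get_id_strings(const struct ata_device *dev)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];
	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];

	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(dev->id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
}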
1161static u64 ata_id_n_sectors(const u16 *id)
1162{
1163	if (ata_id_has_lba(id)) {
1164		if (ata_id_has_lba48(id))
1165			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1166		else
1167			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1168	} else {
1169		if (ata_id_current_chs_valid(id))
1170			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1171			       id[ATA_ID_CUR_SECTORS];
1172		else
1173			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1174			       id[ATA_ID_SECTORS];
1175	}
1176}
1177
1178u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1179{
1180	u64 sectors = 0;
1181
1182	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1183	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1184	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1185	sectors |= (tf->lbah & 0xff) << 16;
1186	sectors |= (tf->lbam & 0xff) << 8;
1187	sectors |= (tf->lbal & 0xff);
1188
1189	return sectors;
1190}
1191
1192u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1193{
1194	u64 sectors = 0;
1195
1196	sectors |= (tf->device & 0x0f) << 24;
1197	sectors |= (tf->lbah & 0xff) << 16;
1198	sectors |= (tf->lbam & 0xff) << 8;
1199	sectors |= (tf->lbal & 0xff);
1200
1201	return sectors;
1202}
1203
1204/**
1205 *	ata_read_native_max_address - Read native max address
1206 *	@dev: target device
1207 *	@max_sectors: out parameter for the result native max address
1208 *
1209 *	Perform an LBA48 or LBA28 native size query upon the device in
1210 *	question.
1211 *
1212 *	RETURNS:
1213 *	0 on success, -EACCES if command is aborted by the drive.
1214 *	-EIO on other errors.
1215 */
1216static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1217{
1218	unsigned int err_mask;
1219	struct ata_taskfile tf;
1220	int lba48 = ata_id_has_lba48(dev->id);
1221
1222	ata_tf_init(dev, &tf);
1223
1224	/* always clear all address registers */
1225	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1226
1227	if (lba48) {
1228		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1229		tf.flags |= ATA_TFLAG_LBA48;
1230	} else
1231		tf.command = ATA_CMD_READ_NATIVE_MAX;
1232
1233	tf.protocol = ATA_PROT_NODATA;
1234	tf.device |= ATA_LBA;
1235
1236	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1237	if (err_mask) {
1238		ata_dev_warn(dev,
1239			     "failed to read native max address (err_mask=0x%x)\n",
1240			     err_mask);
1241		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1242			return -EACCES;
1243		return -EIO;
1244	}
1245
1246	if (lba48)
1247		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1248	else
1249		*max_sectors = ata_tf_to_lba(&tf) + 1;
1250	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1251		(*max_sectors)--;
1252	return 0;
1253}
1254
1255/**
1256 *	ata_set_max_sectors - Set max sectors
1257 *	@dev: target device
1258 *	@new_sectors: new max sectors value to set for the device
1259 *
1260 *	Set max sectors of @dev to @new_sectors.
1261 *
1262 *	RETURNS:
1263 *	0 on success, -EACCES if command is aborted or denied (due to
1264 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1265 *	errors.
1266 */
1267static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1268{
1269	unsigned int err_mask;
1270	struct ata_taskfile tf;
1271	int lba48 = ata_id_has_lba48(dev->id);
1272
1273	new_sectors--;
1274
1275	ata_tf_init(dev, &tf);
1276
1277	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1278
1279	if (lba48) {
1280		tf.command = ATA_CMD_SET_MAX_EXT;
1281		tf.flags |= ATA_TFLAG_LBA48;
1282
1283		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1284		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1285		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1286	} else {
1287		tf.command = ATA_CMD_SET_MAX;
1288
1289		tf.device |= (new_sectors >> 24) & 0xf;
1290	}
1291
1292	tf.protocol = ATA_PROT_NODATA;
1293	tf.device |= ATA_LBA;
1294
1295	tf.lbal = (new_sectors >> 0) & 0xff;
1296	tf.lbam = (new_sectors >> 8) & 0xff;
1297	tf.lbah = (new_sectors >> 16) & 0xff;
1298
1299	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1300	if (err_mask) {
1301		ata_dev_warn(dev,
1302			     "failed to set max address (err_mask=0x%x)\n",
1303			     err_mask);
1304		if (err_mask == AC_ERR_DEV &&
1305		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1306			return -EACCES;
1307		return -EIO;
1308	}
1309
1310	return 0;
1311}
1312
1313/**
1314 *	ata_hpa_resize		-	Resize a device with an HPA set
1315 *	@dev: Device to resize
1316 *
1317 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1318 *	it if required to the full size of the media. The caller must check
1319 *	the drive has the HPA feature set enabled.
1320 *
1321 *	RETURNS:
1322 *	0 on success, -errno on failure.
1323 */
1324static int ata_hpa_resize(struct ata_device *dev)
1325{
1326	struct ata_eh_context *ehc = &dev->link->eh_context;
1327	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1328	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1329	u64 sectors = ata_id_n_sectors(dev->id);
1330	u64 native_sectors;
1331	int rc;
1332
1333	/* do we need to do it? */
1334	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1335	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1336	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1337		return 0;
1338
1339	/* read native max address */
1340	rc = ata_read_native_max_address(dev, &native_sectors);
1341	if (rc) {
1342		/* If device aborted the command or HPA isn't going to
1343		 * be unlocked, skip HPA resizing.
1344		 */
1345		if (rc == -EACCES || !unlock_hpa) {
1346			ata_dev_warn(dev,
1347				     "HPA support seems broken, skipping HPA handling\n");
1348			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1349
1350			/* we can continue if device aborted the command */
1351			if (rc == -EACCES)
1352				rc = 0;
1353		}
1354
1355		return rc;
1356	}
1357	dev->n_native_sectors = native_sectors;
1358
1359	/* nothing to do? */
1360	if (native_sectors <= sectors || !unlock_hpa) {
1361		if (!print_info || native_sectors == sectors)
1362			return 0;
1363
1364		if (native_sectors > sectors)
1365			ata_dev_info(dev,
1366				"HPA detected: current %llu, native %llu\n",
1367				(unsigned long long)sectors,
1368				(unsigned long long)native_sectors);
1369		else if (native_sectors < sectors)
1370			ata_dev_warn(dev,
1371				"native sectors (%llu) is smaller than sectors (%llu)\n",
1372				(unsigned long long)native_sectors,
1373				(unsigned long long)sectors);
1374		return 0;
1375	}
1376
1377	/* let's unlock HPA */
1378	rc = ata_set_max_sectors(dev, native_sectors);
1379	if (rc == -EACCES) {
1380		/* if device aborted the command, skip HPA resizing */
1381		ata_dev_warn(dev,
1382			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1383			     (unsigned long long)sectors,
1384			     (unsigned long long)native_sectors);
1385		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1386		return 0;
1387	} else if (rc)
1388		return rc;
1389
1390	/* re-read IDENTIFY data */
1391	rc = ata_dev_reread_id(dev, 0);
1392	if (rc) {
1393		ata_dev_err(dev,
1394			    "failed to re-read IDENTIFY data after HPA resizing\n");
1395		return rc;
1396	}
1397
1398	if (print_info) {
1399		u64 new_sectors = ata_id_n_sectors(dev->id);
1400		ata_dev_info(dev,
1401			"HPA unlocked: %llu -> %llu, native %llu\n",
1402			(unsigned long long)sectors,
1403			(unsigned long long)new_sectors,
1404			(unsigned long long)native_sectors);
1405	}
1406
1407	return 0;
1408}
1409
1410/**
1411 *	ata_dump_id - IDENTIFY DEVICE info debugging output
1412 *	@id: IDENTIFY DEVICE page to dump
1413 *
1414 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1415 *	page.
1416 *
1417 *	LOCKING:
1418 *	caller.
1419 */
1420
1421static inline void ata_dump_id(const u16 *id)
1422{
1423	DPRINTK("49==0x%04x  "
1424		"53==0x%04x  "
1425		"63==0x%04x  "
1426		"64==0x%04x  "
1427		"75==0x%04x  \n",
1428		id[49],
1429		id[53],
1430		id[63],
1431		id[64],
1432		id[75]);
1433	DPRINTK("80==0x%04x  "
1434		"81==0x%04x  "
1435		"82==0x%04x  "
1436		"83==0x%04x  "
1437		"84==0x%04x  \n",
1438		id[80],
1439		id[81],
1440		id[82],
1441		id[83],
1442		id[84]);
1443	DPRINTK("88==0x%04x  "
1444		"93==0x%04x\n",
1445		id[88],
1446		id[93]);
1447}
1448
1449/**
1450 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1451 *	@id: IDENTIFY data to compute xfer mask from
1452 *
1453 *	Compute the xfermask for this device. This is not as trivial
1454 *	as it seems if we must consider early devices correctly.
1455 *
1456 *	FIXME: pre IDE drive timing (do we care ?).
1457 *
1458 *	LOCKING:
1459 *	None.
1460 *
1461 *	RETURNS:
1462 *	Computed xfermask
1463 */
1464unsigned long ata_id_xfermask(const u16 *id)
1465{
1466	unsigned long pio_mask, mwdma_mask, udma_mask;
1467
1468	/* Usual case. Word 53 indicates word 64 is valid */
1469	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1470		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1471		pio_mask <<= 3;
1472		pio_mask |= 0x7;
1473	} else {
1474		/* If word 64 isn't valid then Word 51 high byte holds
1475		 * the PIO timing number for the maximum. Turn it into
1476		 * a mask.
1477		 */
1478		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1479		if (mode < 5)	/* Valid PIO range */
1480			pio_mask = (2 << mode) - 1;
1481		else
1482			pio_mask = 1;
1483
1484		/* But wait.. there's more. Design your standards by
1485		 * committee and you too can get a free iordy field to
1486		 * process. However it's the speeds, not the modes, that
1487		 * are supported... Note drivers using the timing API
1488		 * will get this right anyway
1489		 */
1490	}
1491
1492	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1493
1494	if (ata_id_is_cfa(id)) {
1495		/*
1496		 *	Process compact flash extended modes
1497		 */
1498		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1499		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1500
1501		if (pio)
1502			pio_mask |= (1 << 5);
1503		if (pio > 1)
1504			pio_mask |= (1 << 6);
1505		if (dma)
1506			mwdma_mask |= (1 << 3);
1507		if (dma > 1)
1508			mwdma_mask |= (1 << 4);
1509	}
1510
1511	udma_mask = 0;
1512	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1513		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1514
1515	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1516}
1517
1518static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1519{
1520	struct completion *waiting = qc->private_data;
1521
1522	complete(waiting);
1523}
1524
1525/**
1526 *	ata_exec_internal_sg - execute libata internal command
1527 *	@dev: Device to which the command is sent
1528 *	@tf: Taskfile registers for the command and the result
1529 *	@cdb: CDB for packet command
1530 *	@dma_dir: Data transfer direction of the command
1531 *	@sgl: sg list for the data buffer of the command
1532 *	@n_elem: Number of sg entries
1533 *	@timeout: Timeout in msecs (0 for default)
1534 *
1535 *	Executes libata internal command with timeout.  @tf contains
1536 *	command on entry and result on return.  Timeout and error
1537 *	conditions are reported via return value.  No recovery action
1538 *	is taken after a command times out.  It's the caller's duty to
1539 *	clean up after timeout.
1540 *
1541 *	LOCKING:
1542 *	None.  Should be called with kernel context, might sleep.
1543 *
1544 *	RETURNS:
1545 *	Zero on success, AC_ERR_* mask on failure
1546 */
1547unsigned ata_exec_internal_sg(struct ata_device *dev,
1548			      struct ata_taskfile *tf, const u8 *cdb,
1549			      int dma_dir, struct scatterlist *sgl,
1550			      unsigned int n_elem, unsigned long timeout)
1551{
1552	struct ata_link *link = dev->link;
1553	struct ata_port *ap = link->ap;
1554	u8 command = tf->command;
1555	int auto_timeout = 0;
1556	struct ata_queued_cmd *qc;
1557	unsigned int preempted_tag;
1558	u32 preempted_sactive;
1559	u64 preempted_qc_active;
1560	int preempted_nr_active_links;
1561	DECLARE_COMPLETION_ONSTACK(wait);
1562	unsigned long flags;
1563	unsigned int err_mask;
1564	int rc;
1565
1566	spin_lock_irqsave(ap->lock, flags);
1567
1568	/* no internal command while frozen */
1569	if (ap->pflags & ATA_PFLAG_FROZEN) {
1570		spin_unlock_irqrestore(ap->lock, flags);
1571		return AC_ERR_SYSTEM;
1572	}
1573
1574	/* initialize internal qc */
1575	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1576
1577	qc->tag = ATA_TAG_INTERNAL;
1578	qc->hw_tag = 0;
1579	qc->scsicmd = NULL;
1580	qc->ap = ap;
1581	qc->dev = dev;
1582	ata_qc_reinit(qc);
1583
1584	preempted_tag = link->active_tag;
1585	preempted_sactive = link->sactive;
1586	preempted_qc_active = ap->qc_active;
1587	preempted_nr_active_links = ap->nr_active_links;
1588	link->active_tag = ATA_TAG_POISON;
1589	link->sactive = 0;
1590	ap->qc_active = 0;
1591	ap->nr_active_links = 0;
1592
1593	/* prepare & issue qc */
1594	qc->tf = *tf;
1595	if (cdb)
1596		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1597
1598	/* some SATA bridges need us to indicate data xfer direction */
1599	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1600	    dma_dir == DMA_FROM_DEVICE)
1601		qc->tf.feature |= ATAPI_DMADIR;
1602
1603	qc->flags |= ATA_QCFLAG_RESULT_TF;
1604	qc->dma_dir = dma_dir;
1605	if (dma_dir != DMA_NONE) {
1606		unsigned int i, buflen = 0;
1607		struct scatterlist *sg;
1608
1609		for_each_sg(sgl, sg, n_elem, i)
1610			buflen += sg->length;
1611
1612		ata_sg_init(qc, sgl, n_elem);
1613		qc->nbytes = buflen;
1614	}
1615
1616	qc->private_data = &wait;
1617	qc->complete_fn = ata_qc_complete_internal;
1618
1619	ata_qc_issue(qc);
1620
1621	spin_unlock_irqrestore(ap->lock, flags);
1622
1623	if (!timeout) {
1624		if (ata_probe_timeout)
1625			timeout = ata_probe_timeout * 1000;
1626		else {
1627			timeout = ata_internal_cmd_timeout(dev, command);
1628			auto_timeout = 1;
1629		}
1630	}
1631
1632	if (ap->ops->error_handler)
1633		ata_eh_release(ap);
1634
1635	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1636
1637	if (ap->ops->error_handler)
1638		ata_eh_acquire(ap);
1639
1640	ata_sff_flush_pio_task(ap);
1641
1642	if (!rc) {
1643		spin_lock_irqsave(ap->lock, flags);
1644
1645		/* We're racing with irq here.  If we lose, the
1646		 * following test prevents us from completing the qc
1647		 * twice.  If we win, the port is frozen and will be
1648		 * cleaned up by ->post_internal_cmd().
1649		 */
1650		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1651			qc->err_mask |= AC_ERR_TIMEOUT;
1652
1653			if (ap->ops->error_handler)
1654				ata_port_freeze(ap);
1655			else
1656				ata_qc_complete(qc);
1657
1658			if (ata_msg_warn(ap))
1659				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1660					     command);
1661		}
1662
1663		spin_unlock_irqrestore(ap->lock, flags);
1664	}
1665
1666	/* do post_internal_cmd */
1667	if (ap->ops->post_internal_cmd)
1668		ap->ops->post_internal_cmd(qc);
1669
1670	/* perform minimal error analysis */
1671	if (qc->flags & ATA_QCFLAG_FAILED) {
1672		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1673			qc->err_mask |= AC_ERR_DEV;
1674
1675		if (!qc->err_mask)
1676			qc->err_mask |= AC_ERR_OTHER;
1677
1678		if (qc->err_mask & ~AC_ERR_OTHER)
1679			qc->err_mask &= ~AC_ERR_OTHER;
1680	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1681		qc->result_tf.command |= ATA_SENSE;
1682	}
1683
1684	/* finish up */
1685	spin_lock_irqsave(ap->lock, flags);
1686
1687	*tf = qc->result_tf;
1688	err_mask = qc->err_mask;
1689
1690	ata_qc_free(qc);
1691	link->active_tag = preempted_tag;
1692	link->sactive = preempted_sactive;
1693	ap->qc_active = preempted_qc_active;
1694	ap->nr_active_links = preempted_nr_active_links;
1695
1696	spin_unlock_irqrestore(ap->lock, flags);
1697
1698	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1699		ata_internal_cmd_timed_out(dev, command);
1700
1701	return err_mask;
1702}
1703
1704/**
1705 *	ata_exec_internal - execute libata internal command
1706 *	@dev: Device to which the command is sent
1707 *	@tf: Taskfile registers for the command and the result
1708 *	@cdb: CDB for packet command
1709 *	@dma_dir: Data transfer direction of the command
1710 *	@buf: Data buffer of the command
1711 *	@buflen: Length of data buffer
1712 *	@timeout: Timeout in msecs (0 for default)
1713 *
1714 *	Wrapper around ata_exec_internal_sg() which takes simple
1715 *	buffer instead of sg list.
1716 *
1717 *	LOCKING:
1718 *	None.  Should be called with kernel context, might sleep.
1719 *
1720 *	RETURNS:
1721 *	Zero on success, AC_ERR_* mask on failure
1722 */
1723unsigned ata_exec_internal(struct ata_device *dev,
1724			   struct ata_taskfile *tf, const u8 *cdb,
1725			   int dma_dir, void *buf, unsigned int buflen,
1726			   unsigned long timeout)
1727{
1728	struct scatterlist *psg = NULL, sg;
1729	unsigned int n_elem = 0;
1730
1731	if (dma_dir != DMA_NONE) {
1732		WARN_ON(!buf);
1733		sg_init_one(&sg, buf, buflen);
1734		psg = &sg;
1735		n_elem++;
1736	}
1737
1738	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1739				    timeout);
1740}
1741
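/*
 * Illustrative sketch, not part of libata: issuing a simple non-data
 * command through ata_exec_internal() and returning the AC_ERR_* mask,
 * along the same lines as the FLUSH CACHE helper used by EH.
 */
static unsigned int __maybe_unused example_flush_cache_ext(struct ata_device *dev)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_FLUSH_EXT;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}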
1742/**
1743 *	ata_pio_need_iordy	-	check if iordy needed
1744 *	@adev: ATA device
1745 *
1746 *	Check if the current speed of the device requires IORDY. Used
1747 *	by various controllers for chip configuration.
1748 */
1749unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1750{
1751	/* Don't set IORDY if we're preparing for reset.  IORDY may
1752	 * lead to controller lock up on certain controllers if the
1753	 * port is not occupied.  See bko#11703 for details.
1754	 */
1755	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1756		return 0;
1757	/* Controller doesn't support IORDY.  Probably a pointless
1758	 * check as the caller should know this.
1759	 */
1760	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1761		return 0;
1762	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1763	if (ata_id_is_cfa(adev->id)
1764	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1765		return 0;
1766	/* PIO3 and higher it is mandatory */
1767	if (adev->pio_mode > XFER_PIO_2)
1768		return 1;
1769	/* We turn it on when possible */
1770	if (ata_id_has_iordy(adev->id))
1771		return 1;
1772	return 0;
1773}
1774
1775/**
1776 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1777 *	@adev: ATA device
1778 *
1779 *	Compute the highest mode possible if we are not using iordy. Return
1780 *	-1 if no iordy mode is available.
1781 */
1782static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1783{
1784	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1785	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1786		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1787		/* Is the speed faster than the drive allows non IORDY ? */
1788		if (pio) {
1789			/* This is cycle times not frequency - watch the logic! */
1790			if (pio > 240)	/* PIO2 is 240nS per cycle */
1791				return 3 << ATA_SHIFT_PIO;
1792			return 7 << ATA_SHIFT_PIO;
1793		}
1794	}
1795	return 3 << ATA_SHIFT_PIO;
1796}
1797
1798/**
1799 *	ata_do_dev_read_id		-	default ID read method
1800 *	@dev: device
1801 *	@tf: proposed taskfile
1802 *	@id: data buffer
1803 *
1804 *	Issue the identify taskfile and hand back the buffer containing
1805 *	identify data. For some RAID controllers and for pre ATA devices
1806 *	this function is wrapped or replaced by the driver
1807 */
1808unsigned int ata_do_dev_read_id(struct ata_device *dev,
1809					struct ata_taskfile *tf, u16 *id)
1810{
1811	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1812				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1813}
1814
1815/**
1816 *	ata_dev_read_id - Read ID data from the specified device
1817 *	@dev: target device
1818 *	@p_class: pointer to class of the target device (may be changed)
1819 *	@flags: ATA_READID_* flags
1820 *	@id: buffer to read IDENTIFY data into
1821 *
1822 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1823 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1824 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1825 *	for pre-ATA4 drives.
1826 *
1827 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1828 *	now we abort if we hit that case.
1829 *
1830 *	LOCKING:
1831 *	Kernel thread context (may sleep)
1832 *
1833 *	RETURNS:
1834 *	0 on success, -errno otherwise.
1835 */
1836int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1837		    unsigned int flags, u16 *id)
1838{
1839	struct ata_port *ap = dev->link->ap;
1840	unsigned int class = *p_class;
1841	struct ata_taskfile tf;
1842	unsigned int err_mask = 0;
1843	const char *reason;
1844	bool is_semb = class == ATA_DEV_SEMB;
1845	int may_fallback = 1, tried_spinup = 0;
1846	int rc;
1847
1848	if (ata_msg_ctl(ap))
1849		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1850
1851retry:
1852	ata_tf_init(dev, &tf);
1853
1854	switch (class) {
1855	case ATA_DEV_SEMB:
1856		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1857		/* fall through */
1858	case ATA_DEV_ATA:
1859	case ATA_DEV_ZAC:
1860		tf.command = ATA_CMD_ID_ATA;
1861		break;
1862	case ATA_DEV_ATAPI:
1863		tf.command = ATA_CMD_ID_ATAPI;
1864		break;
1865	default:
1866		rc = -ENODEV;
1867		reason = "unsupported class";
1868		goto err_out;
1869	}
1870
1871	tf.protocol = ATA_PROT_PIO;
1872
1873	/* Some devices choke if TF registers contain garbage.  Make
1874	 * sure those are properly initialized.
1875	 */
1876	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1877
1878	/* Device presence detection is unreliable on some
1879	 * controllers.  Always poll IDENTIFY if available.
1880	 */
1881	tf.flags |= ATA_TFLAG_POLLING;
1882
1883	if (ap->ops->read_id)
1884		err_mask = ap->ops->read_id(dev, &tf, id);
1885	else
1886		err_mask = ata_do_dev_read_id(dev, &tf, id);
1887
1888	if (err_mask) {
1889		if (err_mask & AC_ERR_NODEV_HINT) {
1890			ata_dev_dbg(dev, "NODEV after polling detection\n");
1891			return -ENOENT;
1892		}
1893
1894		if (is_semb) {
1895			ata_dev_info(dev,
1896		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1897			/* SEMB is not supported yet */
1898			*p_class = ATA_DEV_SEMB_UNSUP;
1899			return 0;
1900		}
1901
1902		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1903			/* Device or controller might have reported
1904			 * the wrong device class.  Give a shot at the
1905			 * other IDENTIFY if the current one is
1906			 * aborted by the device.
1907			 */
1908			if (may_fallback) {
1909				may_fallback = 0;
1910
1911				if (class == ATA_DEV_ATA)
1912					class = ATA_DEV_ATAPI;
1913				else
1914					class = ATA_DEV_ATA;
1915				goto retry;
1916			}
1917
1918			/* Control reaches here iff the device aborted
1919			 * both flavors of IDENTIFYs which happens
1920			 * sometimes with phantom devices.
1921			 */
1922			ata_dev_dbg(dev,
1923				    "both IDENTIFYs aborted, assuming NODEV\n");
1924			return -ENOENT;
1925		}
1926
1927		rc = -EIO;
1928		reason = "I/O error";
1929		goto err_out;
1930	}
1931
1932	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1933		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1934			    "class=%d may_fallback=%d tried_spinup=%d\n",
1935			    class, may_fallback, tried_spinup);
1936		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1937			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1938	}
1939
1940	/* Falling back doesn't make sense if ID data was read
1941	 * successfully at least once.
1942	 */
1943	may_fallback = 0;
1944
1945	swap_buf_le16(id, ATA_ID_WORDS);
1946
1947	/* sanity check */
1948	rc = -EINVAL;
1949	reason = "device reports invalid type";
1950
1951	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1952		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1953			goto err_out;
1954		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1955							ata_id_is_ata(id)) {
1956			ata_dev_dbg(dev,
1957				"host indicates ignore ATA devices, ignored\n");
1958			return -ENOENT;
1959		}
1960	} else {
1961		if (ata_id_is_ata(id))
1962			goto err_out;
1963	}
1964
1965	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1966		tried_spinup = 1;
1967		/*
1968		 * Drive powered-up in standby mode, and requires a specific
1969		 * SET_FEATURES spin-up subcommand before it will accept
1970		 * anything other than the original IDENTIFY command.
1971		 */
1972		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1973		if (err_mask && id[2] != 0x738c) {
1974			rc = -EIO;
1975			reason = "SPINUP failed";
1976			goto err_out;
1977		}
1978		/*
1979		 * If the drive initially returned incomplete IDENTIFY info,
1980		 * we now must reissue the IDENTIFY command.
1981		 */
1982		if (id[2] == 0x37c8)
1983			goto retry;
1984	}
1985
1986	if ((flags & ATA_READID_POSTRESET) &&
1987	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1988		/*
1989		 * The exact sequence expected by certain pre-ATA4 drives is:
1990		 * SRST RESET
1991		 * IDENTIFY (optional in early ATA)
1992		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1993		 * anything else..
1994		 * Some drives were very specific about that exact sequence.
1995		 *
1996		 * Note that ATA4 says lba is mandatory so the second check
1997		 * should never trigger.
1998		 */
1999		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2000			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2001			if (err_mask) {
2002				rc = -EIO;
2003				reason = "INIT_DEV_PARAMS failed";
2004				goto err_out;
2005			}
2006
2007			/* current CHS translation info (id[53-58]) might be
2008			 * changed. reread the identify device info.
2009			 */
2010			flags &= ~ATA_READID_POSTRESET;
2011			goto retry;
2012		}
2013	}
2014
2015	*p_class = class;
2016
2017	return 0;
2018
2019 err_out:
2020	if (ata_msg_warn(ap))
2021		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2022			     reason, err_mask);
2023	return rc;
2024}
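
/*
 * Illustrative sketch (not part of libata): how a probe path typically
 * calls ata_dev_read_id().  The helper name is hypothetical; compare
 * with the real caller in ata_bus_probe() further down.
 */
static int example_read_id_after_reset(struct ata_device *dev)
{
	unsigned int class = dev->class;
	int rc;

	/* dev->id is the device's ATA_ID_WORDS-sized IDENTIFY buffer */
	rc = ata_dev_read_id(dev, &class, ATA_READID_POSTRESET, dev->id);
	if (rc)
		return rc;

	/* the class may have been corrected by the ATA/ATAPI fallback */
	dev->class = class;
	return 0;
}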
2025
2026/**
2027 *	ata_read_log_page - read a specific log page
2028 *	@dev: target device
2029 *	@log: log to read
2030 *	@page: page to read
2031 *	@buf: buffer to store read page
2032 *	@sectors: number of sectors to read
2033 *
2034 *	Read log page using READ_LOG_EXT command.
2035 *
2036 *	LOCKING:
2037 *	Kernel thread context (may sleep).
2038 *
2039 *	RETURNS:
2040 *	0 on success, AC_ERR_* mask otherwise.
2041 */
2042unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2043			       u8 page, void *buf, unsigned int sectors)
2044{
2045	unsigned long ap_flags = dev->link->ap->flags;
2046	struct ata_taskfile tf;
2047	unsigned int err_mask;
2048	bool dma = false;
2049
2050	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
2051
2052	/*
2053	 * Return error without actually issuing the command on controllers
2054	 * which e.g. lockup on a read log page.
2055	 */
2056	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2057		return AC_ERR_DEV;
2058
2059retry:
2060	ata_tf_init(dev, &tf);
2061	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
2062	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2063		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2064		tf.protocol = ATA_PROT_DMA;
2065		dma = true;
2066	} else {
2067		tf.command = ATA_CMD_READ_LOG_EXT;
2068		tf.protocol = ATA_PROT_PIO;
2069		dma = false;
2070	}
2071	tf.lbal = log;
2072	tf.lbam = page;
2073	tf.nsect = sectors;
2074	tf.hob_nsect = sectors >> 8;
2075	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2076
2077	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2078				     buf, sectors * ATA_SECT_SIZE, 0);
2079
2080	if (err_mask && dma) {
2081		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2082		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
2083		goto retry;
2084	}
2085
2086	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2087	return err_mask;
2088}
2089
2090static bool ata_log_supported(struct ata_device *dev, u8 log)
2091{
2092	struct ata_port *ap = dev->link->ap;
2093
2094	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2095		return false;
2096	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2097}
2098
2099static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2100{
2101	struct ata_port *ap = dev->link->ap;
2102	unsigned int err, i;
2103
2104	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2105		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2106		return false;
2107	}
2108
2109	/*
2110	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2111	 * supported.
2112	 */
2113	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2114				1);
2115	if (err) {
2116		ata_dev_info(dev,
2117			     "failed to get Device Identify Log Emask 0x%x\n",
2118			     err);
2119		return false;
2120	}
2121
2122	for (i = 0; i < ap->sector_buf[8]; i++) {
2123		if (ap->sector_buf[9 + i] == page)
2124			return true;
2125	}
2126
2127	return false;
2128}
2129
2130static int ata_do_link_spd_horkage(struct ata_device *dev)
2131{
2132	struct ata_link *plink = ata_dev_phys_link(dev);
2133	u32 target, target_limit;
2134
2135	if (!sata_scr_valid(plink))
2136		return 0;
2137
2138	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2139		target = 1;
2140	else
2141		return 0;
2142
2143	target_limit = (1 << target) - 1;
2144
2145	/* if already on stricter limit, no need to push further */
2146	if (plink->sata_spd_limit <= target_limit)
2147		return 0;
2148
2149	plink->sata_spd_limit = target_limit;
2150
2151	/* Request another EH round by returning -EAGAIN if link is
2152	 * going faster than the target speed.  Forward progress is
2153	 * guaranteed by setting sata_spd_limit to target_limit above.
2154	 */
2155	if (plink->sata_spd > target) {
2156		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2157			     sata_spd_string(target));
2158		return -EAGAIN;
2159	}
2160	return 0;
2161}
2162
2163static inline u8 ata_dev_knobble(struct ata_device *dev)
2164{
2165	struct ata_port *ap = dev->link->ap;
2166
2167	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2168		return 0;
2169
2170	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2171}
2172
2173static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2174{
2175	struct ata_port *ap = dev->link->ap;
2176	unsigned int err_mask;
2177
2178	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2179		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2180		return;
2181	}
2182	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2183				     0, ap->sector_buf, 1);
2184	if (err_mask) {
2185		ata_dev_dbg(dev,
2186			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2187			    err_mask);
2188	} else {
2189		u8 *cmds = dev->ncq_send_recv_cmds;
2190
2191		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2192		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2193
2194		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2195			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2196			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2197				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2198		}
2199	}
2200}
2201
2202static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2203{
2204	struct ata_port *ap = dev->link->ap;
2205	unsigned int err_mask;
2206
2207	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2208		ata_dev_warn(dev,
2209			     "NCQ Non-Data Log not supported\n");
2210		return;
2211	}
2212	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2213				     0, ap->sector_buf, 1);
2214	if (err_mask) {
2215		ata_dev_dbg(dev,
2216			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
2217			    err_mask);
2218	} else {
2219		u8 *cmds = dev->ncq_non_data_cmds;
2220
2221		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2222	}
2223}
2224
2225static void ata_dev_config_ncq_prio(struct ata_device *dev)
2226{
2227	struct ata_port *ap = dev->link->ap;
2228	unsigned int err_mask;
2229
2230	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2231		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2232		return;
2233	}
2234
2235	err_mask = ata_read_log_page(dev,
2236				     ATA_LOG_IDENTIFY_DEVICE,
2237				     ATA_LOG_SATA_SETTINGS,
2238				     ap->sector_buf,
2239				     1);
2240	if (err_mask) {
2241		ata_dev_dbg(dev,
2242			    "failed to get Identify Device data, Emask 0x%x\n",
2243			    err_mask);
2244		return;
2245	}
2246
2247	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2248		dev->flags |= ATA_DFLAG_NCQ_PRIO;
2249	} else {
2250		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2251		ata_dev_dbg(dev, "SATA page does not support priority\n");
2252	}
2253
2254}
2255
2256static int ata_dev_config_ncq(struct ata_device *dev,
2257			       char *desc, size_t desc_sz)
2258{
2259	struct ata_port *ap = dev->link->ap;
2260	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2261	unsigned int err_mask;
2262	char *aa_desc = "";
2263
2264	if (!ata_id_has_ncq(dev->id)) {
2265		desc[0] = '\0';
2266		return 0;
2267	}
2268	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2269		snprintf(desc, desc_sz, "NCQ (not used)");
2270		return 0;
2271	}
2272	if (ap->flags & ATA_FLAG_NCQ) {
2273		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2274		dev->flags |= ATA_DFLAG_NCQ;
2275	}
2276
2277	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2278		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2279		ata_id_has_fpdma_aa(dev->id)) {
2280		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2281			SATA_FPDMA_AA);
2282		if (err_mask) {
2283			ata_dev_err(dev,
2284				    "failed to enable AA (error_mask=0x%x)\n",
2285				    err_mask);
2286			if (err_mask != AC_ERR_DEV) {
2287				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2288				return -EIO;
2289			}
2290		} else
2291			aa_desc = ", AA";
2292	}
2293
2294	if (hdepth >= ddepth)
2295		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2296	else
2297		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2298			ddepth, aa_desc);
2299
2300	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2301		if (ata_id_has_ncq_send_and_recv(dev->id))
2302			ata_dev_config_ncq_send_recv(dev);
2303		if (ata_id_has_ncq_non_data(dev->id))
2304			ata_dev_config_ncq_non_data(dev);
2305		if (ata_id_has_ncq_prio(dev->id))
2306			ata_dev_config_ncq_prio(dev);
2307	}
2308
2309	return 0;
2310}
2311
2312static void ata_dev_config_sense_reporting(struct ata_device *dev)
2313{
2314	unsigned int err_mask;
2315
2316	if (!ata_id_has_sense_reporting(dev->id))
2317		return;
2318
2319	if (ata_id_sense_reporting_enabled(dev->id))
2320		return;
2321
2322	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2323	if (err_mask) {
2324		ata_dev_dbg(dev,
2325			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2326			    err_mask);
2327	}
2328}
2329
2330static void ata_dev_config_zac(struct ata_device *dev)
2331{
2332	struct ata_port *ap = dev->link->ap;
2333	unsigned int err_mask;
2334	u8 *identify_buf = ap->sector_buf;
2335
2336	dev->zac_zones_optimal_open = U32_MAX;
2337	dev->zac_zones_optimal_nonseq = U32_MAX;
2338	dev->zac_zones_max_open = U32_MAX;
2339
2340	/*
2341	 * Always set the 'ZAC' flag for Host-managed devices.
2342	 */
2343	if (dev->class == ATA_DEV_ZAC)
2344		dev->flags |= ATA_DFLAG_ZAC;
2345	else if (ata_id_zoned_cap(dev->id) == 0x01)
2346		/*
2347		 * Check for host-aware devices.
2348		 */
2349		dev->flags |= ATA_DFLAG_ZAC;
2350
2351	if (!(dev->flags & ATA_DFLAG_ZAC))
2352		return;
2353
2354	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2355		ata_dev_warn(dev,
2356			     "ATA Zoned Information Log not supported\n");
2357		return;
2358	}
2359
2360	/*
2361	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2362	 */
2363	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2364				     ATA_LOG_ZONED_INFORMATION,
2365				     identify_buf, 1);
2366	if (!err_mask) {
2367		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2368
2369		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2370		if ((zoned_cap >> 63))
2371			dev->zac_zoned_cap = (zoned_cap & 1);
2372		opt_open = get_unaligned_le64(&identify_buf[24]);
2373		if ((opt_open >> 63))
2374			dev->zac_zones_optimal_open = (u32)opt_open;
2375		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2376		if ((opt_nonseq >> 63))
2377			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2378		max_open = get_unaligned_le64(&identify_buf[40]);
2379		if ((max_open >> 63))
2380			dev->zac_zones_max_open = (u32)max_open;
2381	}
2382}
2383
2384static void ata_dev_config_trusted(struct ata_device *dev)
2385{
2386	struct ata_port *ap = dev->link->ap;
2387	u64 trusted_cap;
2388	unsigned int err;
2389
2390	if (!ata_id_has_trusted(dev->id))
2391		return;
2392
2393	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2394		ata_dev_warn(dev,
2395			     "Security Log not supported\n");
2396		return;
2397	}
2398
2399	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2400			ap->sector_buf, 1);
2401	if (err) {
2402		ata_dev_dbg(dev,
2403			    "failed to read Security Log, Emask 0x%x\n", err);
2404		return;
2405	}
2406
2407	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2408	if (!(trusted_cap & (1ULL << 63))) {
2409		ata_dev_dbg(dev,
2410			    "Trusted Computing capability qword not valid!\n");
2411		return;
2412	}
2413
2414	if (trusted_cap & (1 << 0))
2415		dev->flags |= ATA_DFLAG_TRUSTED;
2416}
2417
2418/**
2419 *	ata_dev_configure - Configure the specified ATA/ATAPI device
2420 *	@dev: Target device to configure
2421 *
2422 *	Configure @dev according to @dev->id.  Generic and low-level
2423 *	driver specific fixups are also applied.
2424 *
2425 *	LOCKING:
2426 *	Kernel thread context (may sleep)
2427 *
2428 *	RETURNS:
2429 *	0 on success, -errno otherwise
2430 */
2431int ata_dev_configure(struct ata_device *dev)
2432{
2433	struct ata_port *ap = dev->link->ap;
2434	struct ata_eh_context *ehc = &dev->link->eh_context;
2435	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2436	const u16 *id = dev->id;
2437	unsigned long xfer_mask;
2438	unsigned int err_mask;
2439	char revbuf[7];		/* XYZ-99\0 */
2440	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2441	char modelbuf[ATA_ID_PROD_LEN+1];
2442	int rc;
2443
2444	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2445		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2446		return 0;
2447	}
2448
2449	if (ata_msg_probe(ap))
2450		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2451
2452	/* set horkage */
2453	dev->horkage |= ata_dev_blacklisted(dev);
2454	ata_force_horkage(dev);
2455
2456	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2457		ata_dev_info(dev, "unsupported device, disabling\n");
2458		ata_dev_disable(dev);
2459		return 0;
2460	}
2461
2462	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2463	    dev->class == ATA_DEV_ATAPI) {
2464		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2465			     atapi_enabled ? "not supported with this driver"
2466			     : "disabled");
2467		ata_dev_disable(dev);
2468		return 0;
2469	}
2470
2471	rc = ata_do_link_spd_horkage(dev);
2472	if (rc)
2473		return rc;
2474
2475	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2476	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2477	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2478		dev->horkage |= ATA_HORKAGE_NOLPM;
2479
2480	if (ap->flags & ATA_FLAG_NO_LPM)
2481		dev->horkage |= ATA_HORKAGE_NOLPM;
2482
2483	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2484		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2485		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2486	}
2487
2488	/* let ACPI work its magic */
2489	rc = ata_acpi_on_devcfg(dev);
2490	if (rc)
2491		return rc;
2492
2493	/* massage HPA, do it early as it might change IDENTIFY data */
2494	rc = ata_hpa_resize(dev);
2495	if (rc)
2496		return rc;
2497
2498	/* print device capabilities */
2499	if (ata_msg_probe(ap))
2500		ata_dev_dbg(dev,
2501			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2502			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2503			    __func__,
2504			    id[49], id[82], id[83], id[84],
2505			    id[85], id[86], id[87], id[88]);
2506
2507	/* initialize to-be-configured parameters */
2508	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2509	dev->max_sectors = 0;
2510	dev->cdb_len = 0;
2511	dev->n_sectors = 0;
2512	dev->cylinders = 0;
2513	dev->heads = 0;
2514	dev->sectors = 0;
2515	dev->multi_count = 0;
2516
2517	/*
2518	 * common ATA, ATAPI feature tests
2519	 */
2520
2521	/* find max transfer mode; for printk only */
2522	xfer_mask = ata_id_xfermask(id);
2523
2524	if (ata_msg_probe(ap))
2525		ata_dump_id(id);
2526
2527	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2528	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2529			sizeof(fwrevbuf));
2530
2531	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2532			sizeof(modelbuf));
2533
2534	/* ATA-specific feature tests */
2535	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2536		if (ata_id_is_cfa(id)) {
2537			/* CPRM may make this media unusable */
2538			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2539				ata_dev_warn(dev,
2540	"supports DRM functions and may not be fully accessible\n");
2541			snprintf(revbuf, 7, "CFA");
2542		} else {
2543			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2544			/* Warn the user if the device has TPM extensions */
2545			if (ata_id_has_tpm(id))
2546				ata_dev_warn(dev,
2547	"supports DRM functions and may not be fully accessible\n");
2548		}
2549
2550		dev->n_sectors = ata_id_n_sectors(id);
2551
2552		/* get current R/W Multiple count setting */
2553		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2554			unsigned int max = dev->id[47] & 0xff;
2555			unsigned int cnt = dev->id[59] & 0xff;
2556			/* only recognize/allow powers of two here */
2557			if (is_power_of_2(max) && is_power_of_2(cnt))
2558				if (cnt <= max)
2559					dev->multi_count = cnt;
2560		}
2561
2562		if (ata_id_has_lba(id)) {
2563			const char *lba_desc;
2564			char ncq_desc[24];
2565
2566			lba_desc = "LBA";
2567			dev->flags |= ATA_DFLAG_LBA;
2568			if (ata_id_has_lba48(id)) {
2569				dev->flags |= ATA_DFLAG_LBA48;
2570				lba_desc = "LBA48";
2571
2572				if (dev->n_sectors >= (1UL << 28) &&
2573				    ata_id_has_flush_ext(id))
2574					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2575			}
2576
2577			/* config NCQ */
2578			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2579			if (rc)
2580				return rc;
2581
2582			/* print device info to dmesg */
2583			if (ata_msg_drv(ap) && print_info) {
2584				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2585					     revbuf, modelbuf, fwrevbuf,
2586					     ata_mode_string(xfer_mask));
2587				ata_dev_info(dev,
2588					     "%llu sectors, multi %u: %s %s\n",
2589					(unsigned long long)dev->n_sectors,
2590					dev->multi_count, lba_desc, ncq_desc);
2591			}
2592		} else {
2593			/* CHS */
2594
2595			/* Default translation */
2596			dev->cylinders	= id[1];
2597			dev->heads	= id[3];
2598			dev->sectors	= id[6];
2599
2600			if (ata_id_current_chs_valid(id)) {
2601				/* Current CHS translation is valid. */
2602				dev->cylinders = id[54];
2603				dev->heads     = id[55];
2604				dev->sectors   = id[56];
2605			}
2606
2607			/* print device info to dmesg */
2608			if (ata_msg_drv(ap) && print_info) {
2609				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2610					     revbuf,	modelbuf, fwrevbuf,
2611					     ata_mode_string(xfer_mask));
2612				ata_dev_info(dev,
2613					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2614					     (unsigned long long)dev->n_sectors,
2615					     dev->multi_count, dev->cylinders,
2616					     dev->heads, dev->sectors);
2617			}
2618		}
2619
2620		/* Check and mark DevSlp capability. Get DevSlp timing variables
2621		 * from SATA Settings page of Identify Device Data Log.
2622		 */
2623		if (ata_id_has_devslp(dev->id)) {
2624			u8 *sata_setting = ap->sector_buf;
2625			int i, j;
2626
2627			dev->flags |= ATA_DFLAG_DEVSLP;
2628			err_mask = ata_read_log_page(dev,
2629						     ATA_LOG_IDENTIFY_DEVICE,
2630						     ATA_LOG_SATA_SETTINGS,
2631						     sata_setting,
2632						     1);
2633			if (err_mask)
2634				ata_dev_dbg(dev,
2635					    "failed to get Identify Device Data, Emask 0x%x\n",
2636					    err_mask);
2637			else
2638				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2639					j = ATA_LOG_DEVSLP_OFFSET + i;
2640					dev->devslp_timing[i] = sata_setting[j];
2641				}
2642		}
2643		ata_dev_config_sense_reporting(dev);
2644		ata_dev_config_zac(dev);
2645		ata_dev_config_trusted(dev);
2646		dev->cdb_len = 32;
2647	}
2648
2649	/* ATAPI-specific feature tests */
2650	else if (dev->class == ATA_DEV_ATAPI) {
2651		const char *cdb_intr_string = "";
2652		const char *atapi_an_string = "";
2653		const char *dma_dir_string = "";
2654		u32 sntf;
2655
2656		rc = atapi_cdb_len(id);
2657		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2658			if (ata_msg_warn(ap))
2659				ata_dev_warn(dev, "unsupported CDB len\n");
2660			rc = -EINVAL;
2661			goto err_out_nosup;
2662		}
2663		dev->cdb_len = (unsigned int) rc;
2664
2665		/* Enable ATAPI AN if both the host and device have
2666		 * the support.  If PMP is attached, SNTF is required
2667		 * to enable ATAPI AN to discern between PHY status
2668		 * changed notifications and ATAPI ANs.
2669		 */
2670		if (atapi_an &&
2671		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2672		    (!sata_pmp_attached(ap) ||
2673		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2674			/* issue SET feature command to turn this on */
2675			err_mask = ata_dev_set_feature(dev,
2676					SETFEATURES_SATA_ENABLE, SATA_AN);
2677			if (err_mask)
2678				ata_dev_err(dev,
2679					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2680					    err_mask);
2681			else {
2682				dev->flags |= ATA_DFLAG_AN;
2683				atapi_an_string = ", ATAPI AN";
2684			}
2685		}
2686
2687		if (ata_id_cdb_intr(dev->id)) {
2688			dev->flags |= ATA_DFLAG_CDB_INTR;
2689			cdb_intr_string = ", CDB intr";
2690		}
2691
2692		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2693			dev->flags |= ATA_DFLAG_DMADIR;
2694			dma_dir_string = ", DMADIR";
2695		}
2696
2697		if (ata_id_has_da(dev->id)) {
2698			dev->flags |= ATA_DFLAG_DA;
2699			zpodd_init(dev);
2700		}
2701
2702		/* print device info to dmesg */
2703		if (ata_msg_drv(ap) && print_info)
2704			ata_dev_info(dev,
2705				     "ATAPI: %s, %s, max %s%s%s%s\n",
2706				     modelbuf, fwrevbuf,
2707				     ata_mode_string(xfer_mask),
2708				     cdb_intr_string, atapi_an_string,
2709				     dma_dir_string);
2710	}
2711
2712	/* determine max_sectors */
2713	dev->max_sectors = ATA_MAX_SECTORS;
2714	if (dev->flags & ATA_DFLAG_LBA48)
2715		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2716
2717	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2718	   200 sectors */
2719	if (ata_dev_knobble(dev)) {
2720		if (ata_msg_drv(ap) && print_info)
2721			ata_dev_info(dev, "applying bridge limits\n");
2722		dev->udma_mask &= ATA_UDMA5;
2723		dev->max_sectors = ATA_MAX_SECTORS;
2724	}
2725
2726	if ((dev->class == ATA_DEV_ATAPI) &&
2727	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2728		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2729		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2730	}
2731
2732	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2733		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2734					 dev->max_sectors);
2735
2736	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2737		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2738					 dev->max_sectors);
2739
2740	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2741		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2742
2743	if (ap->ops->dev_config)
2744		ap->ops->dev_config(dev);
2745
2746	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2747		/* Let the user know. We don't want to disallow opens for
2748		   rescue purposes, or in case the vendor is just a blithering
2749		   idiot. Do this after the dev_config call as some controllers
2750		   with buggy firmware may want to avoid reporting false device
2751		   bugs */
2752
2753		if (print_info) {
2754			ata_dev_warn(dev,
2755"Drive reports diagnostics failure. This may indicate a drive\n");
2756			ata_dev_warn(dev,
2757"fault or invalid emulation. Contact drive vendor for information.\n");
2758		}
2759	}
2760
2761	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2762		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2763		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2764	}
2765
2766	return 0;
2767
2768err_out_nosup:
2769	if (ata_msg_probe(ap))
2770		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2771	return rc;
2772}
2773
2774/**
2775 *	ata_cable_40wire	-	return 40 wire cable type
2776 *	@ap: port
2777 *
2778 *	Helper method for drivers which want to hardwire 40 wire cable
2779 *	detection.
2780 */
2781
2782int ata_cable_40wire(struct ata_port *ap)
2783{
2784	return ATA_CBL_PATA40;
2785}
2786
2787/**
2788 *	ata_cable_80wire	-	return 80 wire cable type
2789 *	@ap: port
2790 *
2791 *	Helper method for drivers which want to hardwire 80 wire cable
2792 *	detection.
2793 */
2794
2795int ata_cable_80wire(struct ata_port *ap)
2796{
2797	return ATA_CBL_PATA80;
2798}
2799
2800/**
2801 *	ata_cable_unknown	-	return unknown PATA cable.
2802 *	@ap: port
2803 *
2804 *	Helper method for drivers which have no PATA cable detection.
2805 */
2806
2807int ata_cable_unknown(struct ata_port *ap)
2808{
2809	return ATA_CBL_PATA_UNK;
2810}
2811
2812/**
2813 *	ata_cable_ignore	-	return ignored PATA cable.
2814 *	@ap: port
2815 *
2816 *	Helper method for drivers which don't use cable type to limit
2817 *	transfer mode.
2818 */
2819int ata_cable_ignore(struct ata_port *ap)
2820{
2821	return ATA_CBL_PATA_IGN;
2822}
2823
2824/**
2825 *	ata_cable_sata	-	return SATA cable type
2826 *	@ap: port
2827 *
2828 *	Helper method for drivers which have SATA cables
2829 */
2830
2831int ata_cable_sata(struct ata_port *ap)
2832{
2833	return ATA_CBL_SATA;
2834}
2835
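/*
 * Illustrative sketch: the cable helpers above are normally plugged
 * into a driver's struct ata_port_operations as the ->cable_detect()
 * hook.  The ops structure below is a hypothetical example, not a real
 * driver; actual PATA drivers usually inherit from an SFF/BMDMA ops
 * template rather than ata_base_port_ops.
 */
static const struct ata_port_operations example_pata_port_ops = {
	.inherits	= &ata_base_port_ops,
	/* hardwire a 40-wire cable, limiting UDMA modes accordingly */
	.cable_detect	= ata_cable_40wire,
};
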
2836/**
2837 *	ata_bus_probe - Reset and probe ATA bus
2838 *	@ap: Bus to probe
2839 *
2840 *	Master ATA bus probing function.  Initiates a hardware-dependent
2841 *	bus reset, then attempts to identify any devices found on
2842 *	the bus.
2843 *
2844 *	LOCKING:
2845 *	PCI/etc. bus probe sem.
2846 *
2847 *	RETURNS:
2848 *	Zero on success, negative errno otherwise.
2849 */
2850
2851int ata_bus_probe(struct ata_port *ap)
2852{
2853	unsigned int classes[ATA_MAX_DEVICES];
2854	int tries[ATA_MAX_DEVICES];
2855	int rc;
2856	struct ata_device *dev;
2857
2858	ata_for_each_dev(dev, &ap->link, ALL)
2859		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2860
2861 retry:
2862	ata_for_each_dev(dev, &ap->link, ALL) {
2863		/* If we issue an SRST then an ATA drive (not ATAPI)
2864		 * may change configuration and be in PIO0 timing. If
2865		 * we do a hard reset (or are coming from power on)
2866		 * this is true for ATA or ATAPI. Until we've set a
2867		 * suitable controller mode we should not touch the
2868		 * bus as we may be talking too fast.
2869		 */
2870		dev->pio_mode = XFER_PIO_0;
2871		dev->dma_mode = 0xff;
2872
2873		/* If the controller has a pio mode setup function
2874		 * then use it to set the chipset to rights. Don't
2875		 * touch the DMA setup as that will be dealt with when
2876		 * configuring devices.
2877		 */
2878		if (ap->ops->set_piomode)
2879			ap->ops->set_piomode(ap, dev);
2880	}
2881
2882	/* reset and determine device classes */
2883	ap->ops->phy_reset(ap);
2884
2885	ata_for_each_dev(dev, &ap->link, ALL) {
2886		if (dev->class != ATA_DEV_UNKNOWN)
2887			classes[dev->devno] = dev->class;
2888		else
2889			classes[dev->devno] = ATA_DEV_NONE;
2890
2891		dev->class = ATA_DEV_UNKNOWN;
2892	}
2893
2894	/* read IDENTIFY page and configure devices. We have to do the identify
2895	   specific sequence bass-ackwards so that PDIAG- is released by
2896	   the slave device */
2897
2898	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2899		if (tries[dev->devno])
2900			dev->class = classes[dev->devno];
2901
2902		if (!ata_dev_enabled(dev))
2903			continue;
2904
2905		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2906				     dev->id);
2907		if (rc)
2908			goto fail;
2909	}
2910
2911	/* Now ask for the cable type as PDIAG- should have been released */
2912	if (ap->ops->cable_detect)
2913		ap->cbl = ap->ops->cable_detect(ap);
2914
2915	/* We may have SATA bridge glue hiding here irrespective of
2916	 * the reported cable types and sensed types.  When SATA
2917	 * drives indicate we have a bridge, we don't know which end
2918	 * of the link the bridge is on, which is a problem.
2919	 */
2920	ata_for_each_dev(dev, &ap->link, ENABLED)
2921		if (ata_id_is_sata(dev->id))
2922			ap->cbl = ATA_CBL_SATA;
2923
2924	/* After the identify sequence we can now set up the devices. We do
2925	   this in the normal order so that the user doesn't get confused */
2926
2927	ata_for_each_dev(dev, &ap->link, ENABLED) {
2928		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2929		rc = ata_dev_configure(dev);
2930		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2931		if (rc)
2932			goto fail;
2933	}
2934
2935	/* configure transfer mode */
2936	rc = ata_set_mode(&ap->link, &dev);
2937	if (rc)
2938		goto fail;
2939
2940	ata_for_each_dev(dev, &ap->link, ENABLED)
2941		return 0;
2942
2943	return -ENODEV;
2944
2945 fail:
2946	tries[dev->devno]--;
2947
2948	switch (rc) {
2949	case -EINVAL:
2950		/* eeek, something went very wrong, give up */
2951		tries[dev->devno] = 0;
2952		break;
2953
2954	case -ENODEV:
2955		/* give it just one more chance */
2956		tries[dev->devno] = min(tries[dev->devno], 1);
2957		/* fall through */
2958	case -EIO:
2959		if (tries[dev->devno] == 1) {
2960			/* This is the last chance, better to slow
2961			 * down than lose it.
2962			 */
2963			sata_down_spd_limit(&ap->link, 0);
2964			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2965		}
2966	}
2967
2968	if (!tries[dev->devno])
2969		ata_dev_disable(dev);
2970
2971	goto retry;
2972}
2973
2974/**
2975 *	sata_print_link_status - Print SATA link status
2976 *	@link: SATA link to printk link status about
2977 *
2978 *	This function prints link speed and status of a SATA link.
2979 *
2980 *	LOCKING:
2981 *	None.
2982 */
2983static void sata_print_link_status(struct ata_link *link)
2984{
2985	u32 sstatus, scontrol, tmp;
2986
2987	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2988		return;
2989	sata_scr_read(link, SCR_CONTROL, &scontrol);
2990
2991	if (ata_phys_link_online(link)) {
2992		tmp = (sstatus >> 4) & 0xf;
2993		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2994			      sata_spd_string(tmp), sstatus, scontrol);
2995	} else {
2996		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2997			      sstatus, scontrol);
2998	}
2999}
3000
3001/**
3002 *	ata_dev_pair		-	return other device on cable
3003 *	@adev: device
3004 *
3005 *	Obtain the other device on the same cable, or if none is
3006 *	present NULL is returned
3007 */
3008
3009struct ata_device *ata_dev_pair(struct ata_device *adev)
3010{
3011	struct ata_link *link = adev->link;
3012	struct ata_device *pair = &link->device[1 - adev->devno];
3013	if (!ata_dev_enabled(pair))
3014		return NULL;
3015	return pair;
3016}
3017
3018/**
3019 *	sata_down_spd_limit - adjust SATA spd limit downward
3020 *	@link: Link to adjust SATA spd limit for
3021 *	@spd_limit: Additional limit
3022 *
3023 *	Adjust SATA spd limit of @link downward.  Note that this
3024 *	function only adjusts the limit.  The change must be applied
3025 *	using sata_set_spd().
3026 *
3027 *	If @spd_limit is non-zero, the speed is limited to equal to or
3028 *	lower than @spd_limit if such speed is supported.  If
3029 *	@spd_limit is slower than any supported speed, only the lowest
3030 *	supported speed is allowed.
3031 *
3032 *	LOCKING:
3033 *	Inherited from caller.
3034 *
3035 *	RETURNS:
3036 *	0 on success, negative errno on failure
3037 */
3038int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3039{
3040	u32 sstatus, spd, mask;
3041	int rc, bit;
3042
3043	if (!sata_scr_valid(link))
3044		return -EOPNOTSUPP;
3045
3046	/* If SCR can be read, use it to determine the current SPD.
3047	 * If not, use cached value in link->sata_spd.
3048	 */
3049	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3050	if (rc == 0 && ata_sstatus_online(sstatus))
3051		spd = (sstatus >> 4) & 0xf;
3052	else
3053		spd = link->sata_spd;
3054
3055	mask = link->sata_spd_limit;
3056	if (mask <= 1)
3057		return -EINVAL;
3058
3059	/* unconditionally mask off the highest bit */
3060	bit = fls(mask) - 1;
3061	mask &= ~(1 << bit);
3062
3063	/*
3064	 * Mask off all speeds higher than or equal to the current one.  At
3065	 * this point, if current SPD is not available and we previously
3066	 * recorded the link speed from SStatus, the driver has already
3067	 * masked off the highest bit so mask should already be 1 or 0.
3068	 * Otherwise, we should not force 1.5Gbps on a link where we have
3069	 * not previously recorded speed from SStatus.  Just return in this
3070	 * case.
3071	 */
3072	if (spd > 1)
3073		mask &= (1 << (spd - 1)) - 1;
3074	else
3075		return -EINVAL;
3076
3077	/* were we already at the bottom? */
3078	if (!mask)
3079		return -EINVAL;
3080
3081	if (spd_limit) {
3082		if (mask & ((1 << spd_limit) - 1))
3083			mask &= (1 << spd_limit) - 1;
3084		else {
3085			bit = ffs(mask) - 1;
3086			mask = 1 << bit;
3087		}
3088	}
3089
3090	link->sata_spd_limit = mask;
3091
3092	ata_link_warn(link, "limiting SATA link speed to %s\n",
3093		      sata_spd_string(fls(mask)));
3094
3095	return 0;
3096}
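
/*
 * Worked example (illustrative): with the link currently at gen2
 * (spd == 2) and sata_spd_limit == 0x7 (gen1/2/3 allowed), the code
 * above first drops the highest bit (mask becomes 0x3) and then keeps
 * only speeds below the current one (mask &= (1 << 1) - 1, i.e. 0x1),
 * so the next reset is limited to 1.5 Gbps.
 */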
3097
3098static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3099{
3100	struct ata_link *host_link = &link->ap->link;
3101	u32 limit, target, spd;
3102
3103	limit = link->sata_spd_limit;
3104
3105	/* Don't configure downstream link faster than upstream link.
3106	 * It doesn't speed up anything and some PMPs choke on such
3107	 * configuration.
3108	 */
3109	if (!ata_is_host_link(link) && host_link->sata_spd)
3110		limit &= (1 << host_link->sata_spd) - 1;
3111
3112	if (limit == UINT_MAX)
3113		target = 0;
3114	else
3115		target = fls(limit);
3116
3117	spd = (*scontrol >> 4) & 0xf;
3118	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3119
3120	return spd != target;
3121}
3122
3123/**
3124 *	sata_set_spd_needed - is SATA spd configuration needed
3125 *	@link: Link in question
3126 *
3127 *	Test whether the spd limit in SControl matches
3128 *	@link->sata_spd_limit.  This function is used to determine
3129 *	whether hardreset is necessary to apply SATA spd
3130 *	configuration.
3131 *
3132 *	LOCKING:
3133 *	Inherited from caller.
3134 *
3135 *	RETURNS:
3136 *	1 if SATA spd configuration is needed, 0 otherwise.
3137 */
3138static int sata_set_spd_needed(struct ata_link *link)
3139{
3140	u32 scontrol;
3141
3142	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3143		return 1;
3144
3145	return __sata_set_spd_needed(link, &scontrol);
3146}
3147
3148/**
3149 *	sata_set_spd - set SATA spd according to spd limit
3150 *	@link: Link to set SATA spd for
3151 *
3152 *	Set SATA spd of @link according to sata_spd_limit.
3153 *
3154 *	LOCKING:
3155 *	Inherited from caller.
3156 *
3157 *	RETURNS:
3158 *	0 if spd doesn't need to be changed, 1 if spd has been
3159 *	changed.  Negative errno if SCR registers are inaccessible.
3160 */
3161int sata_set_spd(struct ata_link *link)
3162{
3163	u32 scontrol;
3164	int rc;
3165
3166	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3167		return rc;
3168
3169	if (!__sata_set_spd_needed(link, &scontrol))
3170		return 0;
3171
3172	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3173		return rc;
3174
3175	return 1;
3176}
3177
3178/*
3179 * This mode timing computation functionality is ported over from
3180 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3181 */
3182/*
3183 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3184 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3185 * for UDMA6, which is currently supported only by Maxtor drives.
3186 *
3187 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3188 */
3189
3190static const struct ata_timing ata_timing[] = {
3191/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
3192	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
3193	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
3194	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
3195	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
3196	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
3197	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
3198	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
3199
3200	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
3201	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
3202	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
3203
3204	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
3205	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
3206	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
3207	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
3208	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
3209
3210/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
3211	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
3212	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
3213	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
3214	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
3215	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
3216	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
3217	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
3218
3219	{ 0xFF }
3220};
3221
3222#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3223#define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)
3224
3225static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3226{
3227	q->setup	= EZ(t->setup,       T);
3228	q->act8b	= EZ(t->act8b,       T);
3229	q->rec8b	= EZ(t->rec8b,       T);
3230	q->cyc8b	= EZ(t->cyc8b,       T);
3231	q->active	= EZ(t->active,      T);
3232	q->recover	= EZ(t->recover,     T);
3233	q->dmack_hold	= EZ(t->dmack_hold,  T);
3234	q->cycle	= EZ(t->cycle,       T);
3235	q->udma		= EZ(t->udma,       UT);
3236}
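
/*
 * Illustrative arithmetic for the quantization above: ENOUGH() is a
 * ceiling division, so ENOUGH(70, 30) == 3, i.e. a 70 ns requirement
 * needs three periods of a 30 ns clock.  EZ() keeps zero entries at
 * zero (absent table values) and otherwise rounds up v * 1000 against
 * the given unit, e.g. EZ(25, 1000) == 25.
 */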
3237
3238void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3239		      struct ata_timing *m, unsigned int what)
3240{
3241	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3242	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3243	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3244	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3245	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3246	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3247	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3248	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3249	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3250}
3251
3252const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3253{
3254	const struct ata_timing *t = ata_timing;
3255
3256	while (xfer_mode > t->mode)
3257		t++;
3258
3259	if (xfer_mode == t->mode)
3260		return t;
3261
3262	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3263			__func__, xfer_mode);
3264
3265	return NULL;
3266}
3267
3268int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3269		       struct ata_timing *t, int T, int UT)
3270{
3271	const u16 *id = adev->id;
3272	const struct ata_timing *s;
3273	struct ata_timing p;
3274
3275	/*
3276	 * Find the mode.
3277	 */
3278
3279	if (!(s = ata_timing_find_mode(speed)))
3280		return -EINVAL;
3281
3282	memcpy(t, s, sizeof(*s));
3283
3284	/*
3285	 * If the drive is an EIDE drive, it can tell us it needs extended
3286	 * PIO/MW_DMA cycle timing.
3287	 */
3288
3289	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3290		memset(&p, 0, sizeof(p));
3291
3292		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3293			if (speed <= XFER_PIO_2)
3294				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3295			else if ((speed <= XFER_PIO_4) ||
3296				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3297				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3298		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3299			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3300
3301		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3302	}
3303
3304	/*
3305	 * Convert the timing to bus clock counts.
3306	 */
3307
3308	ata_timing_quantize(t, t, T, UT);
3309
3310	/*
3311	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3312	 * S.M.A.R.T. and some other commands. We have to ensure that the
3313	 * DMA cycle timing is no faster than the fastest PIO timing.
3314	 */
3315
3316	if (speed > XFER_PIO_6) {
3317		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3318		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3319	}
3320
3321	/*
3322	 * Lengthen active & recovery time so that cycle time is correct.
3323	 */
3324
3325	if (t->act8b + t->rec8b < t->cyc8b) {
3326		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3327		t->rec8b = t->cyc8b - t->act8b;
3328	}
3329
3330	if (t->active + t->recover < t->cycle) {
3331		t->active += (t->cycle - (t->active + t->recover)) / 2;
3332		t->recover = t->cycle - t->active;
3333	}
3334
3335	/* In a few cases quantisation may produce enough errors to
3336	   leave t->cycle too low for the sum of active and recovery;
3337	   if so we must correct this */
3338	if (t->active + t->recover > t->cycle)
3339		t->cycle = t->active + t->recover;
3340
3341	return 0;
3342}
3343
3344/**
3345 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3346 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3347 *	@cycle: cycle duration in ns
3348 *
3349 *	Return matching xfer mode for @cycle.  The returned mode is of
3350 *	the transfer type specified by @xfer_shift.  If @cycle is too
3351 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3352 *	than the fastest known mode, the fastest mode is returned.
3353 *
3354 *	LOCKING:
3355 *	None.
3356 *
3357 *	RETURNS:
3358 *	Matching xfer_mode, 0xff if no match found.
3359 */
3360u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3361{
3362	u8 base_mode = 0xff, last_mode = 0xff;
3363	const struct ata_xfer_ent *ent;
3364	const struct ata_timing *t;
3365
3366	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3367		if (ent->shift == xfer_shift)
3368			base_mode = ent->base;
3369
3370	for (t = ata_timing_find_mode(base_mode);
3371	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3372		unsigned short this_cycle;
3373
3374		switch (xfer_shift) {
3375		case ATA_SHIFT_PIO:
3376		case ATA_SHIFT_MWDMA:
3377			this_cycle = t->cycle;
3378			break;
3379		case ATA_SHIFT_UDMA:
3380			this_cycle = t->udma;
3381			break;
3382		default:
3383			return 0xff;
3384		}
3385
3386		if (cycle > this_cycle)
3387			break;
3388
3389		last_mode = t->mode;
3390	}
3391
3392	return last_mode;
3393}
3394
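/*
 * Illustrative use of ata_timing_cycle2mode(): with the UDMA cycle
 * times in the table above, a 60 ns cycle maps to XFER_UDMA_2 and a
 * cycle slower than UDMA0's 120 ns yields 0xff (no match).  The
 * wrapper below is hypothetical, not a libata API.
 */
static u8 example_udma_mode_for_cycle(int cycle_ns)
{
	return ata_timing_cycle2mode(ATA_SHIFT_UDMA, cycle_ns);
}
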
3395/**
3396 *	ata_down_xfermask_limit - adjust dev xfer masks downward
3397 *	@dev: Device to adjust xfer masks
3398 *	@sel: ATA_DNXFER_* selector
3399 *
3400 *	Adjust xfer masks of @dev downward.  Note that this function
3401 *	does not apply the change.  Invoking ata_set_mode() afterwards
3402 *	will apply the limit.
3403 *
3404 *	LOCKING:
3405 *	Inherited from caller.
3406 *
3407 *	RETURNS:
3408 *	0 on success, negative errno on failure
3409 */
3410int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3411{
3412	char buf[32];
3413	unsigned long orig_mask, xfer_mask;
3414	unsigned long pio_mask, mwdma_mask, udma_mask;
3415	int quiet, highbit;
3416
3417	quiet = !!(sel & ATA_DNXFER_QUIET);
3418	sel &= ~ATA_DNXFER_QUIET;
3419
3420	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3421						  dev->mwdma_mask,
3422						  dev->udma_mask);
3423	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3424
3425	switch (sel) {
3426	case ATA_DNXFER_PIO:
3427		highbit = fls(pio_mask) - 1;
3428		pio_mask &= ~(1 << highbit);
3429		break;
3430
3431	case ATA_DNXFER_DMA:
3432		if (udma_mask) {
3433			highbit = fls(udma_mask) - 1;
3434			udma_mask &= ~(1 << highbit);
3435			if (!udma_mask)
3436				return -ENOENT;
3437		} else if (mwdma_mask) {
3438			highbit = fls(mwdma_mask) - 1;
3439			mwdma_mask &= ~(1 << highbit);
3440			if (!mwdma_mask)
3441				return -ENOENT;
3442		}
3443		break;
3444
3445	case ATA_DNXFER_40C:
3446		udma_mask &= ATA_UDMA_MASK_40C;
3447		break;
3448
3449	case ATA_DNXFER_FORCE_PIO0:
3450		pio_mask &= 1;
3451		/* fall through */
3452	case ATA_DNXFER_FORCE_PIO:
3453		mwdma_mask = 0;
3454		udma_mask = 0;
3455		break;
3456
3457	default:
3458		BUG();
3459	}
3460
3461	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3462
3463	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3464		return -ENOENT;
3465
3466	if (!quiet) {
3467		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3468			snprintf(buf, sizeof(buf), "%s:%s",
3469				 ata_mode_string(xfer_mask),
3470				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3471		else
3472			snprintf(buf, sizeof(buf), "%s",
3473				 ata_mode_string(xfer_mask));
3474
3475		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3476	}
3477
3478	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3479			    &dev->udma_mask);
3480
3481	return 0;
3482}
3483
3484static int ata_dev_set_mode(struct ata_device *dev)
3485{
3486	struct ata_port *ap = dev->link->ap;
3487	struct ata_eh_context *ehc = &dev->link->eh_context;
3488	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3489	const char *dev_err_whine = "";
3490	int ign_dev_err = 0;
3491	unsigned int err_mask = 0;
3492	int rc;
3493
3494	dev->flags &= ~ATA_DFLAG_PIO;
3495	if (dev->xfer_shift == ATA_SHIFT_PIO)
3496		dev->flags |= ATA_DFLAG_PIO;
3497
3498	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3499		dev_err_whine = " (SET_XFERMODE skipped)";
3500	else {
3501		if (nosetxfer)
3502			ata_dev_warn(dev,
3503				     "NOSETXFER but PATA detected - can't "
3504				     "skip SETXFER, might malfunction\n");
3505		err_mask = ata_dev_set_xfermode(dev);
3506	}
3507
3508	if (err_mask & ~AC_ERR_DEV)
3509		goto fail;
3510
3511	/* revalidate */
3512	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3513	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3514	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3515	if (rc)
3516		return rc;
3517
3518	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3519		/* Old CFA may refuse this command, which is just fine */
3520		if (ata_id_is_cfa(dev->id))
3521			ign_dev_err = 1;
3522		/* Catch several broken garbage emulations plus some pre
3523		   ATA devices */
3524		if (ata_id_major_version(dev->id) == 0 &&
3525					dev->pio_mode <= XFER_PIO_2)
3526			ign_dev_err = 1;
3527		/* Some very old devices and some bad newer ones fail
3528		   any kind of SET_XFERMODE request but support PIO0-2
3529		   timings and no IORDY */
3530		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3531			ign_dev_err = 1;
3532	}
3533	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3534	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3535	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3536	    dev->dma_mode == XFER_MW_DMA_0 &&
3537	    (dev->id[63] >> 8) & 1)
3538		ign_dev_err = 1;
3539
3540	/* if the device is actually configured correctly, ignore dev err */
3541	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3542		ign_dev_err = 1;
3543
3544	if (err_mask & AC_ERR_DEV) {
3545		if (!ign_dev_err)
3546			goto fail;
3547		else
3548			dev_err_whine = " (device error ignored)";
3549	}
3550
3551	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3552		dev->xfer_shift, (int)dev->xfer_mode);
3553
3554	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3555	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
3556		ata_dev_info(dev, "configured for %s%s\n",
3557			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3558			     dev_err_whine);
3559
3560	return 0;
3561
3562 fail:
3563	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3564	return -EIO;
3565}
3566
3567/**
3568 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3569 *	@link: link on which timings will be programmed
3570 *	@r_failed_dev: out parameter for failed device
3571 *
3572 *	Standard implementation of the function used to tune and set
3573 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3574 *	ata_dev_set_mode() fails, pointer to the failing device is
3575 *	returned in @r_failed_dev.
3576 *
3577 *	LOCKING:
3578 *	PCI/etc. bus probe sem.
3579 *
3580 *	RETURNS:
3581 *	0 on success, negative errno otherwise
3582 */
3583
3584int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3585{
3586	struct ata_port *ap = link->ap;
3587	struct ata_device *dev;
3588	int rc = 0, used_dma = 0, found = 0;
3589
3590	/* step 1: calculate xfer_mask */
3591	ata_for_each_dev(dev, link, ENABLED) {
3592		unsigned long pio_mask, dma_mask;
3593		unsigned int mode_mask;
3594
3595		mode_mask = ATA_DMA_MASK_ATA;
3596		if (dev->class == ATA_DEV_ATAPI)
3597			mode_mask = ATA_DMA_MASK_ATAPI;
3598		else if (ata_id_is_cfa(dev->id))
3599			mode_mask = ATA_DMA_MASK_CFA;
3600
3601		ata_dev_xfermask(dev);
3602		ata_force_xfermask(dev);
3603
3604		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3605
3606		if (libata_dma_mask & mode_mask)
3607			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3608						     dev->udma_mask);
3609		else
3610			dma_mask = 0;
3611
3612		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3613		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3614
3615		found = 1;
3616		if (ata_dma_enabled(dev))
3617			used_dma = 1;
3618	}
3619	if (!found)
3620		goto out;
3621
3622	/* step 2: always set host PIO timings */
3623	ata_for_each_dev(dev, link, ENABLED) {
3624		if (dev->pio_mode == 0xff) {
3625			ata_dev_warn(dev, "no PIO support\n");
3626			rc = -EINVAL;
3627			goto out;
3628		}
3629
3630		dev->xfer_mode = dev->pio_mode;
3631		dev->xfer_shift = ATA_SHIFT_PIO;
3632		if (ap->ops->set_piomode)
3633			ap->ops->set_piomode(ap, dev);
3634	}
3635
3636	/* step 3: set host DMA timings */
3637	ata_for_each_dev(dev, link, ENABLED) {
3638		if (!ata_dma_enabled(dev))
3639			continue;
3640
3641		dev->xfer_mode = dev->dma_mode;
3642		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3643		if (ap->ops->set_dmamode)
3644			ap->ops->set_dmamode(ap, dev);
3645	}
3646
3647	/* step 4: update devices' xfer mode */
3648	ata_for_each_dev(dev, link, ENABLED) {
3649		rc = ata_dev_set_mode(dev);
3650		if (rc)
3651			goto out;
3652	}
3653
3654	/* Record simplex status. If we selected DMA then the other
3655	 * host channels are not permitted to do so.
3656	 */
3657	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3658		ap->host->simplex_claimed = ap;
3659
3660 out:
3661	if (rc)
3662		*r_failed_dev = dev;
3663	return rc;
3664}
3665
3666/**
3667 *	ata_wait_ready - wait for link to become ready
3668 *	@link: link to be waited on
3669 *	@deadline: deadline jiffies for the operation
3670 *	@check_ready: callback to check link readiness
3671 *
3672 *	Wait for @link to become ready.  @check_ready should return
3673 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3674 *	link doesn't seem to be occupied, other errno for other error
3675 *	conditions.
3676 *
3677 *	Transient -ENODEV conditions are allowed for
3678 *	ATA_TMOUT_FF_WAIT.
3679 *
3680 *	LOCKING:
3681 *	EH context.
3682 *
3683 *	RETURNS:
3684 *	0 if @link is ready before @deadline; otherwise, -errno.
3685 */
3686int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3687		   int (*check_ready)(struct ata_link *link))
3688{
3689	unsigned long start = jiffies;
3690	unsigned long nodev_deadline;
3691	int warned = 0;
3692
3693	/* choose which 0xff timeout to use, read comment in libata.h */
3694	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3695		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3696	else
3697		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3698
3699	/* Slave readiness can't be tested separately from master.  On
3700	 * M/S emulation configuration, this function should be called
3701	 * only on the master and it will handle both master and slave.
3702	 */
3703	WARN_ON(link == link->ap->slave_link);
3704
3705	if (time_after(nodev_deadline, deadline))
3706		nodev_deadline = deadline;
3707
3708	while (1) {
3709		unsigned long now = jiffies;
3710		int ready, tmp;
3711
3712		ready = tmp = check_ready(link);
3713		if (ready > 0)
3714			return 0;
3715
3716		/*
3717		 * -ENODEV could be transient.  Ignore -ENODEV if link
3718		 * is online.  Also, some SATA devices take a long
3719		 * time to clear 0xff after reset.  Wait for
3720		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3721		 * offline.
3722		 *
3723		 * Note that some PATA controllers (pata_ali) explode
3724		 * if status register is read more than once when
3725		 * there's no device attached.
3726		 */
3727		if (ready == -ENODEV) {
3728			if (ata_link_online(link))
3729				ready = 0;
3730			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3731				 !ata_link_offline(link) &&
3732				 time_before(now, nodev_deadline))
3733				ready = 0;
3734		}
3735
3736		if (ready)
3737			return ready;
3738		if (time_after(now, deadline))
3739			return -EBUSY;
3740
3741		if (!warned && time_after(now, start + 5 * HZ) &&
3742		    (deadline - now > 3 * HZ)) {
3743			ata_link_warn(link,
3744				"link is slow to respond, please be patient "
3745				"(ready=%d)\n", tmp);
3746			warned = 1;
3747		}
3748
3749		ata_msleep(link->ap, 50);
3750	}
3751}
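
/*
 * Illustrative sketch of a @check_ready callback as described above:
 * return a positive value when ready, 0 when busy, -ENODEV when the
 * port looks empty.  example_read_status() is a hypothetical register
 * read; SFF drivers typically build this on ata_sff_check_ready().
 */
static u8 example_read_status(struct ata_port *ap);	/* hypothetical */

static int example_check_ready(struct ata_link *link)
{
	u8 status = example_read_status(link->ap);

	/* ata_check_ready() maps a BSY status to 0 and 0xff to -ENODEV */
	return ata_check_ready(status);
}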
3752
3753/**
3754 *	ata_wait_after_reset - wait for link to become ready after reset
3755 *	@link: link to be waited on
3756 *	@deadline: deadline jiffies for the operation
3757 *	@check_ready: callback to check link readiness
3758 *
3759 *	Wait for @link to become ready after reset.
3760 *
3761 *	LOCKING:
3762 *	EH context.
3763 *
3764 *	RETURNS:
3765 *	0 if @link is ready before @deadline; otherwise, -errno.
3766 */
3767int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3768				int (*check_ready)(struct ata_link *link))
3769{
3770	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3771
3772	return ata_wait_ready(link, deadline, check_ready);
3773}
3774
3775/**
3776 *	sata_link_debounce - debounce SATA phy status
3777 *	@link: ATA link to debounce SATA phy status for
3778 *	@params: timing parameters { interval, duration, timeout } in msec
3779 *	@deadline: deadline jiffies for the operation
3780 *
3781 *	Make sure SStatus of @link reaches stable state, determined by
3782 *	holding the same value where DET is not 1 for @duration polled
3783 *	every @interval, before @timeout.  Timeout constrains the
3784 *	beginning of the stable state.  Because DET gets stuck at 1 on
3785 *	some controllers after hot unplugging, this function waits
3786 *	until the timeout, then returns 0 if DET is stable at 1.
3787 *
3788 *	@timeout is further limited by @deadline.  The sooner of the
3789 *	two is used.
3790 *
3791 *	LOCKING:
3792 *	Kernel thread context (may sleep)
3793 *
3794 *	RETURNS:
3795 *	0 on success, -errno on failure.
3796 */
3797int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3798		       unsigned long deadline)
3799{
3800	unsigned long interval = params[0];
3801	unsigned long duration = params[1];
3802	unsigned long last_jiffies, t;
3803	u32 last, cur;
3804	int rc;
3805
3806	t = ata_deadline(jiffies, params[2]);
3807	if (time_before(t, deadline))
3808		deadline = t;
3809
3810	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3811		return rc;
3812	cur &= 0xf;
3813
3814	last = cur;
3815	last_jiffies = jiffies;
3816
3817	while (1) {
3818		ata_msleep(link->ap, interval);
3819		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3820			return rc;
3821		cur &= 0xf;
3822
3823		/* DET stable? */
3824		if (cur == last) {
3825			if (cur == 1 && time_before(jiffies, deadline))
3826				continue;
3827			if (time_after(jiffies,
3828				       ata_deadline(last_jiffies, duration)))
3829				return 0;
3830			continue;
3831		}
3832
3833		/* unstable, start over */
3834		last = cur;
3835		last_jiffies = jiffies;
3836
3837		/* Check deadline.  If debouncing failed, return
3838		 * -EPIPE to tell upper layer to lower link speed.
3839		 */
3840		if (time_after(jiffies, deadline))
3841			return -EPIPE;
3842	}
3843}
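
/*
 * Illustrative sketch: debouncing a link with the timing selected for the
 * current EH context, as the resume and reset paths below do.  The wrapper
 * name is hypothetical.
 */
static int example_debounce_link(struct ata_link *link, unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);

	return sata_link_debounce(link, timing, deadline);
}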
3844
3845/**
3846 *	sata_link_resume - resume SATA link
3847 *	@link: ATA link to resume SATA
3848 *	@params: timing parameters { interval, duration, timeout } in msec
3849 *	@deadline: deadline jiffies for the operation
3850 *
3851 *	Resume SATA phy @link and debounce it.
3852 *
3853 *	LOCKING:
3854 *	Kernel thread context (may sleep)
3855 *
3856 *	RETURNS:
3857 *	0 on success, -errno on failure.
3858 */
3859int sata_link_resume(struct ata_link *link, const unsigned long *params,
3860		     unsigned long deadline)
3861{
3862	int tries = ATA_LINK_RESUME_TRIES;
3863	u32 scontrol, serror;
3864	int rc;
3865
3866	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3867		return rc;
3868
3869	/*
3870	 * Writes to SControl sometimes get ignored under certain
3871	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3872	 * cleared.
3873	 */
3874	do {
3875		scontrol = (scontrol & 0x0f0) | 0x300;
3876		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3877			return rc;
3878		/*
3879		 * Some PHYs react badly if SStatus is pounded
3880		 * immediately after resuming.  Delay 200ms before
3881		 * debouncing.
3882		 */
3883		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3884			ata_msleep(link->ap, 200);
3885
3886		/* is SControl restored correctly? */
3887		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3888			return rc;
3889	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3890
3891	if ((scontrol & 0xf0f) != 0x300) {
3892		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3893			     scontrol);
3894		return 0;
3895	}
3896
3897	if (tries < ATA_LINK_RESUME_TRIES)
3898		ata_link_warn(link, "link resume succeeded after %d retries\n",
3899			      ATA_LINK_RESUME_TRIES - tries);
3900
3901	if ((rc = sata_link_debounce(link, params, deadline)))
3902		return rc;
3903
3904	/* clear SError, some PHYs require this even for SRST to work */
3905	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3906		rc = sata_scr_write(link, SCR_ERROR, serror);
3907
3908	return rc != -EINVAL ? rc : 0;
3909}
3910
3911/**
3912 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3913 *	@link: ATA link to manipulate SControl for
3914 *	@policy: LPM policy to configure
3915 *	@spm_wakeup: initiate LPM transition to active state
3916 *
3917 *	Manipulate the IPM field of the SControl register of @link
3918 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3919 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3920 *	the link.  This function also clears PHYRDY_CHG before
3921 *	returning.
3922 *
3923 *	LOCKING:
3924 *	EH context.
3925 *
3926 *	RETURNS:
3927 *	0 on success, -errno otherwise.
3928 */
3929int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3930		      bool spm_wakeup)
3931{
3932	struct ata_eh_context *ehc = &link->eh_context;
3933	bool woken_up = false;
3934	u32 scontrol;
3935	int rc;
3936
3937	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3938	if (rc)
3939		return rc;
3940
3941	switch (policy) {
3942	case ATA_LPM_MAX_POWER:
3943		/* disable all LPM transitions */
3944		scontrol |= (0x7 << 8);
3945		/* initiate transition to active state */
3946		if (spm_wakeup) {
3947			scontrol |= (0x4 << 12);
3948			woken_up = true;
3949		}
3950		break;
3951	case ATA_LPM_MED_POWER:
3952		/* allow LPM to PARTIAL */
3953		scontrol &= ~(0x1 << 8);
3954		scontrol |= (0x6 << 8);
3955		break;
3956	case ATA_LPM_MED_POWER_WITH_DIPM:
3957	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
3958	case ATA_LPM_MIN_POWER:
3959		if (ata_link_nr_enabled(link) > 0)
3960			/* no restrictions on LPM transitions */
3961			scontrol &= ~(0x7 << 8);
3962		else {
3963			/* empty port, power off */
3964			scontrol &= ~0xf;
3965			scontrol |= (0x1 << 2);
3966		}
3967		break;
3968	default:
3969		WARN_ON(1);
3970	}
3971
3972	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3973	if (rc)
3974		return rc;
3975
3976	/* give the link time to transit out of LPM state */
3977	if (woken_up)
3978		msleep(10);
3979
3980	/* clear PHYRDY_CHG from SError */
3981	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3982	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3983}
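
/*
 * Illustrative sketch: forcing a link out of a low power state before
 * talking to the device, the way EH does when LPM must be disabled.  The
 * helper name is hypothetical and error handling is reduced to a warning.
 */
static void example_lpm_wakeup(struct ata_link *link)
{
	int rc;

	/* disable all LPM transitions and request a transition to active */
	rc = sata_link_scr_lpm(link, ATA_LPM_MAX_POWER, true);
	if (rc)
		ata_link_warn(link, "failed to wake up link (errno=%d)\n", rc);
}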
3984
3985/**
3986 *	ata_std_prereset - prepare for reset
3987 *	@link: ATA link to be reset
3988 *	@deadline: deadline jiffies for the operation
3989 *
3990 *	@link is about to be reset.  Initialize it.  A failure from
3991 *	prereset makes libata abort the whole reset sequence and give up
3992 *	on that port, so prereset should be best-effort.  It does its
3993 *	best to prepare for the reset sequence, but if things go wrong it
3994 *	should just whine, not fail.
3995 *
3996 *	LOCKING:
3997 *	Kernel thread context (may sleep)
3998 *
3999 *	RETURNS:
4000 *	0 on success, -errno otherwise.
4001 */
4002int ata_std_prereset(struct ata_link *link, unsigned long deadline)
4003{
4004	struct ata_port *ap = link->ap;
4005	struct ata_eh_context *ehc = &link->eh_context;
4006	const unsigned long *timing = sata_ehc_deb_timing(ehc);
4007	int rc;
4008
4009	/* if we're about to do hardreset, nothing more to do */
4010	if (ehc->i.action & ATA_EH_HARDRESET)
4011		return 0;
4012
4013	/* if SATA, resume link */
4014	if (ap->flags & ATA_FLAG_SATA) {
4015		rc = sata_link_resume(link, timing, deadline);
4016		/* whine about phy resume failure but proceed */
4017		if (rc && rc != -EOPNOTSUPP)
4018			ata_link_warn(link,
4019				      "failed to resume link for reset (errno=%d)\n",
4020				      rc);
4021	}
4022
4023	/* no point in trying softreset on offline link */
4024	if (ata_phys_link_offline(link))
4025		ehc->i.action &= ~ATA_EH_SOFTRESET;
4026
4027	return 0;
4028}
4029
4030/**
4031 *	sata_link_hardreset - reset link via SATA phy reset
4032 *	@link: link to reset
4033 *	@timing: timing parameters { interval, duration, timeout } in msec
4034 *	@deadline: deadline jiffies for the operation
4035 *	@online: optional out parameter indicating link onlineness
4036 *	@check_ready: optional callback to check link readiness
4037 *
4038 *	Perform a SATA phy reset on @link using the DET bits of the
4039 *	SControl register.  After hardreset, link readiness is waited
4040 *	upon using ata_wait_ready() if @check_ready is specified.  LLDs
4041 *	may omit @check_ready and perform the wait themselves after this
4042 *	function returns.  Device classification is the LLD's
4043 *	responsibility.
4044 *
4045 *	*@online is set to one iff reset succeeded and @link is online
4046 *	after reset.
4047 *
4048 *	LOCKING:
4049 *	Kernel thread context (may sleep)
4050 *
4051 *	RETURNS:
4052 *	0 on success, -errno otherwise.
4053 */
4054int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4055			unsigned long deadline,
4056			bool *online, int (*check_ready)(struct ata_link *))
4057{
4058	u32 scontrol;
4059	int rc;
4060
4061	DPRINTK("ENTER\n");
4062
4063	if (online)
4064		*online = false;
4065
4066	if (sata_set_spd_needed(link)) {
4067		/* SATA spec says nothing about how to reconfigure
4068		 * spd.  To be on the safe side, turn off phy during
4069		 * reconfiguration.  This works for at least ICH7 AHCI
4070		 * and Sil3124.
4071		 */
4072		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4073			goto out;
4074
4075		scontrol = (scontrol & 0x0f0) | 0x304;
4076
4077		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4078			goto out;
4079
4080		sata_set_spd(link);
4081	}
4082
4083	/* issue phy wake/reset */
4084	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4085		goto out;
4086
4087	scontrol = (scontrol & 0x0f0) | 0x301;
4088
4089	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4090		goto out;
4091
4092	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4093	 * 10.4.2 says at least 1 ms.
4094	 */
4095	ata_msleep(link->ap, 1);
4096
4097	/* bring link back */
4098	rc = sata_link_resume(link, timing, deadline);
4099	if (rc)
4100		goto out;
4101	/* if link is offline nothing more to do */
4102	if (ata_phys_link_offline(link))
4103		goto out;
4104
4105	/* Link is online.  From this point, -ENODEV too is an error. */
4106	if (online)
4107		*online = true;
4108
4109	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
4110		/* If PMP is supported, we have to do follow-up SRST.
4111		 * Some PMPs don't send D2H Reg FIS after hardreset if
4112		 * the first port is empty.  Wait only for
4113		 * ATA_TMOUT_PMP_SRST_WAIT.
4114		 */
4115		if (check_ready) {
4116			unsigned long pmp_deadline;
4117
4118			pmp_deadline = ata_deadline(jiffies,
4119						    ATA_TMOUT_PMP_SRST_WAIT);
4120			if (time_after(pmp_deadline, deadline))
4121				pmp_deadline = deadline;
4122			ata_wait_ready(link, pmp_deadline, check_ready);
4123		}
4124		rc = -EAGAIN;
4125		goto out;
4126	}
4127
4128	rc = 0;
4129	if (check_ready)
4130		rc = ata_wait_ready(link, deadline, check_ready);
4131 out:
4132	if (rc && rc != -EAGAIN) {
4133		/* online is set iff link is online && reset succeeded */
4134		if (online)
4135			*online = false;
4136		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4137	}
4138	DPRINTK("EXIT, rc=%d\n", rc);
4139	return rc;
4140}
4141
4142/**
4143 *	sata_std_hardreset - COMRESET w/o waiting or classification
4144 *	@link: link to reset
4145 *	@class: resulting class of attached device
4146 *	@deadline: deadline jiffies for the operation
4147 *
4148 *	Standard SATA COMRESET w/o waiting or classification.
4149 *
4150 *	LOCKING:
4151 *	Kernel thread context (may sleep)
4152 *
4153 *	RETURNS:
4154 *	0 if link offline, -EAGAIN if link online, -errno on errors.
4155 */
4156int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4157		       unsigned long deadline)
4158{
4159	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4160	bool online;
4161	int rc;
4162
4163	/* do hardreset */
4164	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4165	return online ? -EAGAIN : rc;
4166}
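
/*
 * Illustrative sketch: drivers normally consume sata_std_hardreset() by
 * wiring it into their port operations together with the standard reset
 * callbacks rather than calling it directly.  This ops structure is
 * hypothetical.
 */
static const struct ata_port_operations example_port_ops = {
	.inherits	= &sata_port_ops,
	.hardreset	= sata_std_hardreset,
	.postreset	= ata_std_postreset,
};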
4167
4168/**
4169 *	ata_std_postreset - standard postreset callback
4170 *	@link: the target ata_link
4171 *	@classes: classes of attached devices
4172 *
4173 *	This function is invoked after a successful reset.  Note that
4174 *	the device might have been reset more than once using
4175 *	different reset methods before postreset is invoked.
4176 *
4177 *	LOCKING:
4178 *	Kernel thread context (may sleep)
4179 */
4180void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4181{
4182	u32 serror;
4183
4184	DPRINTK("ENTER\n");
4185
4186	/* reset complete, clear SError */
4187	if (!sata_scr_read(link, SCR_ERROR, &serror))
4188		sata_scr_write(link, SCR_ERROR, serror);
4189
4190	/* print link status */
4191	sata_print_link_status(link);
4192
4193	DPRINTK("EXIT\n");
4194}
4195
4196/**
4197 *	ata_dev_same_device - Determine whether new ID matches configured device
4198 *	@dev: device to compare against
4199 *	@new_class: class of the new device
4200 *	@new_id: IDENTIFY page of the new device
4201 *
4202 *	Compare @new_class and @new_id against @dev and determine
4203 *	whether @dev is the device indicated by @new_class and
4204 *	@new_id.
4205 *
4206 *	LOCKING:
4207 *	None.
4208 *
4209 *	RETURNS:
4210 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4211 */
4212static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4213			       const u16 *new_id)
4214{
4215	const u16 *old_id = dev->id;
4216	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4217	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4218
4219	if (dev->class != new_class) {
4220		ata_dev_info(dev, "class mismatch %d != %d\n",
4221			     dev->class, new_class);
4222		return 0;
4223	}
4224
4225	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4226	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4227	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4228	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4229
4230	if (strcmp(model[0], model[1])) {
4231		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4232			     model[0], model[1]);
4233		return 0;
4234	}
4235
4236	if (strcmp(serial[0], serial[1])) {
4237		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4238			     serial[0], serial[1]);
4239		return 0;
4240	}
4241
4242	return 1;
4243}
4244
4245/**
4246 *	ata_dev_reread_id - Re-read IDENTIFY data
4247 *	@dev: target ATA device
4248 *	@readid_flags: read ID flags
4249 *
4250 *	Re-read IDENTIFY page and make sure @dev is still attached to
4251 *	the port.
4252 *
4253 *	LOCKING:
4254 *	Kernel thread context (may sleep)
4255 *
4256 *	RETURNS:
4257 *	0 on success, negative errno otherwise
4258 */
4259int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4260{
4261	unsigned int class = dev->class;
4262	u16 *id = (void *)dev->link->ap->sector_buf;
4263	int rc;
4264
4265	/* read ID data */
4266	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4267	if (rc)
4268		return rc;
4269
4270	/* is the device still there? */
4271	if (!ata_dev_same_device(dev, class, id))
4272		return -ENODEV;
4273
4274	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4275	return 0;
4276}
4277
4278/**
4279 *	ata_dev_revalidate - Revalidate ATA device
4280 *	@dev: device to revalidate
4281 *	@new_class: new class code
4282 *	@readid_flags: read ID flags
4283 *
4284 *	Re-read IDENTIFY page, make sure @dev is still attached to the
4285 *	port and reconfigure it according to the new IDENTIFY page.
4286 *
4287 *	LOCKING:
4288 *	Kernel thread context (may sleep)
4289 *
4290 *	RETURNS:
4291 *	0 on success, negative errno otherwise
4292 */
4293int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4294		       unsigned int readid_flags)
4295{
4296	u64 n_sectors = dev->n_sectors;
4297	u64 n_native_sectors = dev->n_native_sectors;
4298	int rc;
4299
4300	if (!ata_dev_enabled(dev))
4301		return -ENODEV;
4302
4303	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4304	if (ata_class_enabled(new_class) &&
4305	    new_class != ATA_DEV_ATA &&
4306	    new_class != ATA_DEV_ATAPI &&
4307	    new_class != ATA_DEV_ZAC &&
4308	    new_class != ATA_DEV_SEMB) {
4309		ata_dev_info(dev, "class mismatch %u != %u\n",
4310			     dev->class, new_class);
4311		rc = -ENODEV;
4312		goto fail;
4313	}
4314
4315	/* re-read ID */
4316	rc = ata_dev_reread_id(dev, readid_flags);
4317	if (rc)
4318		goto fail;
4319
4320	/* configure device according to the new ID */
4321	rc = ata_dev_configure(dev);
4322	if (rc)
4323		goto fail;
4324
4325	/* verify n_sectors hasn't changed */
4326	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4327	    dev->n_sectors == n_sectors)
4328		return 0;
4329
4330	/* n_sectors has changed */
4331	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4332		     (unsigned long long)n_sectors,
4333		     (unsigned long long)dev->n_sectors);
4334
4335	/*
4336	 * Something could have caused HPA to be unlocked
4337	 * involuntarily.  If n_native_sectors hasn't changed and the
4338	 * new size matches it, keep the device.
4339	 */
4340	if (dev->n_native_sectors == n_native_sectors &&
4341	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4342		ata_dev_warn(dev,
4343			     "new n_sectors matches native, probably "
4344			     "late HPA unlock, n_sectors updated\n");
4345		/* use the larger n_sectors */
4346		return 0;
4347	}
4348
4349	/*
4350	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4351	 * unlocking HPA in those cases.
4352	 *
4353	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4354	 */
4355	if (dev->n_native_sectors == n_native_sectors &&
4356	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4357	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4358		ata_dev_warn(dev,
4359			     "old n_sectors matches native, probably "
4360			     "late HPA lock, will try to unlock HPA\n");
4361		/* try unlocking HPA */
4362		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4363		rc = -EIO;
4364	} else
4365		rc = -ENODEV;
4366
4367	/* restore original n_[native_]sectors and fail */
4368	dev->n_native_sectors = n_native_sectors;
4369	dev->n_sectors = n_sectors;
4370 fail:
4371	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4372	return rc;
4373}
4374
4375struct ata_blacklist_entry {
4376	const char *model_num;
4377	const char *model_rev;
4378	unsigned long horkage;
4379};
4380
4381static const struct ata_blacklist_entry ata_device_blacklist [] = {
4382	/* Devices with DMA related problems under Linux */
4383	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4384	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4385	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4386	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4387	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4388	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4389	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4390	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4391	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4392	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4393	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4394	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4395	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4396	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4397	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4398	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4399	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4400	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4401	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4402	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4403	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4404	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4405	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4406	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4407	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4408	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4409	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4410	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4411	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4412	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
4413	/* Odd clown on sil3726/4726 PMPs */
4414	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4415
4416	/* Weird ATAPI devices */
4417	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4418	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4419	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4420	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4421
4422	/*
4423	 * Causes silent data corruption with higher max sects.
4424	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4425	 */
4426	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4427
4428	/*
4429	 * These devices time out with higher max sects.
4430	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4431	 */
4432	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4433	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4434
4435	/* Devices we expect to fail diagnostics */
4436
4437	/* Devices where NCQ should be avoided */
4438	/* NCQ is slow */
4439	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4440	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4441	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4442	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4443	/* NCQ is broken */
4444	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4445	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4446	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4447	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4448	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4449
4450	/* Seagate NCQ + FLUSH CACHE firmware bug */
4451	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4452						ATA_HORKAGE_FIRMWARE_WARN },
4453
4454	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4455						ATA_HORKAGE_FIRMWARE_WARN },
4456
4457	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4458						ATA_HORKAGE_FIRMWARE_WARN },
4459
4460	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4461						ATA_HORKAGE_FIRMWARE_WARN },
4462
4463	/* Drives which fail FPDMA_AA activation (some may freeze afterwards);
4464	   the ST disks also have LPM issues */
4465	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
4466						ATA_HORKAGE_NOLPM, },
4467	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4468
4469	/* Blacklist entries taken from Silicon Image 3124/3132
4470	   Windows driver .inf file - also several Linux problem reports */
4471	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4472	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4473	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4474
4475	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4476	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4477
4478	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
4479	   SD7SN6S256G and SD8SN8U256G */
4480	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
4481
4482	/* devices which puke on READ_NATIVE_MAX */
4483	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4484	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4485	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4486	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4487
4488	/* this one allows HPA unlocking but fails IOs on the area */
4489	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4490
4491	/* Devices which report 1 sector over size HPA */
4492	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4493	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4494	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4495
4496	/* Devices which get the IVB wrong */
4497	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4498	/* Maybe we should just blacklist TSSTcorp... */
4499	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4500
4501	/* Devices that do not need bridging limits applied */
4502	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4503	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4504
4505	/* Devices which aren't very happy with higher link speeds */
4506	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4507	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4508
4509	/*
4510	 * Devices which choke on SETXFER.  Applies only if both the
4511	 * device and controller are SATA.
4512	 */
4513	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4514	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4515	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4516	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4517	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4518
4519	/* Crucial BX100 SSD 500GB has broken LPM support */
4520	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
4521
4522	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4523	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4524						ATA_HORKAGE_ZERO_AFTER_TRIM |
4525						ATA_HORKAGE_NOLPM, },
4526	/* 512GB MX100 with newer firmware has only LPM issues */
4527	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
4528						ATA_HORKAGE_NOLPM, },
4529
4530	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4531	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4532						ATA_HORKAGE_ZERO_AFTER_TRIM |
4533						ATA_HORKAGE_NOLPM, },
4534	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4535						ATA_HORKAGE_ZERO_AFTER_TRIM |
4536						ATA_HORKAGE_NOLPM, },
4537
4538	/* These specific Samsung models/firmware-revs do not handle LPM well */
4539	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4540	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4541	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
4542	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
4543
4544	/* devices that don't properly handle queued TRIM commands */
4545	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4546						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4547	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4548						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4549	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4550						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4551	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4552						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4553	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4554						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4555	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4556						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4557	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4558						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4559	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4560						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4561	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4562						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4563
4564	/* devices that don't properly handle TRIM commands */
4565	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
4566
4567	/*
4568	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4569	 * (Return Zero After Trim) flags in the ATA Command Set are
4570	 * unreliable in the sense that they only define what happens if
4571	 * the device successfully executed the DSM TRIM command. TRIM
4572	 * is only advisory, however, and the device is free to silently
4573	 * ignore all or parts of the request.
4574	 *
4575	 * Whitelist drives that are known to reliably return zeroes
4576	 * after TRIM.
4577	 */
4578
4579	/*
4580	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4581	 * that model before whitelisting all other intel SSDs.
4582	 */
4583	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4584
4585	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4586	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4587	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4588	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4589	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4590	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4591	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4592	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4593
4594	/*
4595	 * Some WD SATA-I drives spin up and down erratically when the link
4596	 * is put into the slumber mode.  We don't have full list of the
4597	 * affected devices.  Disable LPM if the device matches one of the
4598	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4599	 * lost too.
4600	 *
4601	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4602	 */
4603	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4604	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4605	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4606	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4607	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4608	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4609	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4610
4611	/* End Marker */
4612	{ }
4613};
4614
4615static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4616{
4617	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4618	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4619	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4620
4621	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4622	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4623
4624	while (ad->model_num) {
4625		if (glob_match(ad->model_num, model_num)) {
4626			if (ad->model_rev == NULL)
4627				return ad->horkage;
4628			if (glob_match(ad->model_rev, model_rev))
4629				return ad->horkage;
4630		}
4631		ad++;
4632	}
4633	return 0;
4634}
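
/*
 * Illustrative sketch: how the blacklist lookup is typically consumed,
 * mirroring the device configuration path: merge the returned horkage
 * bits into dev->horkage and test individual quirks.  The helper name is
 * hypothetical.
 */
static void example_apply_horkage(struct ata_device *dev)
{
	dev->horkage |= ata_dev_blacklisted(dev);

	if (dev->horkage & ATA_HORKAGE_NONCQ)
		ata_dev_info(dev, "NCQ disabled by quirk list\n");
}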
4635
4636static int ata_dma_blacklisted(const struct ata_device *dev)
4637{
4638	/* We don't support polling DMA.
4639	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO instead)
4640	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4641	 */
4642	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4643	    (dev->flags & ATA_DFLAG_CDB_INTR))
4644		return 1;
4645	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4646}
4647
4648/**
4649 *	ata_is_40wire		-	check drive side detection
4650 *	@dev: device
4651 *
4652 *	Perform drive side detection decoding, allowing for device vendors
4653 *	who can't follow the documentation.
4654 */
4655
4656static int ata_is_40wire(struct ata_device *dev)
4657{
4658	if (dev->horkage & ATA_HORKAGE_IVB)
4659		return ata_drive_40wire_relaxed(dev->id);
4660	return ata_drive_40wire(dev->id);
4661}
4662
4663/**
4664 *	cable_is_40wire		-	40/80/SATA decider
4665 *	@ap: port to consider
4666 *
4667 *	This function encapsulates the policy for speed management
4668 *	in one place. At the moment we don't cache the result but
4669 *	there is a good case for setting ap->cbl to the result when
4670 *	we are called with unknown cables (and figuring out if it
4671 *	impacts hotplug at all).
4672 *
4673 *	Return 1 if the cable appears to be 40 wire.
4674 */
4675
4676static int cable_is_40wire(struct ata_port *ap)
4677{
4678	struct ata_link *link;
4679	struct ata_device *dev;
4680
4681	/* If the controller thinks we are 40 wire, we are. */
4682	if (ap->cbl == ATA_CBL_PATA40)
4683		return 1;
4684
4685	/* If the controller thinks we are 80 wire, we are. */
4686	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4687		return 0;
4688
4689	/* If the system is known to be 40 wire short cable (e.g. a
4690	 * laptop), then we allow 80 wire modes even if the drive
4691	 * isn't sure.
4692	 */
4693	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4694		return 0;
4695
4696	/* If the controller doesn't know, we scan.
4697	 *
4698	 * Note: We look for all 40 wire detects at this point.  Any
4699	 *       80 wire detect is taken to be 80 wire cable because
4700	 * - in many setups only the one drive (slave if present) will
4701	 *   give a valid detect
4702	 * - if you have a non detect capable drive you don't want it
4703	 *   to colour the choice
4704	 */
4705	ata_for_each_link(link, ap, EDGE) {
4706		ata_for_each_dev(dev, link, ENABLED) {
4707			if (!ata_is_40wire(dev))
4708				return 0;
4709		}
4710	}
4711	return 1;
4712}
4713
4714/**
4715 *	ata_dev_xfermask - Compute supported xfermask of the given device
4716 *	@dev: Device to compute xfermask for
4717 *
4718 *	Compute supported xfermask of @dev and store it in
4719 *	dev->*_mask.  This function is responsible for applying all
4720 *	known limits including host controller limits, device
4721 *	blacklist, etc...
4722 *
4723 *	LOCKING:
4724 *	None.
4725 */
4726static void ata_dev_xfermask(struct ata_device *dev)
4727{
4728	struct ata_link *link = dev->link;
4729	struct ata_port *ap = link->ap;
4730	struct ata_host *host = ap->host;
4731	unsigned long xfer_mask;
4732
4733	/* controller modes available */
4734	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4735				      ap->mwdma_mask, ap->udma_mask);
4736
4737	/* drive modes available */
4738	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4739				       dev->mwdma_mask, dev->udma_mask);
4740	xfer_mask &= ata_id_xfermask(dev->id);
4741
4742	/*
4743	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4744	 *	cable
4745	 */
4746	if (ata_dev_pair(dev)) {
4747		/* No PIO5 or PIO6 */
4748		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4749		/* No MWDMA3 or MWDMA 4 */
4750		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4751	}
4752
4753	if (ata_dma_blacklisted(dev)) {
4754		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4755		ata_dev_warn(dev,
4756			     "device is on DMA blacklist, disabling DMA\n");
4757	}
4758
4759	if ((host->flags & ATA_HOST_SIMPLEX) &&
4760	    host->simplex_claimed && host->simplex_claimed != ap) {
4761		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4762		ata_dev_warn(dev,
4763			     "simplex DMA is claimed by other device, disabling DMA\n");
4764	}
4765
4766	if (ap->flags & ATA_FLAG_NO_IORDY)
4767		xfer_mask &= ata_pio_mask_no_iordy(dev);
4768
4769	if (ap->ops->mode_filter)
4770		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4771
4772	/* Apply cable rule here.  Don't apply it early because when
4773	 * we handle hot plug the cable type can itself change.
4774	 * Check this last so that we know if the transfer rate was
4775	 * solely limited by the cable.
4776	 * Unknown or 80 wire cables reported host side are checked
4777	 * drive side as well. Cases where we know a 40wire cable
4778	 * is used safely for 80 are not checked here.
4779	 */
4780	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4781		/* UDMA/44 or higher would be available */
4782		if (cable_is_40wire(ap)) {
4783			ata_dev_warn(dev,
4784				     "limited to UDMA/33 due to 40-wire cable\n");
4785			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4786		}
4787
4788	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4789			    &dev->mwdma_mask, &dev->udma_mask);
4790}
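
/*
 * Illustrative sketch: the pack/unpack helpers used above fold the PIO,
 * MWDMA and UDMA mode masks into one xfer_mask and split it back out.
 * The mode mask constants come from <linux/ata.h>; the function name is
 * hypothetical.
 */
static void example_xfermask_roundtrip(struct ata_device *dev)
{
	unsigned long xfer_mask;

	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}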
4791
4792/**
4793 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4794 *	@dev: Device to which command will be sent
4795 *
4796 *	Issue SET FEATURES - XFER MODE command to device @dev
4797 *	on port @ap.
4798 *
4799 *	LOCKING:
4800 *	PCI/etc. bus probe sem.
4801 *
4802 *	RETURNS:
4803 *	0 on success, AC_ERR_* mask otherwise.
4804 */
4805
4806static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4807{
4808	struct ata_taskfile tf;
4809	unsigned int err_mask;
4810
4811	/* set up set-features taskfile */
4812	DPRINTK("set features - xfer mode\n");
4813
4814	/* Some controllers and ATAPI devices show flaky interrupt
4815	 * behavior after setting xfer mode.  Use polling instead.
4816	 */
4817	ata_tf_init(dev, &tf);
4818	tf.command = ATA_CMD_SET_FEATURES;
4819	tf.feature = SETFEATURES_XFER;
4820	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4821	tf.protocol = ATA_PROT_NODATA;
4822	/* If we are using IORDY we must send the mode setting command */
4823	if (ata_pio_need_iordy(dev))
4824		tf.nsect = dev->xfer_mode;
4825	/* If the device has IORDY and the controller does not - turn it off */
4826 	else if (ata_id_has_iordy(dev->id))
4827		tf.nsect = 0x01;
4828	else /* In the ancient relic department - skip all of this */
4829		return 0;
4830
4831	/* On some disks, this command causes spin-up, so we need longer timeout */
4832	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4833
4834	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4835	return err_mask;
4836}
4837
4838/**
4839 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4840 *	@dev: Device to which command will be sent
4841 *	@enable: Whether to enable or disable the feature
4842 *	@feature: The sector count value representing the feature to set
4843 *
4844 *	Issue SET FEATURES - SATA FEATURES command to device @dev
4845 *	on port @ap, with the sector count set to @feature.
4846 *
4847 *	LOCKING:
4848 *	PCI/etc. bus probe sem.
4849 *
4850 *	RETURNS:
4851 *	0 on success, AC_ERR_* mask otherwise.
4852 */
4853unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4854{
4855	struct ata_taskfile tf;
4856	unsigned int err_mask;
4857	unsigned long timeout = 0;
4858
4859	/* set up set-features taskfile */
4860	DPRINTK("set features - SATA features\n");
4861
4862	ata_tf_init(dev, &tf);
4863	tf.command = ATA_CMD_SET_FEATURES;
4864	tf.feature = enable;
4865	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4866	tf.protocol = ATA_PROT_NODATA;
4867	tf.nsect = feature;
4868
4869	if (enable == SETFEATURES_SPINUP)
4870		timeout = ata_probe_timeout ?
4871			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4872	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4873
4874	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4875	return err_mask;
4876}
4877EXPORT_SYMBOL_GPL(ata_dev_set_feature);
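
/*
 * Illustrative sketch: enabling device-initiated power management with
 * ata_dev_set_feature(), passing the sub-feature in the sector count
 * field as described above.  The helper name is hypothetical and error
 * handling is reduced to a warning.
 */
static void example_enable_dipm(struct ata_device *dev)
{
	unsigned int err_mask;

	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM);
	if (err_mask)
		ata_dev_warn(dev, "failed to enable DIPM (err_mask=0x%x)\n",
			     err_mask);
}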
4878
4879/**
4880 *	ata_dev_init_params - Issue INIT DEV PARAMS command
4881 *	@dev: Device to which command will be sent
4882 *	@heads: Number of heads (taskfile parameter)
4883 *	@sectors: Number of sectors (taskfile parameter)
4884 *
4885 *	LOCKING:
4886 *	Kernel thread context (may sleep)
4887 *
4888 *	RETURNS:
4889 *	0 on success, AC_ERR_* mask otherwise.
4890 */
4891static unsigned int ata_dev_init_params(struct ata_device *dev,
4892					u16 heads, u16 sectors)
4893{
4894	struct ata_taskfile tf;
4895	unsigned int err_mask;
4896
4897	/* Number of sectors per track 1-255. Number of heads 1-16 */
4898	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4899		return AC_ERR_INVALID;
4900
4901	/* set up init dev params taskfile */
4902	DPRINTK("init dev params \n");
4903
4904	ata_tf_init(dev, &tf);
4905	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4906	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4907	tf.protocol = ATA_PROT_NODATA;
4908	tf.nsect = sectors;
4909	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4910
4911	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4912	/* A clean abort indicates an original or just-out-of-spec drive,
4913	   and we should continue, as we issue the setup based on the
4914	   drive's reported working geometry */
4915	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4916		err_mask = 0;
4917
4918	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4919	return err_mask;
4920}
4921
4922/**
4923 *	atapi_check_dma - Check whether ATAPI DMA can be supported
4924 *	@qc: Metadata associated with taskfile to check
4925 *
4926 *	Allow low-level driver to filter ATA PACKET commands, returning
4927 *	a status indicating whether or not it is OK to use DMA for the
4928 *	supplied PACKET command.
4929 *
4930 *	LOCKING:
4931 *	spin_lock_irqsave(host lock)
4932 *
4933 *	RETURNS: 0 when ATAPI DMA can be used
4934 *               nonzero otherwise
4935 */
4936int atapi_check_dma(struct ata_queued_cmd *qc)
4937{
4938	struct ata_port *ap = qc->ap;
4939
4940	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4941	 * few ATAPI devices choke on such DMA requests.
4942	 */
4943	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4944	    unlikely(qc->nbytes & 15))
4945		return 1;
4946
4947	if (ap->ops->check_atapi_dma)
4948		return ap->ops->check_atapi_dma(qc);
4949
4950	return 0;
4951}
4952
4953/**
4954 *	ata_std_qc_defer - Check whether a qc needs to be deferred
4955 *	@qc: ATA command in question
4956 *
4957 *	Non-NCQ commands cannot run with any other command, NCQ or
4958 *	not.  As upper layer only knows the queue depth, we are
4959 *	responsible for maintaining exclusion.  This function checks
4960 *	whether a new command @qc can be issued.
4961 *
4962 *	LOCKING:
4963 *	spin_lock_irqsave(host lock)
4964 *
4965 *	RETURNS:
4966 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4967 */
4968int ata_std_qc_defer(struct ata_queued_cmd *qc)
4969{
4970	struct ata_link *link = qc->dev->link;
4971
4972	if (ata_is_ncq(qc->tf.protocol)) {
4973		if (!ata_tag_valid(link->active_tag))
4974			return 0;
4975	} else {
4976		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4977			return 0;
4978	}
4979
4980	return ATA_DEFER_LINK;
4981}
4982
4983void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4984
4985/**
4986 *	ata_sg_init - Associate command with scatter-gather table.
4987 *	@qc: Command to be associated
4988 *	@sg: Scatter-gather table.
4989 *	@n_elem: Number of elements in s/g table.
4990 *
4991 *	Initialize the data-related elements of queued_cmd @qc
4992 *	to point to a scatter-gather table @sg, containing @n_elem
4993 *	elements.
4994 *
4995 *	LOCKING:
4996 *	spin_lock_irqsave(host lock)
4997 */
4998void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4999		 unsigned int n_elem)
5000{
5001	qc->sg = sg;
5002	qc->n_elem = n_elem;
5003	qc->cursg = qc->sg;
5004}
5005
5006#ifdef CONFIG_HAS_DMA
5007
5008/**
5009 *	ata_sg_clean - Unmap DMA memory associated with command
5010 *	@qc: Command containing DMA memory to be released
5011 *
5012 *	Unmap all mapped DMA memory associated with this command.
5013 *
5014 *	LOCKING:
5015 *	spin_lock_irqsave(host lock)
5016 */
5017static void ata_sg_clean(struct ata_queued_cmd *qc)
5018{
5019	struct ata_port *ap = qc->ap;
5020	struct scatterlist *sg = qc->sg;
5021	int dir = qc->dma_dir;
5022
5023	WARN_ON_ONCE(sg == NULL);
5024
5025	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
5026
5027	if (qc->n_elem)
5028		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
5029
5030	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5031	qc->sg = NULL;
5032}
5033
5034/**
5035 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
5036 *	@qc: Command with scatter-gather table to be mapped.
5037 *
5038 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
5039 *
5040 *	LOCKING:
5041 *	spin_lock_irqsave(host lock)
5042 *
5043 *	RETURNS:
5044 *	Zero on success, negative on error.
5045 *
5046 */
5047static int ata_sg_setup(struct ata_queued_cmd *qc)
5048{
5049	struct ata_port *ap = qc->ap;
5050	unsigned int n_elem;
5051
5052	VPRINTK("ENTER, ata%u\n", ap->print_id);
5053
5054	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5055	if (n_elem < 1)
5056		return -1;
5057
5058	VPRINTK("%d sg elements mapped\n", n_elem);
5059	qc->orig_n_elem = qc->n_elem;
5060	qc->n_elem = n_elem;
5061	qc->flags |= ATA_QCFLAG_DMAMAP;
5062
5063	return 0;
5064}
5065
5066#else /* !CONFIG_HAS_DMA */
5067
5068static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
5069static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5070
5071#endif /* !CONFIG_HAS_DMA */
5072
5073/**
5074 *	swap_buf_le16 - swap halves of 16-bit words in place
5075 *	@buf:  Buffer to swap
5076 *	@buf_words:  Number of 16-bit words in buffer.
5077 *
5078 *	Swap halves of 16-bit words if needed to convert from
5079 *	little-endian byte order to native cpu byte order, or
5080 *	vice-versa.
5081 *
5082 *	LOCKING:
5083 *	Inherited from caller.
5084 */
5085void swap_buf_le16(u16 *buf, unsigned int buf_words)
5086{
5087#ifdef __BIG_ENDIAN
5088	unsigned int i;
5089
5090	for (i = 0; i < buf_words; i++)
5091		buf[i] = le16_to_cpu(buf[i]);
5092#endif /* __BIG_ENDIAN */
5093}
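
/*
 * Illustrative sketch: IDENTIFY data arrives as little-endian 16-bit
 * words, so callers pass the ID buffer through swap_buf_le16() to get
 * native CPU byte order (a no-op on little-endian machines).  The helper
 * name is hypothetical.
 */
static void example_fixup_id(struct ata_device *dev)
{
	swap_buf_le16(dev->id, ATA_ID_WORDS);
}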
5094
5095/**
5096 *	ata_qc_new_init - Request an available ATA command, and initialize it
5097 *	@dev: Device from which we request an available command structure
5098 *	@tag: tag
5099 *
5100 *	LOCKING:
5101 *	None.
5102 */
5103
5104struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
5105{
5106	struct ata_port *ap = dev->link->ap;
5107	struct ata_queued_cmd *qc;
5108
5109	/* no command while frozen */
5110	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5111		return NULL;
5112
5113	/* libsas case */
5114	if (ap->flags & ATA_FLAG_SAS_HOST) {
5115		tag = ata_sas_allocate_tag(ap);
5116		if (tag < 0)
5117			return NULL;
5118	}
5119
5120	qc = __ata_qc_from_tag(ap, tag);
5121	qc->tag = qc->hw_tag = tag;
5122	qc->scsicmd = NULL;
5123	qc->ap = ap;
5124	qc->dev = dev;
5125
5126	ata_qc_reinit(qc);
5127
5128	return qc;
5129}
5130
5131/**
5132 *	ata_qc_free - free unused ata_queued_cmd
5133 *	@qc: Command to complete
5134 *
5135 *	Free an unused ata_queued_cmd object in case
5136 *	something prevents it from being used.
5137 *
5138 *	LOCKING:
5139 *	spin_lock_irqsave(host lock)
5140 */
5141void ata_qc_free(struct ata_queued_cmd *qc)
5142{
5143	struct ata_port *ap;
5144	unsigned int tag;
5145
5146	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5147	ap = qc->ap;
5148
5149	qc->flags = 0;
5150	tag = qc->tag;
5151	if (ata_tag_valid(tag)) {
5152		qc->tag = ATA_TAG_POISON;
5153		if (ap->flags & ATA_FLAG_SAS_HOST)
5154			ata_sas_free_tag(tag, ap);
5155	}
5156}
5157
5158void __ata_qc_complete(struct ata_queued_cmd *qc)
5159{
5160	struct ata_port *ap;
5161	struct ata_link *link;
5162
5163	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5164	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5165	ap = qc->ap;
5166	link = qc->dev->link;
5167
5168	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5169		ata_sg_clean(qc);
5170
5171	/* command should be marked inactive atomically with qc completion */
5172	if (ata_is_ncq(qc->tf.protocol)) {
5173		link->sactive &= ~(1 << qc->hw_tag);
5174		if (!link->sactive)
5175			ap->nr_active_links--;
5176	} else {
5177		link->active_tag = ATA_TAG_POISON;
5178		ap->nr_active_links--;
5179	}
5180
5181	/* clear exclusive status */
5182	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5183		     ap->excl_link == link))
5184		ap->excl_link = NULL;
5185
5186	/* atapi: mark qc as inactive to prevent the interrupt handler
5187	 * from completing the command twice later, before the error handler
5188	 * is called. (when rc != 0 and atapi request sense is needed)
5189	 */
5190	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5191	ap->qc_active &= ~(1ULL << qc->tag);
5192
5193	/* call completion callback */
5194	qc->complete_fn(qc);
5195}
5196
5197static void fill_result_tf(struct ata_queued_cmd *qc)
5198{
5199	struct ata_port *ap = qc->ap;
5200
5201	qc->result_tf.flags = qc->tf.flags;
5202	ap->ops->qc_fill_rtf(qc);
5203}
5204
5205static void ata_verify_xfer(struct ata_queued_cmd *qc)
5206{
5207	struct ata_device *dev = qc->dev;
5208
5209	if (!ata_is_data(qc->tf.protocol))
5210		return;
5211
5212	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5213		return;
5214
5215	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5216}
5217
5218/**
5219 *	ata_qc_complete - Complete an active ATA command
5220 *	@qc: Command to complete
5221 *
5222 *	Indicate to the mid and upper layers that an ATA command has
5223 *	completed, with either an ok or not-ok status.
5224 *
5225 *	Refrain from calling this function multiple times when
5226 *	successfully completing multiple NCQ commands.
5227 *	ata_qc_complete_multiple() should be used instead, which will
5228 *	properly update IRQ expect state.
5229 *
5230 *	LOCKING:
5231 *	spin_lock_irqsave(host lock)
5232 */
5233void ata_qc_complete(struct ata_queued_cmd *qc)
5234{
5235	struct ata_port *ap = qc->ap;
5236
5237	/* Trigger the LED (if available) */
5238	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
5239
5240	/* XXX: New EH and old EH use different mechanisms to
5241	 * synchronize EH with regular execution path.
5242	 *
5243	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5244	 * Normal execution path is responsible for not accessing a
5245	 * failed qc.  libata core enforces the rule by returning NULL
5246	 * from ata_qc_from_tag() for failed qcs.
5247	 *
5248	 * Old EH depends on ata_qc_complete() nullifying completion
5249	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5250	 * not synchronize with interrupt handler.  Only PIO task is
5251	 * taken care of.
5252	 */
5253	if (ap->ops->error_handler) {
5254		struct ata_device *dev = qc->dev;
5255		struct ata_eh_info *ehi = &dev->link->eh_info;
5256
5257		if (unlikely(qc->err_mask))
5258			qc->flags |= ATA_QCFLAG_FAILED;
5259
5260		/*
5261		 * Finish internal commands without any further processing
5262		 * and always with the result TF filled.
5263		 */
5264		if (unlikely(ata_tag_internal(qc->tag))) {
5265			fill_result_tf(qc);
5266			trace_ata_qc_complete_internal(qc);
5267			__ata_qc_complete(qc);
5268			return;
5269		}
5270
5271		/*
5272		 * Non-internal qc has failed.  Fill the result TF and
5273		 * summon EH.
5274		 */
5275		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5276			fill_result_tf(qc);
5277			trace_ata_qc_complete_failed(qc);
5278			ata_qc_schedule_eh(qc);
5279			return;
5280		}
5281
5282		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5283
5284		/* read result TF if requested */
5285		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5286			fill_result_tf(qc);
5287
5288		trace_ata_qc_complete_done(qc);
5289		/* Some commands need post-processing after successful
5290		 * completion.
5291		 */
5292		switch (qc->tf.command) {
5293		case ATA_CMD_SET_FEATURES:
5294			if (qc->tf.feature != SETFEATURES_WC_ON &&
5295			    qc->tf.feature != SETFEATURES_WC_OFF &&
5296			    qc->tf.feature != SETFEATURES_RA_ON &&
5297			    qc->tf.feature != SETFEATURES_RA_OFF)
5298				break;
5299			/* fall through */
5300		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5301		case ATA_CMD_SET_MULTI: /* multi_count changed */
5302			/* revalidate device */
5303			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5304			ata_port_schedule_eh(ap);
5305			break;
5306
5307		case ATA_CMD_SLEEP:
5308			dev->flags |= ATA_DFLAG_SLEEPING;
5309			break;
5310		}
5311
5312		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5313			ata_verify_xfer(qc);
5314
5315		__ata_qc_complete(qc);
5316	} else {
5317		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5318			return;
5319
5320		/* read result TF if failed or requested */
5321		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5322			fill_result_tf(qc);
5323
5324		__ata_qc_complete(qc);
5325	}
5326}
5327
5328/**
5329 *	ata_qc_complete_multiple - Complete multiple qcs successfully
5330 *	@ap: port in question
5331 *	@qc_active: new qc_active mask
5332 *
5333 *	Complete in-flight commands.  This function is meant to be
5334 *	called from a low-level driver's interrupt routine to complete
5335 *	requests normally.  ap->qc_active and @qc_active are compared
5336 *	and commands are completed accordingly.
5337 *
5338 *	Always use this function when completing multiple NCQ commands
5339 *	from IRQ handlers instead of calling ata_qc_complete()
5340 *	multiple times to keep IRQ expect status properly in sync.
5341 *
5342 *	LOCKING:
5343 *	spin_lock_irqsave(host lock)
5344 *
5345 *	RETURNS:
5346 *	Number of completed commands on success, -errno otherwise.
5347 */
5348int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
5349{
5350	u64 done_mask, ap_qc_active = ap->qc_active;
5351	int nr_done = 0;
5352
5353	/*
5354	 * If the internal tag is set on ap->qc_active, then we care about
5355	 * bit0 on the passed in qc_active mask. Move that bit up to match
5356	 * the internal tag.
5357	 */
5358	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
5359		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
5360		qc_active ^= qc_active & 0x01;
5361	}
5362
5363	done_mask = ap_qc_active ^ qc_active;
5364
5365	if (unlikely(done_mask & qc_active)) {
5366		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
5367			     ap->qc_active, qc_active);
5368		return -EINVAL;
5369	}
5370
5371	while (done_mask) {
5372		struct ata_queued_cmd *qc;
5373		unsigned int tag = __ffs64(done_mask);
5374
5375		qc = ata_qc_from_tag(ap, tag);
5376		if (qc) {
5377			ata_qc_complete(qc);
5378			nr_done++;
5379		}
5380		done_mask &= ~(1ULL << tag);
5381	}
5382
5383	return nr_done;
5384}
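
/*
 * Illustrative sketch: an NCQ-capable driver's interrupt handler reads the
 * hardware's view of the still-active tags and hands it to
 * ata_qc_complete_multiple(); every tag the hardware no longer reports as
 * active is completed.  @hw_active_tags stands in for a controller
 * specific register read; the helper name is hypothetical.
 */
static void example_ncq_irq(struct ata_port *ap, u64 hw_active_tags)
{
	ata_qc_complete_multiple(ap, hw_active_tags);
}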
5385
5386/**
5387 *	ata_qc_issue - issue taskfile to device
5388 *	@qc: command to issue to device
5389 *
5390 *	Prepare an ATA command for submission to the device.
5391 *	This includes mapping the data into a DMA-able
5392 *	area, filling in the S/G table, and finally
5393 *	writing the taskfile to hardware, starting the command.
5394 *
5395 *	LOCKING:
5396 *	spin_lock_irqsave(host lock)
5397 */
5398void ata_qc_issue(struct ata_queued_cmd *qc)
5399{
5400	struct ata_port *ap = qc->ap;
5401	struct ata_link *link = qc->dev->link;
5402	u8 prot = qc->tf.protocol;
5403
5404	/* Make sure only one non-NCQ command is outstanding.  The
5405	 * check is skipped for old EH because it reuses active qc to
5406	 * request ATAPI sense.
5407	 */
5408	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5409
5410	if (ata_is_ncq(prot)) {
5411		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
5412
5413		if (!link->sactive)
5414			ap->nr_active_links++;
5415		link->sactive |= 1 << qc->hw_tag;
5416	} else {
5417		WARN_ON_ONCE(link->sactive);
5418
5419		ap->nr_active_links++;
5420		link->active_tag = qc->tag;
5421	}
5422
5423	qc->flags |= ATA_QCFLAG_ACTIVE;
5424	ap->qc_active |= 1ULL << qc->tag;
5425
5426	/*
5427	 * We guarantee to LLDs that they will have at least one
5428	 * non-zero sg if the command is a data command.
5429	 */
5430	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5431		goto sys_err;
5432
5433	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5434				 (ap->flags & ATA_FLAG_PIO_DMA)))
5435		if (ata_sg_setup(qc))
5436			goto sys_err;
5437
5438	/* if device is sleeping, schedule reset and abort the link */
5439	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5440		link->eh_info.action |= ATA_EH_RESET;
5441		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5442		ata_link_abort(link);
5443		return;
5444	}
5445
5446	ap->ops->qc_prep(qc);
5447	trace_ata_qc_issue(qc);
5448	qc->err_mask |= ap->ops->qc_issue(qc);
5449	if (unlikely(qc->err_mask))
5450		goto err;
5451	return;
5452
5453sys_err:
5454	qc->err_mask |= AC_ERR_SYSTEM;
5455err:
5456	ata_qc_complete(qc);
5457}
5458
5459/**
5460 *	sata_scr_valid - test whether SCRs are accessible
5461 *	@link: ATA link to test SCR accessibility for
5462 *
5463 *	Test whether SCRs are accessible for @link.
5464 *
5465 *	LOCKING:
5466 *	None.
5467 *
5468 *	RETURNS:
5469 *	1 if SCRs are accessible, 0 otherwise.
5470 */
5471int sata_scr_valid(struct ata_link *link)
5472{
5473	struct ata_port *ap = link->ap;
5474
5475	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5476}
5477
5478/**
5479 *	sata_scr_read - read SCR register of the specified port
5480 *	@link: ATA link to read SCR for
5481 *	@reg: SCR to read
5482 *	@val: Place to store read value
5483 *
5484 *	Read SCR register @reg of @link into *@val.  This function is
5485 *	guaranteed to succeed if @link is ap->link, the cable type of
5486 *	the port is SATA and the port implements ->scr_read.
5487 *
5488 *	LOCKING:
5489 *	None if @link is ap->link.  Kernel thread context otherwise.
5490 *
5491 *	RETURNS:
5492 *	0 on success, negative errno on failure.
5493 */
5494int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5495{
5496	if (ata_is_host_link(link)) {
5497		if (sata_scr_valid(link))
5498			return link->ap->ops->scr_read(link, reg, val);
5499		return -EOPNOTSUPP;
5500	}
5501
5502	return sata_pmp_scr_read(link, reg, val);
5503}
5504
5505/**
5506 *	sata_scr_write - write SCR register of the specified port
5507 *	@link: ATA link to write SCR for
5508 *	@reg: SCR to write
5509 *	@val: value to write
5510 *
5511 *	Write @val to SCR register @reg of @link.  This function is
5512 *	guaranteed to succeed if @link is ap->link, the cable type of
5513 *	the port is SATA and the port implements ->scr_write.
5514 *
5515 *	LOCKING:
5516 *	None if @link is ap->link.  Kernel thread context otherwise.
5517 *
5518 *	RETURNS:
5519 *	0 on success, negative errno on failure.
5520 */
5521int sata_scr_write(struct ata_link *link, int reg, u32 val)
5522{
5523	if (ata_is_host_link(link)) {
5524		if (sata_scr_valid(link))
5525			return link->ap->ops->scr_write(link, reg, val);
5526		return -EOPNOTSUPP;
5527	}
5528
5529	return sata_pmp_scr_write(link, reg, val);
5530}
5531
5532/**
5533 *	sata_scr_write_flush - write SCR register of the specified port and flush
5534 *	@link: ATA link to write SCR for
5535 *	@reg: SCR to write
5536 *	@val: value to write
5537 *
5538 *	This function is identical to sata_scr_write() except that this
5539 *	function performs a flush after writing to the register.
5540 *
5541 *	LOCKING:
5542 *	None if @link is ap->link.  Kernel thread context otherwise.
5543 *
5544 *	RETURNS:
5545 *	0 on success, negative errno on failure.
5546 */
5547int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5548{
5549	if (ata_is_host_link(link)) {
5550		int rc;
5551
5552		if (sata_scr_valid(link)) {
5553			rc = link->ap->ops->scr_write(link, reg, val);
5554			if (rc == 0)
5555				rc = link->ap->ops->scr_read(link, reg, &val);
5556			return rc;
5557		}
5558		return -EOPNOTSUPP;
5559	}
5560
5561	return sata_pmp_scr_write(link, reg, val);
5562}
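
/*
 * Illustrative sketch: the common "read SError, write it back" idiom used
 * throughout this file to clear sticky error bits through the SCR
 * accessors.  The helper name is hypothetical.
 */
static int example_clear_serror(struct ata_link *link)
{
	u32 serror;
	int rc;

	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc)
		return rc;
	return sata_scr_write(link, SCR_ERROR, serror);
}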
5563
5564/**
5565 *	ata_phys_link_online - test whether the given link is online
5566 *	@link: ATA link to test
5567 *
5568 *	Test whether @link is online.  Note that this function returns
5569 *	0 if online status of @link cannot be obtained, so
5570 *	ata_link_online(link) != !ata_link_offline(link).
5571 *
5572 *	LOCKING:
5573 *	None.
5574 *
5575 *	RETURNS:
5576 *	True if the port online status is available and online.
5577 */
5578bool ata_phys_link_online(struct ata_link *link)
5579{
5580	u32 sstatus;
5581
5582	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5583	    ata_sstatus_online(sstatus))
5584		return true;
5585	return false;
5586}
5587
5588/**
5589 *	ata_phys_link_offline - test whether the given link is offline
5590 *	@link: ATA link to test
5591 *
5592 *	Test whether @link is offline.  Note that this function
5593 *	returns 0 if offline status of @link cannot be obtained, so
5594 *	ata_link_online(link) != !ata_link_offline(link).
5595 *
5596 *	LOCKING:
5597 *	None.
5598 *
5599 *	RETURNS:
5600 *	True if the port offline status is available and offline.
5601 */
5602bool ata_phys_link_offline(struct ata_link *link)
5603{
5604	u32 sstatus;
5605
5606	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5607	    !ata_sstatus_online(sstatus))
5608		return true;
5609	return false;
5610}
5611
5612/**
5613 *	ata_link_online - test whether the given link is online
5614 *	@link: ATA link to test
5615 *
5616 *	Test whether @link is online.  This is identical to
5617 *	ata_phys_link_online() when there's no slave link.  When
5618 *	there's a slave link, this function should only be called on
5619 *	the master link and will return true if any of M/S links is
5620 *	online.
5621 *
5622 *	LOCKING:
5623 *	None.
5624 *
5625 *	RETURNS:
5626 *	True if the port online status is available and online.
5627 */
5628bool ata_link_online(struct ata_link *link)
5629{
5630	struct ata_link *slave = link->ap->slave_link;
5631
5632	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5633
5634	return ata_phys_link_online(link) ||
5635		(slave && ata_phys_link_online(slave));
5636}
5637
5638/**
5639 *	ata_link_offline - test whether the given link is offline
5640 *	@link: ATA link to test
5641 *
5642 *	Test whether @link is offline.  This is identical to
5643 *	ata_phys_link_offline() when there's no slave link.  When
5644 *	there's a slave link, this function should only be called on
5645 *	the master link and will return true if both M/S links are
5646 *	offline.
5647 *
5648 *	LOCKING:
5649 *	None.
5650 *
5651 *	RETURNS:
5652 *	True if the port offline status is available and offline.
5653 */
5654bool ata_link_offline(struct ata_link *link)
5655{
5656	struct ata_link *slave = link->ap->slave_link;
5657
5658	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5659
5660	return ata_phys_link_offline(link) &&
5661		(!slave || ata_phys_link_offline(slave));
5662}
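/*
 * Illustrative sketch (not from the original file): reset and hotplug
 * paths typically use the helpers above to decide whether anything is
 * attached before spending time on recovery:
 *
 *	if (ata_link_offline(link))
 *		return 0;
 */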
5663
5664#ifdef CONFIG_PM
5665static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5666				unsigned int action, unsigned int ehi_flags,
5667				bool async)
5668{
5669	struct ata_link *link;
5670	unsigned long flags;
5671
5672	/* Previous resume operation might still be in
5673	 * progress.  Wait for PM_PENDING to clear.
5674	 */
5675	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5676		ata_port_wait_eh(ap);
5677		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5678	}
5679
5680	/* request PM ops to EH */
5681	spin_lock_irqsave(ap->lock, flags);
5682
5683	ap->pm_mesg = mesg;
5684	ap->pflags |= ATA_PFLAG_PM_PENDING;
5685	ata_for_each_link(link, ap, HOST_FIRST) {
5686		link->eh_info.action |= action;
5687		link->eh_info.flags |= ehi_flags;
5688	}
5689
5690	ata_port_schedule_eh(ap);
5691
5692	spin_unlock_irqrestore(ap->lock, flags);
5693
5694	if (!async) {
5695		ata_port_wait_eh(ap);
5696		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5697	}
5698}
5699
5700/*
5701 * On some hardware, the device fails to respond after being spun down for suspend.  As
5702 * the device won't be used before being resumed, we don't need to touch the
5703 * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
5704 *
5705 * http://thread.gmane.org/gmane.linux.ide/46764
5706 */
5707static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5708						 | ATA_EHI_NO_AUTOPSY
5709						 | ATA_EHI_NO_RECOVERY;
5710
5711static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5712{
5713	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5714}
5715
5716static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5717{
5718	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5719}
5720
5721static int ata_port_pm_suspend(struct device *dev)
5722{
5723	struct ata_port *ap = to_ata_port(dev);
5724
5725	if (pm_runtime_suspended(dev))
5726		return 0;
5727
5728	ata_port_suspend(ap, PMSG_SUSPEND);
5729	return 0;
5730}
5731
5732static int ata_port_pm_freeze(struct device *dev)
5733{
5734	struct ata_port *ap = to_ata_port(dev);
5735
5736	if (pm_runtime_suspended(dev))
5737		return 0;
5738
5739	ata_port_suspend(ap, PMSG_FREEZE);
5740	return 0;
5741}
5742
5743static int ata_port_pm_poweroff(struct device *dev)
5744{
5745	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5746	return 0;
5747}
5748
5749static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5750						| ATA_EHI_QUIET;
5751
5752static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5753{
5754	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5755}
5756
5757static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5758{
5759	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5760}
5761
5762static int ata_port_pm_resume(struct device *dev)
5763{
5764	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5765	pm_runtime_disable(dev);
5766	pm_runtime_set_active(dev);
5767	pm_runtime_enable(dev);
5768	return 0;
5769}
5770
5771/*
5772 * For ODDs, the upper layer will poll for media change every few seconds,
5773 * which will make it enter and leave suspend state every few seconds. And
5774 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
5775 * is very small and the ODD may malfunction after constantly being reset.
5776 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5777 * ODD is attached to the port.
5778 */
5779static int ata_port_runtime_idle(struct device *dev)
5780{
5781	struct ata_port *ap = to_ata_port(dev);
5782	struct ata_link *link;
5783	struct ata_device *adev;
5784
5785	ata_for_each_link(link, ap, HOST_FIRST) {
5786		ata_for_each_dev(adev, link, ENABLED)
5787			if (adev->class == ATA_DEV_ATAPI &&
5788			    !zpodd_dev_enabled(adev))
5789				return -EBUSY;
5790	}
5791
5792	return 0;
5793}
5794
5795static int ata_port_runtime_suspend(struct device *dev)
5796{
5797	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5798	return 0;
5799}
5800
5801static int ata_port_runtime_resume(struct device *dev)
5802{
5803	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5804	return 0;
5805}
5806
5807static const struct dev_pm_ops ata_port_pm_ops = {
5808	.suspend = ata_port_pm_suspend,
5809	.resume = ata_port_pm_resume,
5810	.freeze = ata_port_pm_freeze,
5811	.thaw = ata_port_pm_resume,
5812	.poweroff = ata_port_pm_poweroff,
5813	.restore = ata_port_pm_resume,
5814
5815	.runtime_suspend = ata_port_runtime_suspend,
5816	.runtime_resume = ata_port_runtime_resume,
5817	.runtime_idle = ata_port_runtime_idle,
5818};
5819
5820/* sas ports don't participate in pm runtime management of ata_ports,
5821 * and need to resume ata devices at the domain level, not the per-port
5822 * level. sas suspend/resume is async to allow parallel port recovery
5823 * since sas has multiple ata_port instances per Scsi_Host.
5824 */
5825void ata_sas_port_suspend(struct ata_port *ap)
5826{
5827	ata_port_suspend_async(ap, PMSG_SUSPEND);
5828}
5829EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5830
5831void ata_sas_port_resume(struct ata_port *ap)
5832{
5833	ata_port_resume_async(ap, PMSG_RESUME);
5834}
5835EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5836
5837/**
5838 *	ata_host_suspend - suspend host
5839 *	@host: host to suspend
5840 *	@mesg: PM message
5841 *
5842 *	Suspend @host.  Actual operation is performed by port suspend.
5843 */
5844int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5845{
5846	host->dev->power.power_state = mesg;
5847	return 0;
5848}
5849
5850/**
5851 *	ata_host_resume - resume host
5852 *	@host: host to resume
5853 *
5854 *	Resume @host.  Actual operation is performed by port resume.
5855 */
5856void ata_host_resume(struct ata_host *host)
5857{
5858	host->dev->power.power_state = PMSG_ON;
5859}
5860#endif
5861
5862const struct device_type ata_port_type = {
5863	.name = "ata_port",
5864#ifdef CONFIG_PM
5865	.pm = &ata_port_pm_ops,
5866#endif
5867};
5868
5869/**
5870 *	ata_dev_init - Initialize an ata_device structure
5871 *	@dev: Device structure to initialize
5872 *
5873 *	Initialize @dev in preparation for probing.
5874 *
5875 *	LOCKING:
5876 *	Inherited from caller.
5877 */
5878void ata_dev_init(struct ata_device *dev)
5879{
5880	struct ata_link *link = ata_dev_phys_link(dev);
5881	struct ata_port *ap = link->ap;
5882	unsigned long flags;
5883
5884	/* SATA spd limit is bound to the attached device, reset together */
5885	link->sata_spd_limit = link->hw_sata_spd_limit;
5886	link->sata_spd = 0;
5887
5888	/* High bits of dev->flags are used to record warm plug
5889	 * requests which occur asynchronously.  Synchronize using
5890	 * host lock.
5891	 */
5892	spin_lock_irqsave(ap->lock, flags);
5893	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5894	dev->horkage = 0;
5895	spin_unlock_irqrestore(ap->lock, flags);
5896
5897	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5898	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5899	dev->pio_mask = UINT_MAX;
5900	dev->mwdma_mask = UINT_MAX;
5901	dev->udma_mask = UINT_MAX;
5902}
5903
5904/**
5905 *	ata_link_init - Initialize an ata_link structure
5906 *	@ap: ATA port link is attached to
5907 *	@link: Link structure to initialize
5908 *	@pmp: Port multiplier port number
5909 *
5910 *	Initialize @link.
5911 *
5912 *	LOCKING:
5913 *	Kernel thread context (may sleep)
5914 */
5915void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5916{
5917	int i;
5918
5919	/* clear everything except for devices */
5920	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5921	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5922
5923	link->ap = ap;
5924	link->pmp = pmp;
5925	link->active_tag = ATA_TAG_POISON;
5926	link->hw_sata_spd_limit = UINT_MAX;
5927
5928	/* can't use iterator, ap isn't initialized yet */
5929	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5930		struct ata_device *dev = &link->device[i];
5931
5932		dev->link = link;
5933		dev->devno = dev - link->device;
5934#ifdef CONFIG_ATA_ACPI
5935		dev->gtf_filter = ata_acpi_gtf_filter;
5936#endif
5937		ata_dev_init(dev);
5938	}
5939}
5940
5941/**
5942 *	sata_link_init_spd - Initialize link->sata_spd_limit
5943 *	@link: Link to configure sata_spd_limit for
5944 *
5945 *	Initialize @link->[hw_]sata_spd_limit to the currently
5946 *	configured value.
5947 *
5948 *	LOCKING:
5949 *	Kernel thread context (may sleep).
5950 *
5951 *	RETURNS:
5952 *	0 on success, -errno on failure.
5953 */
5954int sata_link_init_spd(struct ata_link *link)
5955{
5956	u8 spd;
5957	int rc;
5958
5959	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5960	if (rc)
5961		return rc;
5962
5963	spd = (link->saved_scontrol >> 4) & 0xf;
5964	if (spd)
5965		link->hw_sata_spd_limit &= (1 << spd) - 1;
5966
5967	ata_force_link_limits(link);
5968
5969	link->sata_spd_limit = link->hw_sata_spd_limit;
5970
5971	return 0;
5972}
5973
5974/**
5975 *	ata_port_alloc - allocate and initialize basic ATA port resources
5976 *	@host: ATA host this allocated port belongs to
5977 *
5978 *	Allocate and initialize basic ATA port resources.
5979 *
5980 *	RETURNS:
5981 *	Allocated ATA port on success, NULL on failure.
5982 *
5983 *	LOCKING:
5984 *	Inherited from calling layer (may sleep).
5985 */
5986struct ata_port *ata_port_alloc(struct ata_host *host)
5987{
5988	struct ata_port *ap;
5989
5990	DPRINTK("ENTER\n");
5991
5992	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5993	if (!ap)
5994		return NULL;
5995
5996	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5997	ap->lock = &host->lock;
5998	ap->print_id = -1;
5999	ap->local_port_no = -1;
6000	ap->host = host;
6001	ap->dev = host->dev;
6002
6003#if defined(ATA_VERBOSE_DEBUG)
6004	/* turn on all debugging levels */
6005	ap->msg_enable = 0x00FF;
6006#elif defined(ATA_DEBUG)
6007	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6008#else
6009	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6010#endif
6011
6012	mutex_init(&ap->scsi_scan_mutex);
6013	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6014	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6015	INIT_LIST_HEAD(&ap->eh_done_q);
6016	init_waitqueue_head(&ap->eh_wait_q);
6017	init_completion(&ap->park_req_pending);
6018	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
6019		    TIMER_DEFERRABLE);
6020
6021	ap->cbl = ATA_CBL_NONE;
6022
6023	ata_link_init(ap, &ap->link, 0);
6024
6025#ifdef ATA_IRQ_TRAP
6026	ap->stats.unhandled_irq = 1;
6027	ap->stats.idle_irq = 1;
6028#endif
6029	ata_sff_port_init(ap);
6030
6031	return ap;
6032}
6033
6034static void ata_devres_release(struct device *gendev, void *res)
6035{
6036	struct ata_host *host = dev_get_drvdata(gendev);
6037	int i;
6038
6039	for (i = 0; i < host->n_ports; i++) {
6040		struct ata_port *ap = host->ports[i];
6041
6042		if (!ap)
6043			continue;
6044
6045		if (ap->scsi_host)
6046			scsi_host_put(ap->scsi_host);
6047
6048	}
6049
6050	dev_set_drvdata(gendev, NULL);
6051	ata_host_put(host);
6052}
6053
6054static void ata_host_release(struct kref *kref)
6055{
6056	struct ata_host *host = container_of(kref, struct ata_host, kref);
6057	int i;
6058
6059	for (i = 0; i < host->n_ports; i++) {
6060		struct ata_port *ap = host->ports[i];
6061
6062		kfree(ap->pmp_link);
6063		kfree(ap->slave_link);
6064		kfree(ap);
6065		host->ports[i] = NULL;
6066	}
6067	kfree(host);
6068}
6069
6070void ata_host_get(struct ata_host *host)
6071{
6072	kref_get(&host->kref);
6073}
6074
6075void ata_host_put(struct ata_host *host)
6076{
6077	kref_put(&host->kref, ata_host_release);
6078}
6079
6080/**
6081 *	ata_host_alloc - allocate and init basic ATA host resources
6082 *	@dev: generic device this host is associated with
6083 *	@max_ports: maximum number of ATA ports associated with this host
6084 *
6085 *	Allocate and initialize basic ATA host resources.  LLD calls
6086 *	this function to allocate a host, initialize it fully and
6087 *	attach it using ata_host_register().
6088 *
6089 *	@max_ports ports are allocated and host->n_ports is
6090 *	initialized to @max_ports.  The caller is allowed to decrease
6091 *	host->n_ports before calling ata_host_register().  The unused
6092 *	ports will be automatically freed on registration.
6093 *
6094 *	RETURNS:
6095 *	Allocated ATA host on success, NULL on failure.
6096 *
6097 *	LOCKING:
6098 *	Inherited from calling layer (may sleep).
6099 */
6100struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6101{
6102	struct ata_host *host;
6103	size_t sz;
6104	int i;
6105	void *dr;
6106
6107	DPRINTK("ENTER\n");
6108
6109	/* alloc a container for our list of ATA ports (buses) */
6110	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6111	host = kzalloc(sz, GFP_KERNEL);
6112	if (!host)
6113		return NULL;
6114
6115	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6116		goto err_free;
6117
6118	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
6119	if (!dr)
6120		goto err_out;
6121
6122	devres_add(dev, dr);
6123	dev_set_drvdata(dev, host);
6124
6125	spin_lock_init(&host->lock);
6126	mutex_init(&host->eh_mutex);
6127	host->dev = dev;
6128	host->n_ports = max_ports;
6129	kref_init(&host->kref);
6130
6131	/* allocate ports bound to this host */
6132	for (i = 0; i < max_ports; i++) {
6133		struct ata_port *ap;
6134
6135		ap = ata_port_alloc(host);
6136		if (!ap)
6137			goto err_out;
6138
6139		ap->port_no = i;
6140		host->ports[i] = ap;
6141	}
6142
6143	devres_remove_group(dev, NULL);
6144	return host;
6145
6146 err_out:
6147	devres_release_group(dev, NULL);
6148 err_free:
6149	kfree(host);
6150	return NULL;
6151}
6152
6153/**
6154 *	ata_host_alloc_pinfo - alloc host and init with port_info array
6155 *	@dev: generic device this host is associated with
6156 *	@ppi: array of ATA port_info to initialize host with
6157 *	@n_ports: number of ATA ports attached to this host
6158 *
6159 *	Allocate ATA host and initialize with info from @ppi.  If NULL
6160 *	terminated, @ppi may contain fewer entries than @n_ports.  The
6161 *	last entry will be used for the remaining ports.
6162 *
6163 *	RETURNS:
6164 *	Allocated ATA host on success, NULL on failure.
6165 *
6166 *	LOCKING:
6167 *	Inherited from calling layer (may sleep).
6168 */
6169struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6170				      const struct ata_port_info * const * ppi,
6171				      int n_ports)
6172{
6173	const struct ata_port_info *pi;
6174	struct ata_host *host;
6175	int i, j;
6176
6177	host = ata_host_alloc(dev, n_ports);
6178	if (!host)
6179		return NULL;
6180
6181	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6182		struct ata_port *ap = host->ports[i];
6183
6184		if (ppi[j])
6185			pi = ppi[j++];
6186
6187		ap->pio_mask = pi->pio_mask;
6188		ap->mwdma_mask = pi->mwdma_mask;
6189		ap->udma_mask = pi->udma_mask;
6190		ap->flags |= pi->flags;
6191		ap->link.flags |= pi->link_flags;
6192		ap->ops = pi->port_ops;
6193
6194		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6195			host->ops = pi->port_ops;
6196	}
6197
6198	return host;
6199}
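/*
 * Illustrative LLD usage sketch (my_port_info, ppi, dev and n_ports are
 * hypothetical driver-side names, not part of libata):
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &sata_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, n_ports);
 *
 * With the NULL terminator, the single entry is reused for every port.
 */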
6200
6201/**
6202 *	ata_slave_link_init - initialize slave link
6203 *	@ap: port to initialize slave link for
6204 *
6205 *	Create and initialize slave link for @ap.  This enables slave
6206 *	link handling on the port.
6207 *
6208 *	In libata, a port contains links and a link contains devices.
6209 *	There is a single host link but if a PMP is attached to it,
6210 *	there can be multiple fan-out links.  On SATA, there's usually
6211 *	a single device connected to a link but PATA and SATA
6212 *	controllers emulating TF based interface can have two - master
6213 *	and slave.
6214 *
6215 *	However, there are a few controllers which don't fit into this
6216 *	abstraction too well - SATA controllers which emulate TF
6217 *	interface with both master and slave devices but also have
6218 *	separate SCR register sets for each device.  These controllers
6219 *	need separate links for physical link handling
6220 *	(e.g. onlineness, link speed) but should be treated like a
6221 *	traditional M/S controller for everything else (e.g. command
6222 *	issue, softreset).
6223 *
6224 *	slave_link is libata's way of handling this class of
6225 *	controllers without impacting core layer too much.  For
6226 *	anything other than physical link handling, the default host
6227 *	link is used for both master and slave.  For physical link
6228 *	handling, separate @ap->slave_link is used.  All dirty details
6229 *	are implemented inside libata core layer.  From LLD's POV, the
6230 *	only difference is that prereset, hardreset and postreset are
6231 *	called once more for the slave link, so the reset sequence
6232 *	looks like the following.
6233 *
6234 *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6235 *	softreset(M) -> postreset(M) -> postreset(S)
6236 *
6237 *	Note that softreset is called only for the master.  Softreset
6238 *	resets both M/S by definition, so SRST on master should handle
6239 *	both (the standard method will work just fine).
6240 *
6241 *	LOCKING:
6242 *	Should be called before host is registered.
6243 *
6244 *	RETURNS:
6245 *	0 on success, -errno on failure.
6246 */
6247int ata_slave_link_init(struct ata_port *ap)
6248{
6249	struct ata_link *link;
6250
6251	WARN_ON(ap->slave_link);
6252	WARN_ON(ap->flags & ATA_FLAG_PMP);
6253
6254	link = kzalloc(sizeof(*link), GFP_KERNEL);
6255	if (!link)
6256		return -ENOMEM;
6257
6258	ata_link_init(ap, link, 1);
6259	ap->slave_link = link;
6260	return 0;
6261}
6262
6263static void ata_host_stop(struct device *gendev, void *res)
6264{
6265	struct ata_host *host = dev_get_drvdata(gendev);
6266	int i;
6267
6268	WARN_ON(!(host->flags & ATA_HOST_STARTED));
6269
6270	for (i = 0; i < host->n_ports; i++) {
6271		struct ata_port *ap = host->ports[i];
6272
6273		if (ap->ops->port_stop)
6274			ap->ops->port_stop(ap);
6275	}
6276
6277	if (host->ops->host_stop)
6278		host->ops->host_stop(host);
6279}
6280
6281/**
6282 *	ata_finalize_port_ops - finalize ata_port_operations
6283 *	@ops: ata_port_operations to finalize
6284 *
6285 *	An ata_port_operations can inherit from another ops and that
6286 *	ops can again inherit from another.  This can go on as many
6287 *	times as necessary as long as there is no loop in the
6288 *	inheritance chain.
6289 *
6290 *	Ops tables are finalized when the host is started.  NULL or
6291 *	unspecified entries are inherited from the closest ancestor
6292 *	which has the method and the entry is populated with it.
6293 *	After finalization, the ops table directly points to all the
6294 *	methods and ->inherits is no longer necessary and cleared.
6295 *
6296 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6297 *
6298 *	LOCKING:
6299 *	None.
6300 */
6301static void ata_finalize_port_ops(struct ata_port_operations *ops)
6302{
6303	static DEFINE_SPINLOCK(lock);
6304	const struct ata_port_operations *cur;
6305	void **begin = (void **)ops;
6306	void **end = (void **)&ops->inherits;
6307	void **pp;
6308
6309	if (!ops || !ops->inherits)
6310		return;
6311
6312	spin_lock(&lock);
6313
6314	for (cur = ops->inherits; cur; cur = cur->inherits) {
6315		void **inherit = (void **)cur;
6316
6317		for (pp = begin; pp < end; pp++, inherit++)
6318			if (!*pp)
6319				*pp = *inherit;
6320	}
6321
6322	for (pp = begin; pp < end; pp++)
6323		if (IS_ERR(*pp))
6324			*pp = NULL;
6325
6326	ops->inherits = NULL;
6327
6328	spin_unlock(&lock);
6329}
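/*
 * Illustrative sketch of the inheritance described above (my_sata_ops
 * and my_hardreset are hypothetical LLD names, not part of libata):
 *
 *	static struct ata_port_operations my_sata_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 *
 * When the host is started, every slot left unspecified is copied from
 * the closest ancestor that provides it, .hardreset keeps the override,
 * and the ATA_OP_NULL slot ends up as a real NULL pointer.
 */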
6330
6331/**
6332 *	ata_host_start - start and freeze ports of an ATA host
6333 *	@host: ATA host to start ports for
6334 *
6335 *	Start and then freeze ports of @host.  Started status is
6336 *	recorded in host->flags, so this function can be called
6337 *	multiple times.  Ports are guaranteed to get started only
6338 *	once.  If host->ops isn't initialized yet, it's set to the
6339 *	first non-dummy port ops.
6340 *
6341 *	LOCKING:
6342 *	Inherited from calling layer (may sleep).
6343 *
6344 *	RETURNS:
6345 *	0 if all ports are started successfully, -errno otherwise.
6346 */
6347int ata_host_start(struct ata_host *host)
6348{
6349	int have_stop = 0;
6350	void *start_dr = NULL;
6351	int i, rc;
6352
6353	if (host->flags & ATA_HOST_STARTED)
6354		return 0;
6355
6356	ata_finalize_port_ops(host->ops);
6357
6358	for (i = 0; i < host->n_ports; i++) {
6359		struct ata_port *ap = host->ports[i];
6360
6361		ata_finalize_port_ops(ap->ops);
6362
6363		if (!host->ops && !ata_port_is_dummy(ap))
6364			host->ops = ap->ops;
6365
6366		if (ap->ops->port_stop)
6367			have_stop = 1;
6368	}
6369
6370	if (host->ops->host_stop)
6371		have_stop = 1;
6372
6373	if (have_stop) {
6374		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6375		if (!start_dr)
6376			return -ENOMEM;
6377	}
6378
6379	for (i = 0; i < host->n_ports; i++) {
6380		struct ata_port *ap = host->ports[i];
6381
6382		if (ap->ops->port_start) {
6383			rc = ap->ops->port_start(ap);
6384			if (rc) {
6385				if (rc != -ENODEV)
6386					dev_err(host->dev,
6387						"failed to start port %d (errno=%d)\n",
6388						i, rc);
6389				goto err_out;
6390			}
6391		}
6392		ata_eh_freeze_port(ap);
6393	}
6394
6395	if (start_dr)
6396		devres_add(host->dev, start_dr);
6397	host->flags |= ATA_HOST_STARTED;
6398	return 0;
6399
6400 err_out:
6401	while (--i >= 0) {
6402		struct ata_port *ap = host->ports[i];
6403
6404		if (ap->ops->port_stop)
6405			ap->ops->port_stop(ap);
6406	}
6407	devres_free(start_dr);
6408	return rc;
6409}
6410
6411/**
6412 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6413 *	@host:	host to initialize
6414 *	@dev:	device host is attached to
6415 *	@ops:	port_ops
6416 *
6417 */
6418void ata_host_init(struct ata_host *host, struct device *dev,
6419		   struct ata_port_operations *ops)
6420{
6421	spin_lock_init(&host->lock);
6422	mutex_init(&host->eh_mutex);
6423	host->n_tags = ATA_MAX_QUEUE;
6424	host->dev = dev;
6425	host->ops = ops;
6426	kref_init(&host->kref);
6427}
6428
6429void __ata_port_probe(struct ata_port *ap)
6430{
6431	struct ata_eh_info *ehi = &ap->link.eh_info;
6432	unsigned long flags;
6433
6434	/* kick EH for boot probing */
6435	spin_lock_irqsave(ap->lock, flags);
6436
6437	ehi->probe_mask |= ATA_ALL_DEVICES;
6438	ehi->action |= ATA_EH_RESET;
6439	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6440
6441	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6442	ap->pflags |= ATA_PFLAG_LOADING;
6443	ata_port_schedule_eh(ap);
6444
6445	spin_unlock_irqrestore(ap->lock, flags);
6446}
6447
6448int ata_port_probe(struct ata_port *ap)
6449{
6450	int rc = 0;
6451
6452	if (ap->ops->error_handler) {
6453		__ata_port_probe(ap);
6454		ata_port_wait_eh(ap);
6455	} else {
6456		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6457		rc = ata_bus_probe(ap);
6458		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6459	}
6460	return rc;
6461}
6462
6463
6464static void async_port_probe(void *data, async_cookie_t cookie)
6465{
6466	struct ata_port *ap = data;
6467
6468	/*
6469	 * If we're not allowed to scan this host in parallel,
6470	 * we need to wait until all previous scans have completed
6471	 * before going further.
6472	 * Jeff Garzik says this is only within a controller, so we
6473	 * don't need to wait for port 0, only for later ports.
6474	 */
6475	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6476		async_synchronize_cookie(cookie);
6477
6478	(void)ata_port_probe(ap);
6479
6480	/* in order to keep device order, we need to synchronize at this point */
6481	async_synchronize_cookie(cookie);
6482
6483	ata_scsi_scan_host(ap, 1);
6484}
6485
6486/**
6487 *	ata_host_register - register initialized ATA host
6488 *	@host: ATA host to register
6489 *	@sht: template for SCSI host
6490 *
6491 *	Register initialized ATA host.  @host is allocated using
6492 *	ata_host_alloc() and fully initialized by LLD.  This function
6493 *	starts ports, registers @host with ATA and SCSI layers and
6494 *	probes registered devices.
6495 *
6496 *	LOCKING:
6497 *	Inherited from calling layer (may sleep).
6498 *
6499 *	RETURNS:
6500 *	0 on success, -errno otherwise.
6501 */
6502int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6503{
6504	int i, rc;
6505
6506	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
6507
6508	/* host must have been started */
6509	if (!(host->flags & ATA_HOST_STARTED)) {
6510		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6511		WARN_ON(1);
6512		return -EINVAL;
6513	}
6514
6515	/* Blow away unused ports.  This happens when LLD can't
6516	 * determine the exact number of ports to allocate at
6517	 * allocation time.
6518	 */
6519	for (i = host->n_ports; host->ports[i]; i++)
6520		kfree(host->ports[i]);
6521
6522	/* give ports names and add SCSI hosts */
6523	for (i = 0; i < host->n_ports; i++) {
6524		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6525		host->ports[i]->local_port_no = i + 1;
6526	}
6527
6528	/* Create associated sysfs transport objects  */
6529	for (i = 0; i < host->n_ports; i++) {
6530		rc = ata_tport_add(host->dev, host->ports[i]);
6531		if (rc) {
6532			goto err_tadd;
6533		}
6534	}
6535
6536	rc = ata_scsi_add_hosts(host, sht);
6537	if (rc)
6538		goto err_tadd;
6539
6540	/* set cable, sata_spd_limit and report */
6541	for (i = 0; i < host->n_ports; i++) {
6542		struct ata_port *ap = host->ports[i];
6543		unsigned long xfer_mask;
6544
6545		/* set SATA cable type if still unset */
6546		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6547			ap->cbl = ATA_CBL_SATA;
6548
6549		/* init sata_spd_limit to the current value */
6550		sata_link_init_spd(&ap->link);
6551		if (ap->slave_link)
6552			sata_link_init_spd(ap->slave_link);
6553
6554		/* print per-port info to dmesg */
6555		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6556					      ap->udma_mask);
6557
6558		if (!ata_port_is_dummy(ap)) {
6559			ata_port_info(ap, "%cATA max %s %s\n",
6560				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6561				      ata_mode_string(xfer_mask),
6562				      ap->link.eh_info.desc);
6563			ata_ehi_clear_desc(&ap->link.eh_info);
6564		} else
6565			ata_port_info(ap, "DUMMY\n");
6566	}
6567
6568	/* perform each probe asynchronously */
6569	for (i = 0; i < host->n_ports; i++) {
6570		struct ata_port *ap = host->ports[i];
6571		async_schedule(async_port_probe, ap);
6572	}
6573
6574	return 0;
6575
6576 err_tadd:
6577	while (--i >= 0) {
6578		ata_tport_delete(host->ports[i]);
6579	}
6580	return rc;
6581
6582}
6583
6584/**
6585 *	ata_host_activate - start host, request IRQ and register it
6586 *	@host: target ATA host
6587 *	@irq: IRQ to request
6588 *	@irq_handler: irq_handler used when requesting IRQ
6589 *	@irq_flags: irq_flags used when requesting IRQ
6590 *	@sht: scsi_host_template to use when registering the host
6591 *
6592 *	After allocating an ATA host and initializing it, most libata
6593 *	LLDs perform three steps to activate the host - start host,
6594 *	request IRQ and register it.  This helper takes necessary
6595 *	arguments and performs the three steps in one go.
6596 *
6597 *	An invalid IRQ skips the IRQ registration and expects the host to
6598 *	have set polling mode on the port. In this case, @irq_handler
6599 *	should be NULL.
6600 *
6601 *	LOCKING:
6602 *	Inherited from calling layer (may sleep).
6603 *
6604 *	RETURNS:
6605 *	0 on success, -errno otherwise.
6606 */
6607int ata_host_activate(struct ata_host *host, int irq,
6608		      irq_handler_t irq_handler, unsigned long irq_flags,
6609		      struct scsi_host_template *sht)
6610{
6611	int i, rc;
6612	char *irq_desc;
6613
6614	rc = ata_host_start(host);
6615	if (rc)
6616		return rc;
6617
6618	/* Special case for polling mode */
6619	if (!irq) {
6620		WARN_ON(irq_handler);
6621		return ata_host_register(host, sht);
6622	}
6623
6624	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6625				  dev_driver_string(host->dev),
6626				  dev_name(host->dev));
6627	if (!irq_desc)
6628		return -ENOMEM;
6629
6630	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6631			      irq_desc, host);
6632	if (rc)
6633		return rc;
6634
6635	for (i = 0; i < host->n_ports; i++)
6636		ata_port_desc(host->ports[i], "irq %d", irq);
6637
6638	rc = ata_host_register(host, sht);
6639	/* if failed, just free the IRQ and leave ports alone */
6640	if (rc)
6641		devm_free_irq(host->dev, irq, host);
6642
6643	return rc;
6644}
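/*
 * Illustrative probe-path sketch tying the helpers together (ppi,
 * my_interrupt and my_sht are hypothetical driver-side names):
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	... map BARs and fill in per-port I/O addresses ...
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */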
6645
6646/**
6647 *	ata_port_detach - Detach ATA port in preparation of device removal
6648 *	@ap: ATA port to be detached
6649 *
6650 *	Detach all ATA devices and the associated SCSI devices of @ap;
6651 *	then, remove the associated SCSI host.  @ap is guaranteed to
6652 *	be quiescent on return from this function.
6653 *
6654 *	LOCKING:
6655 *	Kernel thread context (may sleep).
6656 */
6657static void ata_port_detach(struct ata_port *ap)
6658{
6659	unsigned long flags;
6660	struct ata_link *link;
6661	struct ata_device *dev;
6662
6663	if (!ap->ops->error_handler)
6664		goto skip_eh;
6665
6666	/* tell EH we're leaving & flush EH */
6667	spin_lock_irqsave(ap->lock, flags);
6668	ap->pflags |= ATA_PFLAG_UNLOADING;
6669	ata_port_schedule_eh(ap);
6670	spin_unlock_irqrestore(ap->lock, flags);
6671
6672	/* wait till EH commits suicide */
6673	ata_port_wait_eh(ap);
6674
6675	/* it better be dead now */
6676	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6677
6678	cancel_delayed_work_sync(&ap->hotplug_task);
6679
6680 skip_eh:
6681	/* clean up zpodd on port removal */
6682	ata_for_each_link(link, ap, HOST_FIRST) {
6683		ata_for_each_dev(dev, link, ALL) {
6684			if (zpodd_dev_enabled(dev))
6685				zpodd_exit(dev);
6686		}
6687	}
6688	if (ap->pmp_link) {
6689		int i;
6690		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6691			ata_tlink_delete(&ap->pmp_link[i]);
6692	}
6693	/* remove the associated SCSI host */
6694	scsi_remove_host(ap->scsi_host);
6695	ata_tport_delete(ap);
6696}
6697
6698/**
6699 *	ata_host_detach - Detach all ports of an ATA host
6700 *	@host: Host to detach
6701 *
6702 *	Detach all ports of @host.
6703 *
6704 *	LOCKING:
6705 *	Kernel thread context (may sleep).
6706 */
6707void ata_host_detach(struct ata_host *host)
6708{
6709	int i;
6710
6711	for (i = 0; i < host->n_ports; i++)
6712		ata_port_detach(host->ports[i]);
6713
6714	/* the host is dead now, dissociate ACPI */
6715	ata_acpi_dissociate(host);
6716}
6717
6718#ifdef CONFIG_PCI
6719
6720/**
6721 *	ata_pci_remove_one - PCI layer callback for device removal
6722 *	@pdev: PCI device that was removed
6723 *
6724 *	PCI layer indicates to libata via this hook that hot-unplug or
6725 *	module unload event has occurred.  Detach all ports.  Resource
6726 *	release is handled via devres.
6727 *
6728 *	LOCKING:
6729 *	Inherited from PCI layer (may sleep).
6730 */
6731void ata_pci_remove_one(struct pci_dev *pdev)
6732{
6733	struct ata_host *host = pci_get_drvdata(pdev);
6734
6735	ata_host_detach(host);
6736}
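/*
 * Illustrative sketch (hypothetical driver names): a typical PCI LLD
 * points its removal and power management hooks at the helpers in this
 * file:
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_ata",
 *		.id_table	= my_pci_ids,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */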
6737
6738/* move to PCI subsystem */
6739int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6740{
6741	unsigned long tmp = 0;
6742
6743	switch (bits->width) {
6744	case 1: {
6745		u8 tmp8 = 0;
6746		pci_read_config_byte(pdev, bits->reg, &tmp8);
6747		tmp = tmp8;
6748		break;
6749	}
6750	case 2: {
6751		u16 tmp16 = 0;
6752		pci_read_config_word(pdev, bits->reg, &tmp16);
6753		tmp = tmp16;
6754		break;
6755	}
6756	case 4: {
6757		u32 tmp32 = 0;
6758		pci_read_config_dword(pdev, bits->reg, &tmp32);
6759		tmp = tmp32;
6760		break;
6761	}
6762
6763	default:
6764		return -EINVAL;
6765	}
6766
6767	tmp &= bits->mask;
6768
6769	return (tmp == bits->val) ? 1 : 0;
6770}
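/*
 * Illustrative sketch (the register offset, mask and value are
 * hypothetical): checking whether a legacy IDE channel is enabled in
 * PCI config space.
 *
 *	static const struct pci_bits enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &enable_bits))
 *		return -ENOENT;
 */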
6771
6772#ifdef CONFIG_PM
6773void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6774{
6775	pci_save_state(pdev);
6776	pci_disable_device(pdev);
6777
6778	if (mesg.event & PM_EVENT_SLEEP)
6779		pci_set_power_state(pdev, PCI_D3hot);
6780}
6781
6782int ata_pci_device_do_resume(struct pci_dev *pdev)
6783{
6784	int rc;
6785
6786	pci_set_power_state(pdev, PCI_D0);
6787	pci_restore_state(pdev);
6788
6789	rc = pcim_enable_device(pdev);
6790	if (rc) {
6791		dev_err(&pdev->dev,
6792			"failed to enable device after resume (%d)\n", rc);
6793		return rc;
6794	}
6795
6796	pci_set_master(pdev);
6797	return 0;
6798}
6799
6800int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6801{
6802	struct ata_host *host = pci_get_drvdata(pdev);
6803	int rc = 0;
6804
6805	rc = ata_host_suspend(host, mesg);
6806	if (rc)
6807		return rc;
6808
6809	ata_pci_device_do_suspend(pdev, mesg);
6810
6811	return 0;
6812}
6813
6814int ata_pci_device_resume(struct pci_dev *pdev)
6815{
6816	struct ata_host *host = pci_get_drvdata(pdev);
6817	int rc;
6818
6819	rc = ata_pci_device_do_resume(pdev);
6820	if (rc == 0)
6821		ata_host_resume(host);
6822	return rc;
6823}
6824#endif /* CONFIG_PM */
6825
6826#endif /* CONFIG_PCI */
6827
6828/**
6829 *	ata_platform_remove_one - Platform layer callback for device removal
6830 *	@pdev: Platform device that was removed
6831 *
6832 *	Platform layer indicates to libata via this hook that hot-unplug or
6833 *	module unload event has occurred.  Detach all ports.  Resource
6834 *	release is handled via devres.
6835 *
6836 *	LOCKING:
6837 *	Inherited from platform layer (may sleep).
6838 */
6839int ata_platform_remove_one(struct platform_device *pdev)
6840{
6841	struct ata_host *host = platform_get_drvdata(pdev);
6842
6843	ata_host_detach(host);
6844
6845	return 0;
6846}
6847
6848static int __init ata_parse_force_one(char **cur,
6849				      struct ata_force_ent *force_ent,
6850				      const char **reason)
6851{
6852	static const struct ata_force_param force_tbl[] __initconst = {
6853		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6854		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6855		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6856		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6857		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6858		{ "sata",	.cbl		= ATA_CBL_SATA },
6859		{ "1.5Gbps",	.spd_limit	= 1 },
6860		{ "3.0Gbps",	.spd_limit	= 2 },
6861		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6862		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6863		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
6864		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
6865		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6866		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6867		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6868		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6869		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6870		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6871		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6872		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6873		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6874		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6875		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6876		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6877		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6878		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6879		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6880		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6881		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6882		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6883		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6884		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6885		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6886		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6887		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6888		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6889		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6890		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6891		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6892		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6893		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6894		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6895		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6896		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6897		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6898		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6899		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6900		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6901		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6902		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6903		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6904		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6905		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6906	};
6907	char *start = *cur, *p = *cur;
6908	char *id, *val, *endp;
6909	const struct ata_force_param *match_fp = NULL;
6910	int nr_matches = 0, i;
6911
6912	/* find where this param ends and update *cur */
6913	while (*p != '\0' && *p != ',')
6914		p++;
6915
6916	if (*p == '\0')
6917		*cur = p;
6918	else
6919		*cur = p + 1;
6920
6921	*p = '\0';
6922
6923	/* parse */
6924	p = strchr(start, ':');
6925	if (!p) {
6926		val = strstrip(start);
6927		goto parse_val;
6928	}
6929	*p = '\0';
6930
6931	id = strstrip(start);
6932	val = strstrip(p + 1);
6933
6934	/* parse id */
6935	p = strchr(id, '.');
6936	if (p) {
6937		*p++ = '\0';
6938		force_ent->device = simple_strtoul(p, &endp, 10);
6939		if (p == endp || *endp != '\0') {
6940			*reason = "invalid device";
6941			return -EINVAL;
6942		}
6943	}
6944
6945	force_ent->port = simple_strtoul(id, &endp, 10);
6946	if (id == endp || *endp != '\0') {
6947		*reason = "invalid port/link";
6948		return -EINVAL;
6949	}
6950
6951 parse_val:
6952	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6953	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6954		const struct ata_force_param *fp = &force_tbl[i];
6955
6956		if (strncasecmp(val, fp->name, strlen(val)))
6957			continue;
6958
6959		nr_matches++;
6960		match_fp = fp;
6961
6962		if (strcasecmp(val, fp->name) == 0) {
6963			nr_matches = 1;
6964			break;
6965		}
6966	}
6967
6968	if (!nr_matches) {
6969		*reason = "unknown value";
6970		return -EINVAL;
6971	}
6972	if (nr_matches > 1) {
6973		*reason = "ambiguous value";
6974		return -EINVAL;
6975	}
6976
6977	force_ent->param = *match_fp;
6978
6979	return 0;
6980}
6981
6982static void __init ata_parse_force_param(void)
6983{
6984	int idx = 0, size = 1;
6985	int last_port = -1, last_device = -1;
6986	char *p, *cur, *next;
6987
6988	/* calculate maximum number of params and allocate force_tbl */
6989	for (p = ata_force_param_buf; *p; p++)
6990		if (*p == ',')
6991			size++;
6992
6993	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6994	if (!ata_force_tbl) {
6995		printk(KERN_WARNING "ata: failed to extend force table, "
6996		       "libata.force ignored\n");
6997		return;
6998	}
6999
7000	/* parse and populate the table */
7001	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7002		const char *reason = "";
7003		struct ata_force_ent te = { .port = -1, .device = -1 };
7004
7005		next = cur;
7006		if (ata_parse_force_one(&next, &te, &reason)) {
7007			printk(KERN_WARNING "ata: failed to parse force "
7008			       "parameter \"%s\" (%s)\n",
7009			       cur, reason);
7010			continue;
7011		}
7012
7013		if (te.port == -1) {
7014			te.port = last_port;
7015			te.device = last_device;
7016		}
7017
7018		ata_force_tbl[idx++] = te;
7019
7020		last_port = te.port;
7021		last_device = te.device;
7022	}
7023
7024	ata_force_tbl_size = idx;
7025}
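/*
 * Illustrative example of the syntax parsed above (values taken from
 * force_tbl; the port numbers are made up):
 *
 *	libata.force=1.5Gbps,2:noncq,3:norst
 *
 * limits every link to 1.5 Gbps, disables NCQ on port 2 and suppresses
 * both hard and soft resets on port 3.
 */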
7026
7027static int __init ata_init(void)
7028{
7029	int rc;
7030
7031	ata_parse_force_param();
7032
7033	rc = ata_sff_init();
7034	if (rc) {
7035		kfree(ata_force_tbl);
7036		return rc;
7037	}
7038
7039	libata_transport_init();
7040	ata_scsi_transport_template = ata_attach_transport();
7041	if (!ata_scsi_transport_template) {
7042		ata_sff_exit();
7043		rc = -ENOMEM;
7044		goto err_out;
7045	}
7046
7047	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7048	return 0;
7049
7050err_out:
7051	return rc;
7052}
7053
7054static void __exit ata_exit(void)
7055{
7056	ata_release_transport(ata_scsi_transport_template);
7057	libata_transport_exit();
7058	ata_sff_exit();
7059	kfree(ata_force_tbl);
7060}
7061
7062subsys_initcall(ata_init);
7063module_exit(ata_exit);
7064
7065static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
7066
7067int ata_ratelimit(void)
7068{
7069	return __ratelimit(&ratelimit);
7070}
7071
7072/**
7073 *	ata_msleep - ATA EH owner aware msleep
7074 *	@ap: ATA port to attribute the sleep to
7075 *	@msecs: duration to sleep in milliseconds
7076 *
7077 *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
7078 *	ownership is released before going to sleep and reacquired
7079 *	after the sleep is complete.  IOW, other ports sharing the
7080 *	@ap->host will be allowed to own the EH while this task is
7081 *	sleeping.
7082 *
7083 *	LOCKING:
7084 *	Might sleep.
7085 */
7086void ata_msleep(struct ata_port *ap, unsigned int msecs)
7087{
7088	bool owns_eh = ap && ap->host->eh_owner == current;
7089
7090	if (owns_eh)
7091		ata_eh_release(ap);
7092
7093	if (msecs < 20) {
7094		unsigned long usecs = msecs * USEC_PER_MSEC;
7095		usleep_range(usecs, usecs + 50);
7096	} else {
7097		msleep(msecs);
7098	}
7099
7100	if (owns_eh)
7101		ata_eh_acquire(ap);
7102}
7103
7104/**
7105 *	ata_wait_register - wait until register value changes
7106 *	@ap: ATA port to wait register for, can be NULL
7107 *	@reg: IO-mapped register
7108 *	@mask: Mask to apply to read register value
7109 *	@val: Wait condition
7110 *	@interval: polling interval in milliseconds
7111 *	@timeout: timeout in milliseconds
7112 *
7113 *	Waiting for some bits of register to change is a common
7114 *	operation for ATA controllers.  This function reads 32bit LE
7115 *	IO-mapped register @reg and tests for the following condition.
7116 *
7117 *	(*@reg & mask) != val
7118 *
7119 *	If the condition is met, it returns; otherwise, the process is
7120 *	repeated after @interval until timeout.
7121 *
7122 *	LOCKING:
7123 *	Kernel thread context (may sleep)
7124 *
7125 *	RETURNS:
7126 *	The final register value.
7127 */
7128u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
7129		      unsigned long interval, unsigned long timeout)
7130{
7131	unsigned long deadline;
7132	u32 tmp;
7133
7134	tmp = ioread32(reg);
7135
7136	/* Calculate timeout _after_ the first read to make sure
7137	 * preceding writes reach the controller before starting to
7138	 * eat away the timeout.
7139	 */
7140	deadline = ata_deadline(jiffies, timeout);
7141
7142	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
7143		ata_msleep(ap, interval);
7144		tmp = ioread32(reg);
7145	}
7146
7147	return tmp;
7148}
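/*
 * Illustrative usage sketch (MY_STATUS and MY_BUSY are hypothetical
 * controller-specific definitions): wait up to one second, polling
 * every 10ms, for a busy bit to clear:
 *
 *	status = ata_wait_register(ap, mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;
 */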
7149
7150/**
7151 *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
7152 *	@link: Link receiving the event
7153 *
7154 *	Test whether the received PHY event has to be ignored or not.
7155 *
7156 *	LOCKING:
7157 *	None.
7158 *
7159 *	RETURNS:
7160 *	True if the event has to be ignored.
7161 */
7162bool sata_lpm_ignore_phy_events(struct ata_link *link)
7163{
7164	unsigned long lpm_timeout = link->last_lpm_change +
7165				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7166
7167	/* if LPM is enabled, PHYRDY doesn't mean anything */
7168	if (link->lpm_policy > ATA_LPM_MAX_POWER)
7169		return true;
7170
7171	/* ignore the first PHY event after the LPM policy changed
7172	 * as it might be spurious
7173	 */
7174	if ((link->flags & ATA_LFLAG_CHANGED) &&
7175	    time_before(jiffies, lpm_timeout))
7176		return true;
7177
7178	return false;
7179}
7180EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7181
7182/*
7183 * Dummy port_ops
7184 */
7185static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7186{
7187	return AC_ERR_SYSTEM;
7188}
7189
7190static void ata_dummy_error_handler(struct ata_port *ap)
7191{
7192	/* truly dummy */
7193}
7194
7195struct ata_port_operations ata_dummy_port_ops = {
7196	.qc_prep		= ata_noop_qc_prep,
7197	.qc_issue		= ata_dummy_qc_issue,
7198	.error_handler		= ata_dummy_error_handler,
7199	.sched_eh		= ata_std_sched_eh,
7200	.end_eh			= ata_std_end_eh,
7201};
7202
7203const struct ata_port_info ata_dummy_port_info = {
7204	.port_ops		= &ata_dummy_port_ops,
7205};
7206
7207/*
7208 * Utility print functions
7209 */
7210void ata_port_printk(const struct ata_port *ap, const char *level,
7211		     const char *fmt, ...)
7212{
7213	struct va_format vaf;
7214	va_list args;
7215
7216	va_start(args, fmt);
7217
7218	vaf.fmt = fmt;
7219	vaf.va = &args;
7220
7221	printk("%sata%u: %pV", level, ap->print_id, &vaf);
7222
7223	va_end(args);
7224}
7225EXPORT_SYMBOL(ata_port_printk);
7226
7227void ata_link_printk(const struct ata_link *link, const char *level,
7228		     const char *fmt, ...)
7229{
7230	struct va_format vaf;
7231	va_list args;
7232
7233	va_start(args, fmt);
7234
7235	vaf.fmt = fmt;
7236	vaf.va = &args;
7237
7238	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7239		printk("%sata%u.%02u: %pV",
7240		       level, link->ap->print_id, link->pmp, &vaf);
7241	else
7242		printk("%sata%u: %pV",
7243		       level, link->ap->print_id, &vaf);
7244
7245	va_end(args);
7246}
7247EXPORT_SYMBOL(ata_link_printk);
7248
7249void ata_dev_printk(const struct ata_device *dev, const char *level,
7250		    const char *fmt, ...)
7251{
7252	struct va_format vaf;
7253	va_list args;
7254
7255	va_start(args, fmt);
7256
7257	vaf.fmt = fmt;
7258	vaf.va = &args;
7259
7260	printk("%sata%u.%02u: %pV",
7261	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7262	       &vaf);
7263
7264	va_end(args);
7265}
7266EXPORT_SYMBOL(ata_dev_printk);
7267
7268void ata_print_version(const struct device *dev, const char *version)
7269{
7270	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7271}
7272EXPORT_SYMBOL(ata_print_version);
7273
7274/*
7275 * libata is essentially a library of internal helper functions for
7276 * low-level ATA host controller drivers.  As such, the API/ABI is
7277 * likely to change as new drivers are added and updated.
7278 * Do not depend on ABI/API stability.
7279 */
7280EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7281EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7282EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7283EXPORT_SYMBOL_GPL(ata_base_port_ops);
7284EXPORT_SYMBOL_GPL(sata_port_ops);
7285EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7286EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7287EXPORT_SYMBOL_GPL(ata_link_next);
7288EXPORT_SYMBOL_GPL(ata_dev_next);
7289EXPORT_SYMBOL_GPL(ata_std_bios_param);
7290EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7291EXPORT_SYMBOL_GPL(ata_host_init);
7292EXPORT_SYMBOL_GPL(ata_host_alloc);
7293EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7294EXPORT_SYMBOL_GPL(ata_slave_link_init);
7295EXPORT_SYMBOL_GPL(ata_host_start);
7296EXPORT_SYMBOL_GPL(ata_host_register);
7297EXPORT_SYMBOL_GPL(ata_host_activate);
7298EXPORT_SYMBOL_GPL(ata_host_detach);
7299EXPORT_SYMBOL_GPL(ata_sg_init);
7300EXPORT_SYMBOL_GPL(ata_qc_complete);
7301EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7302EXPORT_SYMBOL_GPL(atapi_cmd_type);
7303EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7304EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7305EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7306EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7307EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7308EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7309EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7310EXPORT_SYMBOL_GPL(ata_mode_string);
7311EXPORT_SYMBOL_GPL(ata_id_xfermask);
7312EXPORT_SYMBOL_GPL(ata_do_set_mode);
7313EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7314EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7315EXPORT_SYMBOL_GPL(ata_dev_disable);
7316EXPORT_SYMBOL_GPL(sata_set_spd);
7317EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7318EXPORT_SYMBOL_GPL(sata_link_debounce);
7319EXPORT_SYMBOL_GPL(sata_link_resume);
7320EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7321EXPORT_SYMBOL_GPL(ata_std_prereset);
7322EXPORT_SYMBOL_GPL(sata_link_hardreset);
7323EXPORT_SYMBOL_GPL(sata_std_hardreset);
7324EXPORT_SYMBOL_GPL(ata_std_postreset);
7325EXPORT_SYMBOL_GPL(ata_dev_classify);
7326EXPORT_SYMBOL_GPL(ata_dev_pair);
7327EXPORT_SYMBOL_GPL(ata_ratelimit);
7328EXPORT_SYMBOL_GPL(ata_msleep);
7329EXPORT_SYMBOL_GPL(ata_wait_register);
7330EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7331EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7332EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7333EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7334EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7335EXPORT_SYMBOL_GPL(sata_scr_valid);
7336EXPORT_SYMBOL_GPL(sata_scr_read);
7337EXPORT_SYMBOL_GPL(sata_scr_write);
7338EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7339EXPORT_SYMBOL_GPL(ata_link_online);
7340EXPORT_SYMBOL_GPL(ata_link_offline);
7341#ifdef CONFIG_PM
7342EXPORT_SYMBOL_GPL(ata_host_suspend);
7343EXPORT_SYMBOL_GPL(ata_host_resume);
7344#endif /* CONFIG_PM */
7345EXPORT_SYMBOL_GPL(ata_id_string);
7346EXPORT_SYMBOL_GPL(ata_id_c_string);
7347EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7348EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7349
7350EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7351EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7352EXPORT_SYMBOL_GPL(ata_timing_compute);
7353EXPORT_SYMBOL_GPL(ata_timing_merge);
7354EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7355
7356#ifdef CONFIG_PCI
7357EXPORT_SYMBOL_GPL(pci_test_config_bits);
7358EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7359#ifdef CONFIG_PM
7360EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7361EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7362EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7363EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7364#endif /* CONFIG_PM */
7365#endif /* CONFIG_PCI */
7366
7367EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7368
7369EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7370EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7371EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7372EXPORT_SYMBOL_GPL(ata_port_desc);
7373#ifdef CONFIG_PCI
7374EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7375#endif /* CONFIG_PCI */
7376EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7377EXPORT_SYMBOL_GPL(ata_link_abort);
7378EXPORT_SYMBOL_GPL(ata_port_abort);
7379EXPORT_SYMBOL_GPL(ata_port_freeze);
7380EXPORT_SYMBOL_GPL(sata_async_notification);
7381EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7382EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7383EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7384EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7385EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7386EXPORT_SYMBOL_GPL(ata_do_eh);
7387EXPORT_SYMBOL_GPL(ata_std_error_handler);
7388
7389EXPORT_SYMBOL_GPL(ata_cable_40wire);
7390EXPORT_SYMBOL_GPL(ata_cable_80wire);
7391EXPORT_SYMBOL_GPL(ata_cable_unknown);
7392EXPORT_SYMBOL_GPL(ata_cable_ignore);
7393EXPORT_SYMBOL_GPL(ata_cable_sata);
7394EXPORT_SYMBOL_GPL(ata_host_get);
7395EXPORT_SYMBOL_GPL(ata_host_put);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  libata-core.c - helper library for ATA
   4 *
 
 
 
 
   5 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   6 *  Copyright 2003-2004 Jeff Garzik
   7 *
   8 *  libata documentation is available via 'make {ps|pdf}docs',
   9 *  as Documentation/driver-api/libata.rst
  10 *
  11 *  Hardware documentation available from http://www.t13.org/ and
  12 *  http://www.sata-io.org/
  13 *
  14 *  Standards documents from:
  15 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
  16 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
  17 *	http://www.sata-io.org (SATA)
  18 *	http://www.compactflash.org (CF)
  19 *	http://www.qic.org (QIC157 - Tape and DSC)
  20 *	http://www.ce-ata.org (CE-ATA: not supported)
  21 *
  22 * libata is essentially a library of internal helper functions for
  23 * low-level ATA host controller drivers.  As such, the API/ABI is
  24 * likely to change as new drivers are added and updated.
  25 * Do not depend on ABI/API stability.
  26 */
  27
  28#include <linux/kernel.h>
  29#include <linux/module.h>
  30#include <linux/pci.h>
  31#include <linux/init.h>
  32#include <linux/list.h>
  33#include <linux/mm.h>
  34#include <linux/spinlock.h>
  35#include <linux/blkdev.h>
  36#include <linux/delay.h>
  37#include <linux/timer.h>
  38#include <linux/time.h>
  39#include <linux/interrupt.h>
  40#include <linux/completion.h>
  41#include <linux/suspend.h>
  42#include <linux/workqueue.h>
  43#include <linux/scatterlist.h>
  44#include <linux/io.h>
 
  45#include <linux/log2.h>
  46#include <linux/slab.h>
  47#include <linux/glob.h>
  48#include <scsi/scsi.h>
  49#include <scsi/scsi_cmnd.h>
  50#include <scsi/scsi_host.h>
  51#include <linux/libata.h>
  52#include <asm/byteorder.h>
  53#include <asm/unaligned.h>
  54#include <linux/cdrom.h>
  55#include <linux/ratelimit.h>
  56#include <linux/leds.h>
  57#include <linux/pm_runtime.h>
  58#include <linux/platform_device.h>
  59#include <asm/setup.h>
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/libata.h>
  63
  64#include "libata.h"
  65#include "libata-transport.h"
  66
 
 
 
 
 
  67const struct ata_port_operations ata_base_port_ops = {
  68	.prereset		= ata_std_prereset,
  69	.postreset		= ata_std_postreset,
  70	.error_handler		= ata_std_error_handler,
  71	.sched_eh		= ata_std_sched_eh,
  72	.end_eh			= ata_std_end_eh,
  73};
  74
  75const struct ata_port_operations sata_port_ops = {
  76	.inherits		= &ata_base_port_ops,
  77
  78	.qc_defer		= ata_std_qc_defer,
  79	.hardreset		= sata_std_hardreset,
  80};
  81EXPORT_SYMBOL_GPL(sata_port_ops);
  82
  83static unsigned int ata_dev_init_params(struct ata_device *dev,
  84					u16 heads, u16 sectors);
  85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
  86static void ata_dev_xfermask(struct ata_device *dev);
  87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
  88
  89atomic_t ata_print_id = ATOMIC_INIT(0);
  90
  91#ifdef CONFIG_ATA_FORCE
  92struct ata_force_param {
  93	const char	*name;
  94	u8		cbl;
  95	u8		spd_limit;
  96	unsigned long	xfer_mask;
  97	unsigned int	horkage_on;
  98	unsigned int	horkage_off;
  99	u16		lflags;
 100};
 101
 102struct ata_force_ent {
 103	int			port;
 104	int			device;
 105	struct ata_force_param	param;
 106};
 107
 108static struct ata_force_ent *ata_force_tbl;
 109static int ata_force_tbl_size;
 110
 111static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
 112/* param_buf is thrown away after initialization, disallow read */
 113module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
 114MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
 115#endif
 116
 117static int atapi_enabled = 1;
 118module_param(atapi_enabled, int, 0444);
 119MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
 120
 121static int atapi_dmadir = 0;
 122module_param(atapi_dmadir, int, 0444);
 123MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
 124
 125int atapi_passthru16 = 1;
 126module_param(atapi_passthru16, int, 0444);
 127MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
 128
 129int libata_fua = 0;
 130module_param_named(fua, libata_fua, int, 0444);
 131MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
 132
 133static int ata_ignore_hpa;
 134module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
 135MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
 136
 137static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
 138module_param_named(dma, libata_dma_mask, int, 0444);
 139MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
 140
 141static int ata_probe_timeout;
 142module_param(ata_probe_timeout, int, 0444);
 143MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
 144
 145int libata_noacpi = 0;
 146module_param_named(noacpi, libata_noacpi, int, 0444);
 147MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
 148
 149int libata_allow_tpm = 0;
 150module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
 151MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
 152
 153static int atapi_an;
 154module_param(atapi_an, int, 0444);
  155MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
 156
 157MODULE_AUTHOR("Jeff Garzik");
 158MODULE_DESCRIPTION("Library module for ATA devices");
 159MODULE_LICENSE("GPL");
 160MODULE_VERSION(DRV_VERSION);
 161
 162
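/*
 * The low nibble of SStatus is the DET (device detection) field; a value
 * of 3 means "device presence detected and Phy communication established",
 * which is what we treat as online here.
 */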
 163static bool ata_sstatus_online(u32 sstatus)
 164{
 165	return (sstatus & 0xf) == 0x3;
 166}
 167
 168/**
 169 *	ata_link_next - link iteration helper
 170 *	@link: the previous link, NULL to start
 171 *	@ap: ATA port containing links to iterate
 172 *	@mode: iteration mode, one of ATA_LITER_*
 173 *
 174 *	LOCKING:
 175 *	Host lock or EH context.
 176 *
 177 *	RETURNS:
 178 *	Pointer to the next link.
 179 */
 180struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
 181			       enum ata_link_iter_mode mode)
 182{
 183	BUG_ON(mode != ATA_LITER_EDGE &&
 184	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
 185
 186	/* NULL link indicates start of iteration */
 187	if (!link)
 188		switch (mode) {
 189		case ATA_LITER_EDGE:
 190		case ATA_LITER_PMP_FIRST:
 191			if (sata_pmp_attached(ap))
 192				return ap->pmp_link;
 193			fallthrough;
 194		case ATA_LITER_HOST_FIRST:
 195			return &ap->link;
 196		}
 197
 198	/* we just iterated over the host link, what's next? */
 199	if (link == &ap->link)
 200		switch (mode) {
 201		case ATA_LITER_HOST_FIRST:
 202			if (sata_pmp_attached(ap))
 203				return ap->pmp_link;
 204			fallthrough;
 205		case ATA_LITER_PMP_FIRST:
 206			if (unlikely(ap->slave_link))
 207				return ap->slave_link;
 208			fallthrough;
 209		case ATA_LITER_EDGE:
 210			return NULL;
 211		}
 212
 213	/* slave_link excludes PMP */
 214	if (unlikely(link == ap->slave_link))
 215		return NULL;
 216
 217	/* we were over a PMP link */
 218	if (++link < ap->pmp_link + ap->nr_pmp_links)
 219		return link;
 220
 221	if (mode == ATA_LITER_PMP_FIRST)
 222		return &ap->link;
 223
 224	return NULL;
 225}
 226EXPORT_SYMBOL_GPL(ata_link_next);
 227
 228/**
 229 *	ata_dev_next - device iteration helper
 230 *	@dev: the previous device, NULL to start
 231 *	@link: ATA link containing devices to iterate
 232 *	@mode: iteration mode, one of ATA_DITER_*
 233 *
 234 *	LOCKING:
 235 *	Host lock or EH context.
 236 *
 237 *	RETURNS:
 238 *	Pointer to the next device.
 239 */
 240struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
 241				enum ata_dev_iter_mode mode)
 242{
 243	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
 244	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
 245
 246	/* NULL dev indicates start of iteration */
 247	if (!dev)
 248		switch (mode) {
 249		case ATA_DITER_ENABLED:
 250		case ATA_DITER_ALL:
 251			dev = link->device;
 252			goto check;
 253		case ATA_DITER_ENABLED_REVERSE:
 254		case ATA_DITER_ALL_REVERSE:
 255			dev = link->device + ata_link_max_devices(link) - 1;
 256			goto check;
 257		}
 258
 259 next:
 260	/* move to the next one */
 261	switch (mode) {
 262	case ATA_DITER_ENABLED:
 263	case ATA_DITER_ALL:
 264		if (++dev < link->device + ata_link_max_devices(link))
 265			goto check;
 266		return NULL;
 267	case ATA_DITER_ENABLED_REVERSE:
 268	case ATA_DITER_ALL_REVERSE:
 269		if (--dev >= link->device)
 270			goto check;
 271		return NULL;
 272	}
 273
 274 check:
 275	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
 276	    !ata_dev_enabled(dev))
 277		goto next;
 278	return dev;
 279}
 280EXPORT_SYMBOL_GPL(ata_dev_next);
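/*
 * A minimal iteration sketch, assuming the ata_for_each_link() and
 * ata_for_each_dev() wrappers from <linux/libata.h>, which simply loop
 * over ata_link_next() and ata_dev_next() with the mode given by their
 * last argument:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_notice(dev, "enabled device found\n");
 */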
 281
 282/**
 283 *	ata_dev_phys_link - find physical link for a device
 284 *	@dev: ATA device to look up physical link for
 285 *
 286 *	Look up physical link which @dev is attached to.  Note that
 287 *	this is different from @dev->link only when @dev is on slave
 288 *	link.  For all other cases, it's the same as @dev->link.
 289 *
 290 *	LOCKING:
 291 *	Don't care.
 292 *
 293 *	RETURNS:
 294 *	Pointer to the found physical link.
 295 */
 296struct ata_link *ata_dev_phys_link(struct ata_device *dev)
 297{
 298	struct ata_port *ap = dev->link->ap;
 299
 300	if (!ap->slave_link)
 301		return dev->link;
 302	if (!dev->devno)
 303		return &ap->link;
 304	return ap->slave_link;
 305}
 306
 307#ifdef CONFIG_ATA_FORCE
 308/**
 309 *	ata_force_cbl - force cable type according to libata.force
 310 *	@ap: ATA port of interest
 311 *
 312 *	Force cable type according to libata.force and whine about it.
  313 *	The last entry which has a matching port number is used, so it
 314 *	can be specified as part of device force parameters.  For
 315 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 316 *	same effect.
 317 *
 318 *	LOCKING:
 319 *	EH context.
 320 */
 321void ata_force_cbl(struct ata_port *ap)
 322{
 323	int i;
 324
 325	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 326		const struct ata_force_ent *fe = &ata_force_tbl[i];
 327
 328		if (fe->port != -1 && fe->port != ap->print_id)
 329			continue;
 330
 331		if (fe->param.cbl == ATA_CBL_NONE)
 332			continue;
 333
 334		ap->cbl = fe->param.cbl;
 335		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
 336		return;
 337	}
 338}
 339
 340/**
 341 *	ata_force_link_limits - force link limits according to libata.force
 342 *	@link: ATA link of interest
 343 *
 344 *	Force link flags and SATA spd limit according to libata.force
 345 *	and whine about it.  When only the port part is specified
 346 *	(e.g. 1:), the limit applies to all links connected to both
 347 *	the host link and all fan-out ports connected via PMP.  If the
 348 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 349 *	first fan-out link not the host link.  Device number 15 always
 350 *	points to the host link whether PMP is attached or not.  If the
 351 *	controller has slave link, device number 16 points to it.
 352 *
 353 *	LOCKING:
 354 *	EH context.
 355 */
 356static void ata_force_link_limits(struct ata_link *link)
 357{
 358	bool did_spd = false;
 359	int linkno = link->pmp;
 360	int i;
 361
 362	if (ata_is_host_link(link))
 363		linkno += 15;
 364
 365	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 366		const struct ata_force_ent *fe = &ata_force_tbl[i];
 367
 368		if (fe->port != -1 && fe->port != link->ap->print_id)
 369			continue;
 370
 371		if (fe->device != -1 && fe->device != linkno)
 372			continue;
 373
 374		/* only honor the first spd limit */
 375		if (!did_spd && fe->param.spd_limit) {
 376			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
 377			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
 378					fe->param.name);
 379			did_spd = true;
 380		}
 381
 382		/* let lflags stack */
 383		if (fe->param.lflags) {
 384			link->flags |= fe->param.lflags;
 385			ata_link_notice(link,
 386					"FORCE: link flag 0x%x forced -> 0x%x\n",
 387					fe->param.lflags, link->flags);
 388		}
 389	}
 390}
 391
 392/**
 393 *	ata_force_xfermask - force xfermask according to libata.force
 394 *	@dev: ATA device of interest
 395 *
 396 *	Force xfer_mask according to libata.force and whine about it.
 397 *	For consistency with link selection, device number 15 selects
 398 *	the first device connected to the host link.
 399 *
 400 *	LOCKING:
 401 *	EH context.
 402 */
 403static void ata_force_xfermask(struct ata_device *dev)
 404{
 405	int devno = dev->link->pmp + dev->devno;
 406	int alt_devno = devno;
 407	int i;
 408
 409	/* allow n.15/16 for devices attached to host port */
 410	if (ata_is_host_link(dev->link))
 411		alt_devno += 15;
 412
 413	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 414		const struct ata_force_ent *fe = &ata_force_tbl[i];
 415		unsigned long pio_mask, mwdma_mask, udma_mask;
 416
 417		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 418			continue;
 419
 420		if (fe->device != -1 && fe->device != devno &&
 421		    fe->device != alt_devno)
 422			continue;
 423
 424		if (!fe->param.xfer_mask)
 425			continue;
 426
 427		ata_unpack_xfermask(fe->param.xfer_mask,
 428				    &pio_mask, &mwdma_mask, &udma_mask);
 429		if (udma_mask)
 430			dev->udma_mask = udma_mask;
 431		else if (mwdma_mask) {
 432			dev->udma_mask = 0;
 433			dev->mwdma_mask = mwdma_mask;
 434		} else {
 435			dev->udma_mask = 0;
 436			dev->mwdma_mask = 0;
 437			dev->pio_mask = pio_mask;
 438		}
 439
 440		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
 441			       fe->param.name);
 442		return;
 443	}
 444}
 445
 446/**
 447 *	ata_force_horkage - force horkage according to libata.force
 448 *	@dev: ATA device of interest
 449 *
 450 *	Force horkage according to libata.force and whine about it.
 451 *	For consistency with link selection, device number 15 selects
 452 *	the first device connected to the host link.
 453 *
 454 *	LOCKING:
 455 *	EH context.
 456 */
 457static void ata_force_horkage(struct ata_device *dev)
 458{
 459	int devno = dev->link->pmp + dev->devno;
 460	int alt_devno = devno;
 461	int i;
 462
 463	/* allow n.15/16 for devices attached to host port */
 464	if (ata_is_host_link(dev->link))
 465		alt_devno += 15;
 466
 467	for (i = 0; i < ata_force_tbl_size; i++) {
 468		const struct ata_force_ent *fe = &ata_force_tbl[i];
 469
 470		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 471			continue;
 472
 473		if (fe->device != -1 && fe->device != devno &&
 474		    fe->device != alt_devno)
 475			continue;
 476
 477		if (!(~dev->horkage & fe->param.horkage_on) &&
 478		    !(dev->horkage & fe->param.horkage_off))
 479			continue;
 480
 481		dev->horkage |= fe->param.horkage_on;
 482		dev->horkage &= ~fe->param.horkage_off;
 483
 484		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
 485			       fe->param.name);
 486	}
 487}
 488#else
 489static inline void ata_force_link_limits(struct ata_link *link) { }
 490static inline void ata_force_xfermask(struct ata_device *dev) { }
 491static inline void ata_force_horkage(struct ata_device *dev) { }
 492#endif
 493
 494/**
 495 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 496 *	@opcode: SCSI opcode
 497 *
 498 *	Determine ATAPI command type from @opcode.
 499 *
 500 *	LOCKING:
 501 *	None.
 502 *
 503 *	RETURNS:
 504 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 505 */
 506int atapi_cmd_type(u8 opcode)
 507{
 508	switch (opcode) {
 509	case GPCMD_READ_10:
 510	case GPCMD_READ_12:
 511		return ATAPI_READ;
 512
 513	case GPCMD_WRITE_10:
 514	case GPCMD_WRITE_12:
 515	case GPCMD_WRITE_AND_VERIFY_10:
 516		return ATAPI_WRITE;
 517
 518	case GPCMD_READ_CD:
 519	case GPCMD_READ_CD_MSF:
 520		return ATAPI_READ_CD;
 521
 522	case ATA_16:
 523	case ATA_12:
 524		if (atapi_passthru16)
 525			return ATAPI_PASS_THRU;
 526		fallthrough;
 527	default:
 528		return ATAPI_MISC;
 529	}
 530}
 531EXPORT_SYMBOL_GPL(atapi_cmd_type);
 532
 533static const u8 ata_rw_cmds[] = {
 534	/* pio multi */
 535	ATA_CMD_READ_MULTI,
 536	ATA_CMD_WRITE_MULTI,
 537	ATA_CMD_READ_MULTI_EXT,
 538	ATA_CMD_WRITE_MULTI_EXT,
 539	0,
 540	0,
 541	0,
 542	ATA_CMD_WRITE_MULTI_FUA_EXT,
 543	/* pio */
 544	ATA_CMD_PIO_READ,
 545	ATA_CMD_PIO_WRITE,
 546	ATA_CMD_PIO_READ_EXT,
 547	ATA_CMD_PIO_WRITE_EXT,
 548	0,
 549	0,
 550	0,
 551	0,
 552	/* dma */
 553	ATA_CMD_READ,
 554	ATA_CMD_WRITE,
 555	ATA_CMD_READ_EXT,
 556	ATA_CMD_WRITE_EXT,
 557	0,
 558	0,
 559	0,
 560	ATA_CMD_WRITE_FUA_EXT
 561};
 562
 563/**
 564 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 565 *	@tf: command to examine and configure
 566 *	@dev: device tf belongs to
 567 *
 568 *	Examine the device configuration and tf->flags to calculate
 569 *	the proper read/write commands and protocol to use.
 570 *
 571 *	LOCKING:
 572 *	caller.
 573 */
 574static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
 575{
 576	u8 cmd;
 577
 578	int index, fua, lba48, write;
 579
 580	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
 581	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
 582	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
 583
 584	if (dev->flags & ATA_DFLAG_PIO) {
 585		tf->protocol = ATA_PROT_PIO;
 586		index = dev->multi_count ? 0 : 8;
 587	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
 588		/* Unable to use DMA due to host limitation */
 589		tf->protocol = ATA_PROT_PIO;
 590		index = dev->multi_count ? 0 : 8;
 591	} else {
 592		tf->protocol = ATA_PROT_DMA;
 593		index = 16;
 594	}
 595
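	/*
	 * Worked example: a DMA, LBA48, FUA write indexes ata_rw_cmds[] at
	 * 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, which is
	 * ATA_CMD_WRITE_FUA_EXT in the table above.
	 */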
 596	cmd = ata_rw_cmds[index + fua + lba48 + write];
 597	if (cmd) {
 598		tf->command = cmd;
 599		return 0;
 600	}
 601	return -1;
 602}
 603
 604/**
 605 *	ata_tf_read_block - Read block address from ATA taskfile
 606 *	@tf: ATA taskfile of interest
 607 *	@dev: ATA device @tf belongs to
 608 *
 609 *	LOCKING:
 610 *	None.
 611 *
 612 *	Read block address from @tf.  This function can handle all
 613 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 614 *	flags select the address format to use.
 615 *
 616 *	RETURNS:
 617 *	Block address read from @tf.
 618 */
 619u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
 620{
 621	u64 block = 0;
 622
 623	if (tf->flags & ATA_TFLAG_LBA) {
 624		if (tf->flags & ATA_TFLAG_LBA48) {
 625			block |= (u64)tf->hob_lbah << 40;
 626			block |= (u64)tf->hob_lbam << 32;
 627			block |= (u64)tf->hob_lbal << 24;
 628		} else
 629			block |= (tf->device & 0xf) << 24;
 630
 631		block |= tf->lbah << 16;
 632		block |= tf->lbam << 8;
 633		block |= tf->lbal;
 634	} else {
 635		u32 cyl, head, sect;
 636
 637		cyl = tf->lbam | (tf->lbah << 8);
 638		head = tf->device & 0xf;
 639		sect = tf->lbal;
 640
 641		if (!sect) {
 642			ata_dev_warn(dev,
 643				     "device reported invalid CHS sector 0\n");
 644			return U64_MAX;
 645		}
 646
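		/*
		 * For example, with dev->heads == 16 and dev->sectors == 63,
		 * CHS 2/3/10 yields (2 * 16 + 3) * 63 + 10 - 1 == 2214.
		 */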
 647		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
 648	}
 649
 650	return block;
 651}
 652
 653/**
 654 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 655 *	@tf: Target ATA taskfile
 656 *	@dev: ATA device @tf belongs to
 657 *	@block: Block address
 658 *	@n_block: Number of blocks
 659 *	@tf_flags: RW/FUA etc...
 660 *	@tag: tag
 661 *	@class: IO priority class
 662 *
 663 *	LOCKING:
 664 *	None.
 665 *
 666 *	Build ATA taskfile @tf for read/write request described by
 667 *	@block, @n_block, @tf_flags and @tag on @dev.
 668 *
 669 *	RETURNS:
 670 *
 671 *	0 on success, -ERANGE if the request is too large for @dev,
 672 *	-EINVAL if the request is invalid.
 673 */
 674int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 675		    u64 block, u32 n_block, unsigned int tf_flags,
 676		    unsigned int tag, int class)
 677{
 678	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 679	tf->flags |= tf_flags;
 680
 681	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
 682		/* yay, NCQ */
 683		if (!lba_48_ok(block, n_block))
 684			return -ERANGE;
 685
 686		tf->protocol = ATA_PROT_NCQ;
 687		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 688
 689		if (tf->flags & ATA_TFLAG_WRITE)
 690			tf->command = ATA_CMD_FPDMA_WRITE;
 691		else
 692			tf->command = ATA_CMD_FPDMA_READ;
 693
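		/* the NCQ tag is carried in bits 7:3 of the sector count */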
 694		tf->nsect = tag << 3;
 695		tf->hob_feature = (n_block >> 8) & 0xff;
 696		tf->feature = n_block & 0xff;
 697
 698		tf->hob_lbah = (block >> 40) & 0xff;
 699		tf->hob_lbam = (block >> 32) & 0xff;
 700		tf->hob_lbal = (block >> 24) & 0xff;
 701		tf->lbah = (block >> 16) & 0xff;
 702		tf->lbam = (block >> 8) & 0xff;
 703		tf->lbal = block & 0xff;
 704
 705		tf->device = ATA_LBA;
 706		if (tf->flags & ATA_TFLAG_FUA)
 707			tf->device |= 1 << 7;
 708
 709		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
 710			if (class == IOPRIO_CLASS_RT)
 711				tf->hob_nsect |= ATA_PRIO_HIGH <<
 712						 ATA_SHIFT_PRIO;
 713		}
 714	} else if (dev->flags & ATA_DFLAG_LBA) {
 715		tf->flags |= ATA_TFLAG_LBA;
 716
 717		if (lba_28_ok(block, n_block)) {
 718			/* use LBA28 */
 719			tf->device |= (block >> 24) & 0xf;
 720		} else if (lba_48_ok(block, n_block)) {
 721			if (!(dev->flags & ATA_DFLAG_LBA48))
 722				return -ERANGE;
 723
 724			/* use LBA48 */
 725			tf->flags |= ATA_TFLAG_LBA48;
 726
 727			tf->hob_nsect = (n_block >> 8) & 0xff;
 728
 729			tf->hob_lbah = (block >> 40) & 0xff;
 730			tf->hob_lbam = (block >> 32) & 0xff;
 731			tf->hob_lbal = (block >> 24) & 0xff;
 732		} else
 733			/* request too large even for LBA48 */
 734			return -ERANGE;
 735
 736		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
 737			return -EINVAL;
 738
 739		tf->nsect = n_block & 0xff;
 740
 741		tf->lbah = (block >> 16) & 0xff;
 742		tf->lbam = (block >> 8) & 0xff;
 743		tf->lbal = block & 0xff;
 744
 745		tf->device |= ATA_LBA;
 746	} else {
 747		/* CHS */
 748		u32 sect, head, cyl, track;
 749
 750		/* The request -may- be too large for CHS addressing. */
 751		if (!lba_28_ok(block, n_block))
 752			return -ERANGE;
 753
 754		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
 755			return -EINVAL;
 756
 757		/* Convert LBA to CHS */
 758		track = (u32)block / dev->sectors;
 759		cyl   = track / dev->heads;
 760		head  = track % dev->heads;
 761		sect  = (u32)block % dev->sectors + 1;
 762
 763		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
 764			(u32)block, track, cyl, head, sect);
 765
  766		/*
  767		 * Check whether the converted CHS can fit.
  768		 * Cylinder: 0-65535, Head: 0-15, Sector: 1-255
  769		 */
 770		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
 771			return -ERANGE;
 772
 773		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
 774		tf->lbal = sect;
 775		tf->lbam = cyl;
 776		tf->lbah = cyl >> 8;
 777		tf->device |= head;
 778	}
 779
 780	return 0;
 781}
 782
 783/**
 784 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 785 *	@pio_mask: pio_mask
 786 *	@mwdma_mask: mwdma_mask
 787 *	@udma_mask: udma_mask
 788 *
 789 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 790 *	unsigned int xfer_mask.
 791 *
 792 *	LOCKING:
 793 *	None.
 794 *
 795 *	RETURNS:
 796 *	Packed xfer_mask.
 797 */
 798unsigned long ata_pack_xfermask(unsigned long pio_mask,
 799				unsigned long mwdma_mask,
 800				unsigned long udma_mask)
 801{
 802	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
 803		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
 804		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 805}
 806EXPORT_SYMBOL_GPL(ata_pack_xfermask);
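/*
 * A minimal usage sketch, assuming the per-type mode masks ATA_PIO4,
 * ATA_MWDMA2 and ATA_UDMA5 from the ATA headers: a driver supporting
 * PIO0-4, MWDMA0-2 and UDMA0-5 could build its combined mask with
 *
 *	unsigned long xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2,
 *						    ATA_UDMA5);
 *
 * and split it back into the three components with ata_unpack_xfermask()
 * below.
 */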
 807
 808/**
 809 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 810 *	@xfer_mask: xfer_mask to unpack
 811 *	@pio_mask: resulting pio_mask
 812 *	@mwdma_mask: resulting mwdma_mask
 813 *	@udma_mask: resulting udma_mask
 814 *
 815 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 816 *	Any NULL destination masks will be ignored.
 817 */
 818void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
 819			 unsigned long *mwdma_mask, unsigned long *udma_mask)
 820{
 821	if (pio_mask)
 822		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
 823	if (mwdma_mask)
 824		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
 825	if (udma_mask)
 826		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 827}
 828
 829static const struct ata_xfer_ent {
 830	int shift, bits;
 831	u8 base;
 832} ata_xfer_tbl[] = {
 833	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
 834	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
 835	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
 836	{ -1, },
 837};
 838
 839/**
 840 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 841 *	@xfer_mask: xfer_mask of interest
 842 *
 843 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 844 *	bit of @xfer_mask is considered.
 845 *
 846 *	LOCKING:
 847 *	None.
 848 *
 849 *	RETURNS:
 850 *	Matching XFER_* value, 0xff if no match found.
 851 */
 852u8 ata_xfer_mask2mode(unsigned long xfer_mask)
 853{
 854	int highbit = fls(xfer_mask) - 1;
 855	const struct ata_xfer_ent *ent;
 856
 857	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 858		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
 859			return ent->base + highbit - ent->shift;
 860	return 0xff;
 861}
 862EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
 863
 864/**
 865 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 866 *	@xfer_mode: XFER_* of interest
 867 *
 868 *	Return matching xfer_mask for @xfer_mode.
 869 *
 870 *	LOCKING:
 871 *	None.
 872 *
 873 *	RETURNS:
 874 *	Matching xfer_mask, 0 if no match found.
 875 */
 876unsigned long ata_xfer_mode2mask(u8 xfer_mode)
 877{
 878	const struct ata_xfer_ent *ent;
 879
 880	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 881		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 882			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
 883				& ~((1 << ent->shift) - 1);
 884	return 0;
 885}
 886EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
 887
 888/**
 889 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 890 *	@xfer_mode: XFER_* of interest
 891 *
 892 *	Return matching xfer_shift for @xfer_mode.
 893 *
 894 *	LOCKING:
 895 *	None.
 896 *
 897 *	RETURNS:
 898 *	Matching xfer_shift, -1 if no match found.
 899 */
 900int ata_xfer_mode2shift(unsigned long xfer_mode)
 901{
 902	const struct ata_xfer_ent *ent;
 903
 904	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 905		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 906			return ent->shift;
 907	return -1;
 908}
 909EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
 910
 911/**
 912 *	ata_mode_string - convert xfer_mask to string
 913 *	@xfer_mask: mask of bits supported; only highest bit counts.
 914 *
 915 *	Determine string which represents the highest speed
  916 *	(highest bit in @xfer_mask).
 917 *
 918 *	LOCKING:
 919 *	None.
 920 *
 921 *	RETURNS:
 922 *	Constant C string representing highest speed listed in
  923 *	@xfer_mask, or the constant C string "<n/a>".
 924 */
 925const char *ata_mode_string(unsigned long xfer_mask)
 926{
 927	static const char * const xfer_mode_str[] = {
 928		"PIO0",
 929		"PIO1",
 930		"PIO2",
 931		"PIO3",
 932		"PIO4",
 933		"PIO5",
 934		"PIO6",
 935		"MWDMA0",
 936		"MWDMA1",
 937		"MWDMA2",
 938		"MWDMA3",
 939		"MWDMA4",
 940		"UDMA/16",
 941		"UDMA/25",
 942		"UDMA/33",
 943		"UDMA/44",
 944		"UDMA/66",
 945		"UDMA/100",
 946		"UDMA/133",
 947		"UDMA7",
 948	};
 949	int highbit;
 950
 951	highbit = fls(xfer_mask) - 1;
 952	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
 953		return xfer_mode_str[highbit];
 954	return "<n/a>";
 955}
 956EXPORT_SYMBOL_GPL(ata_mode_string);
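/*
 * Illustrative example: ata_xfer_mode2mask(XFER_UDMA_5) yields the mask
 * covering UDMA0-5, whose highest bit is the UDMA5 bit, so passing that
 * mask to ata_mode_string() returns "UDMA/100" from the table above.
 */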
 957
 958const char *sata_spd_string(unsigned int spd)
 959{
 960	static const char * const spd_str[] = {
 961		"1.5 Gbps",
 962		"3.0 Gbps",
 963		"6.0 Gbps",
 964	};
 965
 966	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
 967		return "<unknown>";
 968	return spd_str[spd - 1];
 969}
 970
 971/**
 972 *	ata_dev_classify - determine device type based on ATA-spec signature
 973 *	@tf: ATA taskfile register set for device to be identified
 974 *
 975 *	Determine from taskfile register contents whether a device is
 976 *	ATA or ATAPI, as per "Signature and persistence" section
 977 *	of ATA/PI spec (volume 1, sect 5.14).
 978 *
 979 *	LOCKING:
 980 *	None.
 981 *
 982 *	RETURNS:
 983 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
  984 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 985 */
 986unsigned int ata_dev_classify(const struct ata_taskfile *tf)
 987{
 988	/* Apple's open source Darwin code hints that some devices only
  989	 * put a proper signature into the LBA mid/high registers, so we
  990	 * only check those.  It's sufficient for uniqueness.
 991	 *
 992	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
 993	 * signatures for ATA and ATAPI devices attached on SerialATA,
 994	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
  995	 * spec has never mentioned using different signatures for
  996	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port Multiplier
  997	 * specification began to use 0x69/0x96 to identify port
  998	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
  999	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
 1000	 * 0x69/0x96 and described both signatures as reserved for
 1001	 * SerialATA.
1002	 *
1003	 * We follow the current spec and consider that 0x69/0x96
1004	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1005	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1006	 * SEMB signature.  This is worked around in
1007	 * ata_dev_read_id().
1008	 */
1009	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1010		DPRINTK("found ATA device by sig\n");
1011		return ATA_DEV_ATA;
1012	}
1013
1014	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1015		DPRINTK("found ATAPI device by sig\n");
1016		return ATA_DEV_ATAPI;
1017	}
1018
1019	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1020		DPRINTK("found PMP device by sig\n");
1021		return ATA_DEV_PMP;
1022	}
1023
1024	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1025		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1026		return ATA_DEV_SEMB;
1027	}
1028
1029	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1030		DPRINTK("found ZAC device by sig\n");
1031		return ATA_DEV_ZAC;
1032	}
1033
1034	DPRINTK("unknown device\n");
1035	return ATA_DEV_UNKNOWN;
1036}
1037EXPORT_SYMBOL_GPL(ata_dev_classify);
1038
1039/**
1040 *	ata_id_string - Convert IDENTIFY DEVICE page into string
1041 *	@id: IDENTIFY DEVICE results we will examine
1042 *	@s: string into which data is output
1043 *	@ofs: offset into identify device page
1044 *	@len: length of string to return. must be an even number.
1045 *
1046 *	The strings in the IDENTIFY DEVICE page are broken up into
1047 *	16-bit chunks.  Run through the string, and output each
1048 *	8-bit chunk linearly, regardless of platform.
1049 *
1050 *	LOCKING:
1051 *	caller.
1052 */
1053
1054void ata_id_string(const u16 *id, unsigned char *s,
1055		   unsigned int ofs, unsigned int len)
1056{
1057	unsigned int c;
1058
1059	BUG_ON(len & 1);
1060
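	/*
	 * For example, an IDENTIFY word of 0x4142 is emitted as the two
	 * bytes 'A' then 'B' - the high byte of each word comes first.
	 */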
1061	while (len > 0) {
1062		c = id[ofs] >> 8;
1063		*s = c;
1064		s++;
1065
1066		c = id[ofs] & 0xff;
1067		*s = c;
1068		s++;
1069
1070		ofs++;
1071		len -= 2;
1072	}
1073}
1074EXPORT_SYMBOL_GPL(ata_id_string);
1075
1076/**
1077 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1078 *	@id: IDENTIFY DEVICE results we will examine
1079 *	@s: string into which data is output
1080 *	@ofs: offset into identify device page
1081 *	@len: length of string to return. must be an odd number.
1082 *
1083 *	This function is identical to ata_id_string except that it
1084 *	trims trailing spaces and terminates the resulting string with
1085 *	null.  @len must be actual maximum length (even number) + 1.
1086 *
1087 *	LOCKING:
1088 *	caller.
1089 */
1090void ata_id_c_string(const u16 *id, unsigned char *s,
1091		     unsigned int ofs, unsigned int len)
1092{
1093	unsigned char *p;
1094
1095	ata_id_string(id, s, ofs, len - 1);
1096
1097	p = s + strnlen(s, len - 1);
1098	while (p > s && p[-1] == ' ')
1099		p--;
1100	*p = '\0';
1101}
1102EXPORT_SYMBOL_GPL(ata_id_c_string);
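/*
 * A minimal usage sketch, assuming the ATA_ID_PROD and ATA_ID_PROD_LEN
 * constants from the ATA headers, to pull the model string out of
 * IDENTIFY data:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */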
1103
1104static u64 ata_id_n_sectors(const u16 *id)
1105{
1106	if (ata_id_has_lba(id)) {
1107		if (ata_id_has_lba48(id))
1108			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1109		else
1110			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1111	} else {
1112		if (ata_id_current_chs_valid(id))
1113			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1114			       id[ATA_ID_CUR_SECTORS];
1115		else
1116			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1117			       id[ATA_ID_SECTORS];
1118	}
1119}
1120
1121u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1122{
1123	u64 sectors = 0;
1124
1125	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1126	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1127	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1128	sectors |= (tf->lbah & 0xff) << 16;
1129	sectors |= (tf->lbam & 0xff) << 8;
1130	sectors |= (tf->lbal & 0xff);
1131
1132	return sectors;
1133}
1134
1135u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1136{
1137	u64 sectors = 0;
1138
1139	sectors |= (tf->device & 0x0f) << 24;
1140	sectors |= (tf->lbah & 0xff) << 16;
1141	sectors |= (tf->lbam & 0xff) << 8;
1142	sectors |= (tf->lbal & 0xff);
1143
1144	return sectors;
1145}
1146
1147/**
1148 *	ata_read_native_max_address - Read native max address
1149 *	@dev: target device
1150 *	@max_sectors: out parameter for the result native max address
1151 *
1152 *	Perform an LBA48 or LBA28 native size query upon the device in
1153 *	question.
1154 *
1155 *	RETURNS:
1156 *	0 on success, -EACCES if command is aborted by the drive.
1157 *	-EIO on other errors.
1158 */
1159static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1160{
1161	unsigned int err_mask;
1162	struct ata_taskfile tf;
1163	int lba48 = ata_id_has_lba48(dev->id);
1164
1165	ata_tf_init(dev, &tf);
1166
1167	/* always clear all address registers */
1168	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1169
1170	if (lba48) {
1171		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1172		tf.flags |= ATA_TFLAG_LBA48;
1173	} else
1174		tf.command = ATA_CMD_READ_NATIVE_MAX;
1175
1176	tf.protocol = ATA_PROT_NODATA;
1177	tf.device |= ATA_LBA;
1178
1179	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1180	if (err_mask) {
1181		ata_dev_warn(dev,
1182			     "failed to read native max address (err_mask=0x%x)\n",
1183			     err_mask);
1184		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1185			return -EACCES;
1186		return -EIO;
1187	}
1188
1189	if (lba48)
1190		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1191	else
1192		*max_sectors = ata_tf_to_lba(&tf) + 1;
1193	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1194		(*max_sectors)--;
1195	return 0;
1196}
1197
1198/**
1199 *	ata_set_max_sectors - Set max sectors
1200 *	@dev: target device
1201 *	@new_sectors: new max sectors value to set for the device
1202 *
1203 *	Set max sectors of @dev to @new_sectors.
1204 *
1205 *	RETURNS:
1206 *	0 on success, -EACCES if command is aborted or denied (due to
1207 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1208 *	errors.
1209 */
1210static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1211{
1212	unsigned int err_mask;
1213	struct ata_taskfile tf;
1214	int lba48 = ata_id_has_lba48(dev->id);
1215
1216	new_sectors--;
1217
1218	ata_tf_init(dev, &tf);
1219
1220	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1221
1222	if (lba48) {
1223		tf.command = ATA_CMD_SET_MAX_EXT;
1224		tf.flags |= ATA_TFLAG_LBA48;
1225
1226		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1227		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1228		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1229	} else {
1230		tf.command = ATA_CMD_SET_MAX;
1231
1232		tf.device |= (new_sectors >> 24) & 0xf;
1233	}
1234
1235	tf.protocol = ATA_PROT_NODATA;
1236	tf.device |= ATA_LBA;
1237
1238	tf.lbal = (new_sectors >> 0) & 0xff;
1239	tf.lbam = (new_sectors >> 8) & 0xff;
1240	tf.lbah = (new_sectors >> 16) & 0xff;
1241
1242	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1243	if (err_mask) {
1244		ata_dev_warn(dev,
1245			     "failed to set max address (err_mask=0x%x)\n",
1246			     err_mask);
1247		if (err_mask == AC_ERR_DEV &&
1248		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1249			return -EACCES;
1250		return -EIO;
1251	}
1252
1253	return 0;
1254}
1255
1256/**
1257 *	ata_hpa_resize		-	Resize a device with an HPA set
1258 *	@dev: Device to resize
1259 *
1260 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1261 *	it if required to the full size of the media. The caller must check
1262 *	the drive has the HPA feature set enabled.
1263 *
1264 *	RETURNS:
1265 *	0 on success, -errno on failure.
1266 */
1267static int ata_hpa_resize(struct ata_device *dev)
1268{
1269	struct ata_eh_context *ehc = &dev->link->eh_context;
1270	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1271	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1272	u64 sectors = ata_id_n_sectors(dev->id);
1273	u64 native_sectors;
1274	int rc;
1275
1276	/* do we need to do it? */
1277	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1278	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1279	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1280		return 0;
1281
1282	/* read native max address */
1283	rc = ata_read_native_max_address(dev, &native_sectors);
1284	if (rc) {
1285		/* If device aborted the command or HPA isn't going to
1286		 * be unlocked, skip HPA resizing.
1287		 */
1288		if (rc == -EACCES || !unlock_hpa) {
1289			ata_dev_warn(dev,
1290				     "HPA support seems broken, skipping HPA handling\n");
1291			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1292
1293			/* we can continue if device aborted the command */
1294			if (rc == -EACCES)
1295				rc = 0;
1296		}
1297
1298		return rc;
1299	}
1300	dev->n_native_sectors = native_sectors;
1301
1302	/* nothing to do? */
1303	if (native_sectors <= sectors || !unlock_hpa) {
1304		if (!print_info || native_sectors == sectors)
1305			return 0;
1306
1307		if (native_sectors > sectors)
1308			ata_dev_info(dev,
1309				"HPA detected: current %llu, native %llu\n",
1310				(unsigned long long)sectors,
1311				(unsigned long long)native_sectors);
1312		else if (native_sectors < sectors)
1313			ata_dev_warn(dev,
1314				"native sectors (%llu) is smaller than sectors (%llu)\n",
1315				(unsigned long long)native_sectors,
1316				(unsigned long long)sectors);
1317		return 0;
1318	}
1319
1320	/* let's unlock HPA */
1321	rc = ata_set_max_sectors(dev, native_sectors);
1322	if (rc == -EACCES) {
1323		/* if device aborted the command, skip HPA resizing */
1324		ata_dev_warn(dev,
1325			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1326			     (unsigned long long)sectors,
1327			     (unsigned long long)native_sectors);
1328		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1329		return 0;
1330	} else if (rc)
1331		return rc;
1332
1333	/* re-read IDENTIFY data */
1334	rc = ata_dev_reread_id(dev, 0);
1335	if (rc) {
1336		ata_dev_err(dev,
1337			    "failed to re-read IDENTIFY data after HPA resizing\n");
1338		return rc;
1339	}
1340
1341	if (print_info) {
1342		u64 new_sectors = ata_id_n_sectors(dev->id);
1343		ata_dev_info(dev,
1344			"HPA unlocked: %llu -> %llu, native %llu\n",
1345			(unsigned long long)sectors,
1346			(unsigned long long)new_sectors,
1347			(unsigned long long)native_sectors);
1348	}
1349
1350	return 0;
1351}
1352
1353/**
1354 *	ata_dump_id - IDENTIFY DEVICE info debugging output
1355 *	@id: IDENTIFY DEVICE page to dump
1356 *
1357 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1358 *	page.
1359 *
1360 *	LOCKING:
1361 *	caller.
1362 */
1363
1364static inline void ata_dump_id(const u16 *id)
1365{
1366	DPRINTK("49==0x%04x  "
1367		"53==0x%04x  "
1368		"63==0x%04x  "
1369		"64==0x%04x  "
1370		"75==0x%04x  \n",
1371		id[49],
1372		id[53],
1373		id[63],
1374		id[64],
1375		id[75]);
1376	DPRINTK("80==0x%04x  "
1377		"81==0x%04x  "
1378		"82==0x%04x  "
1379		"83==0x%04x  "
1380		"84==0x%04x  \n",
1381		id[80],
1382		id[81],
1383		id[82],
1384		id[83],
1385		id[84]);
1386	DPRINTK("88==0x%04x  "
1387		"93==0x%04x\n",
1388		id[88],
1389		id[93]);
1390}
1391
1392/**
1393 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1394 *	@id: IDENTIFY data to compute xfer mask from
1395 *
1396 *	Compute the xfermask for this device. This is not as trivial
1397 *	as it seems if we must consider early devices correctly.
1398 *
1399 *	FIXME: pre IDE drive timing (do we care ?).
1400 *
1401 *	LOCKING:
1402 *	None.
1403 *
1404 *	RETURNS:
1405 *	Computed xfermask
1406 */
1407unsigned long ata_id_xfermask(const u16 *id)
1408{
1409	unsigned long pio_mask, mwdma_mask, udma_mask;
1410
1411	/* Usual case. Word 53 indicates word 64 is valid */
1412	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1413		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1414		pio_mask <<= 3;
1415		pio_mask |= 0x7;
1416	} else {
1417		/* If word 64 isn't valid then Word 51 high byte holds
1418		 * the PIO timing number for the maximum. Turn it into
1419		 * a mask.
1420		 */
1421		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1422		if (mode < 5)	/* Valid PIO range */
1423			pio_mask = (2 << mode) - 1;
1424		else
1425			pio_mask = 1;
1426
1427		/* But wait.. there's more. Design your standards by
1428		 * committee and you too can get a free iordy field to
 1429		 * process.  However it's the speeds, not the modes, that
1430		 * are supported... Note drivers using the timing API
1431		 * will get this right anyway
1432		 */
1433	}
1434
1435	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1436
1437	if (ata_id_is_cfa(id)) {
1438		/*
1439		 *	Process compact flash extended modes
1440		 */
1441		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1442		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1443
1444		if (pio)
1445			pio_mask |= (1 << 5);
1446		if (pio > 1)
1447			pio_mask |= (1 << 6);
1448		if (dma)
1449			mwdma_mask |= (1 << 3);
1450		if (dma > 1)
1451			mwdma_mask |= (1 << 4);
1452	}
1453
1454	udma_mask = 0;
1455	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1456		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1457
1458	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1459}
1460EXPORT_SYMBOL_GPL(ata_id_xfermask);
1461
1462static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1463{
1464	struct completion *waiting = qc->private_data;
1465
1466	complete(waiting);
1467}
1468
1469/**
1470 *	ata_exec_internal_sg - execute libata internal command
1471 *	@dev: Device to which the command is sent
1472 *	@tf: Taskfile registers for the command and the result
1473 *	@cdb: CDB for packet command
1474 *	@dma_dir: Data transfer direction of the command
1475 *	@sgl: sg list for the data buffer of the command
1476 *	@n_elem: Number of sg entries
1477 *	@timeout: Timeout in msecs (0 for default)
1478 *
1479 *	Executes libata internal command with timeout.  @tf contains
1480 *	command on entry and result on return.  Timeout and error
1481 *	conditions are reported via return value.  No recovery action
1482 *	is taken after a command times out.  It's caller's duty to
1483 *	clean up after timeout.
1484 *
1485 *	LOCKING:
1486 *	None.  Should be called with kernel context, might sleep.
1487 *
1488 *	RETURNS:
1489 *	Zero on success, AC_ERR_* mask on failure
1490 */
1491unsigned ata_exec_internal_sg(struct ata_device *dev,
1492			      struct ata_taskfile *tf, const u8 *cdb,
1493			      int dma_dir, struct scatterlist *sgl,
1494			      unsigned int n_elem, unsigned long timeout)
1495{
1496	struct ata_link *link = dev->link;
1497	struct ata_port *ap = link->ap;
1498	u8 command = tf->command;
1499	int auto_timeout = 0;
1500	struct ata_queued_cmd *qc;
1501	unsigned int preempted_tag;
1502	u32 preempted_sactive;
1503	u64 preempted_qc_active;
1504	int preempted_nr_active_links;
1505	DECLARE_COMPLETION_ONSTACK(wait);
1506	unsigned long flags;
1507	unsigned int err_mask;
1508	int rc;
1509
1510	spin_lock_irqsave(ap->lock, flags);
1511
1512	/* no internal command while frozen */
1513	if (ap->pflags & ATA_PFLAG_FROZEN) {
1514		spin_unlock_irqrestore(ap->lock, flags);
1515		return AC_ERR_SYSTEM;
1516	}
1517
1518	/* initialize internal qc */
1519	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1520
1521	qc->tag = ATA_TAG_INTERNAL;
1522	qc->hw_tag = 0;
1523	qc->scsicmd = NULL;
1524	qc->ap = ap;
1525	qc->dev = dev;
1526	ata_qc_reinit(qc);
1527
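	/*
	 * Save and clear the port's active command state so the internal
	 * command runs with the hardware to itself; the saved state is
	 * restored once the command completes below.
	 */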
1528	preempted_tag = link->active_tag;
1529	preempted_sactive = link->sactive;
1530	preempted_qc_active = ap->qc_active;
1531	preempted_nr_active_links = ap->nr_active_links;
1532	link->active_tag = ATA_TAG_POISON;
1533	link->sactive = 0;
1534	ap->qc_active = 0;
1535	ap->nr_active_links = 0;
1536
1537	/* prepare & issue qc */
1538	qc->tf = *tf;
1539	if (cdb)
1540		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1541
1542	/* some SATA bridges need us to indicate data xfer direction */
1543	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1544	    dma_dir == DMA_FROM_DEVICE)
1545		qc->tf.feature |= ATAPI_DMADIR;
1546
1547	qc->flags |= ATA_QCFLAG_RESULT_TF;
1548	qc->dma_dir = dma_dir;
1549	if (dma_dir != DMA_NONE) {
1550		unsigned int i, buflen = 0;
1551		struct scatterlist *sg;
1552
1553		for_each_sg(sgl, sg, n_elem, i)
1554			buflen += sg->length;
1555
1556		ata_sg_init(qc, sgl, n_elem);
1557		qc->nbytes = buflen;
1558	}
1559
1560	qc->private_data = &wait;
1561	qc->complete_fn = ata_qc_complete_internal;
1562
1563	ata_qc_issue(qc);
1564
1565	spin_unlock_irqrestore(ap->lock, flags);
1566
1567	if (!timeout) {
1568		if (ata_probe_timeout)
1569			timeout = ata_probe_timeout * 1000;
1570		else {
1571			timeout = ata_internal_cmd_timeout(dev, command);
1572			auto_timeout = 1;
1573		}
1574	}
1575
1576	if (ap->ops->error_handler)
1577		ata_eh_release(ap);
1578
1579	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1580
1581	if (ap->ops->error_handler)
1582		ata_eh_acquire(ap);
1583
1584	ata_sff_flush_pio_task(ap);
1585
1586	if (!rc) {
1587		spin_lock_irqsave(ap->lock, flags);
1588
1589		/* We're racing with irq here.  If we lose, the
1590		 * following test prevents us from completing the qc
1591		 * twice.  If we win, the port is frozen and will be
1592		 * cleaned up by ->post_internal_cmd().
1593		 */
1594		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1595			qc->err_mask |= AC_ERR_TIMEOUT;
1596
1597			if (ap->ops->error_handler)
1598				ata_port_freeze(ap);
1599			else
1600				ata_qc_complete(qc);
1601
1602			if (ata_msg_warn(ap))
1603				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1604					     command);
1605		}
1606
1607		spin_unlock_irqrestore(ap->lock, flags);
1608	}
1609
1610	/* do post_internal_cmd */
1611	if (ap->ops->post_internal_cmd)
1612		ap->ops->post_internal_cmd(qc);
1613
1614	/* perform minimal error analysis */
1615	if (qc->flags & ATA_QCFLAG_FAILED) {
1616		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1617			qc->err_mask |= AC_ERR_DEV;
1618
1619		if (!qc->err_mask)
1620			qc->err_mask |= AC_ERR_OTHER;
1621
1622		if (qc->err_mask & ~AC_ERR_OTHER)
1623			qc->err_mask &= ~AC_ERR_OTHER;
1624	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1625		qc->result_tf.command |= ATA_SENSE;
1626	}
1627
1628	/* finish up */
1629	spin_lock_irqsave(ap->lock, flags);
1630
1631	*tf = qc->result_tf;
1632	err_mask = qc->err_mask;
1633
1634	ata_qc_free(qc);
1635	link->active_tag = preempted_tag;
1636	link->sactive = preempted_sactive;
1637	ap->qc_active = preempted_qc_active;
1638	ap->nr_active_links = preempted_nr_active_links;
1639
1640	spin_unlock_irqrestore(ap->lock, flags);
1641
1642	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1643		ata_internal_cmd_timed_out(dev, command);
1644
1645	return err_mask;
1646}
1647
1648/**
1649 *	ata_exec_internal - execute libata internal command
1650 *	@dev: Device to which the command is sent
1651 *	@tf: Taskfile registers for the command and the result
1652 *	@cdb: CDB for packet command
1653 *	@dma_dir: Data transfer direction of the command
1654 *	@buf: Data buffer of the command
1655 *	@buflen: Length of data buffer
1656 *	@timeout: Timeout in msecs (0 for default)
1657 *
1658 *	Wrapper around ata_exec_internal_sg() which takes simple
1659 *	buffer instead of sg list.
1660 *
1661 *	LOCKING:
1662 *	None.  Should be called with kernel context, might sleep.
1663 *
1664 *	RETURNS:
1665 *	Zero on success, AC_ERR_* mask on failure
1666 */
1667unsigned ata_exec_internal(struct ata_device *dev,
1668			   struct ata_taskfile *tf, const u8 *cdb,
1669			   int dma_dir, void *buf, unsigned int buflen,
1670			   unsigned long timeout)
1671{
1672	struct scatterlist *psg = NULL, sg;
1673	unsigned int n_elem = 0;
1674
1675	if (dma_dir != DMA_NONE) {
1676		WARN_ON(!buf);
1677		sg_init_one(&sg, buf, buflen);
1678		psg = &sg;
1679		n_elem++;
1680	}
1681
1682	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1683				    timeout);
1684}
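/*
 * A minimal usage sketch, modelled on ata_read_native_max_address()
 * below: issue a non-data taskfile and check the AC_ERR_* result mask:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_READ_NATIVE_MAX;
 *	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 *	tf.protocol = ATA_PROT_NODATA;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 */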
1685
1686/**
1687 *	ata_pio_need_iordy	-	check if iordy needed
1688 *	@adev: ATA device
1689 *
1690 *	Check if the current speed of the device requires IORDY. Used
1691 *	by various controllers for chip configuration.
1692 */
1693unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1694{
1695	/* Don't set IORDY if we're preparing for reset.  IORDY may
1696	 * lead to controller lock up on certain controllers if the
1697	 * port is not occupied.  See bko#11703 for details.
1698	 */
1699	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1700		return 0;
1701	/* Controller doesn't support IORDY.  Probably a pointless
1702	 * check as the caller should know this.
1703	 */
1704	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1705		return 0;
1706	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1707	if (ata_id_is_cfa(adev->id)
1708	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1709		return 0;
 1710	/* For PIO3 and higher, IORDY is mandatory */
1711	if (adev->pio_mode > XFER_PIO_2)
1712		return 1;
1713	/* We turn it on when possible */
1714	if (ata_id_has_iordy(adev->id))
1715		return 1;
1716	return 0;
1717}
1718EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
1719
1720/**
1721 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1722 *	@adev: ATA device
1723 *
 1724 *	Compute the mask of PIO modes that can be used without IORDY,
 1725 *	based on the drive's reported non-IORDY cycle time.
1726 */
1727static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1728{
1729	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1730	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1731		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1732		/* Is the speed faster than the drive allows non IORDY ? */
1733		if (pio) {
1734			/* This is cycle times not frequency - watch the logic! */
1735			if (pio > 240)	/* PIO2 is 240nS per cycle */
1736				return 3 << ATA_SHIFT_PIO;
1737			return 7 << ATA_SHIFT_PIO;
1738		}
1739	}
1740	return 3 << ATA_SHIFT_PIO;
1741}
1742
1743/**
1744 *	ata_do_dev_read_id		-	default ID read method
1745 *	@dev: device
1746 *	@tf: proposed taskfile
1747 *	@id: data buffer
1748 *
1749 *	Issue the identify taskfile and hand back the buffer containing
 1750 *	identify data.  For some RAID controllers and for pre-ATA devices
 1751 *	this function is wrapped or replaced by the driver.
1752 */
1753unsigned int ata_do_dev_read_id(struct ata_device *dev,
1754					struct ata_taskfile *tf, u16 *id)
1755{
1756	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1757				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1758}
1759EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1760
1761/**
1762 *	ata_dev_read_id - Read ID data from the specified device
1763 *	@dev: target device
1764 *	@p_class: pointer to class of the target device (may be changed)
1765 *	@flags: ATA_READID_* flags
1766 *	@id: buffer to read IDENTIFY data into
1767 *
1768 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1769 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1770 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1771 *	for pre-ATA4 drives.
1772 *
1773 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1774 *	now we abort if we hit that case.
1775 *
1776 *	LOCKING:
1777 *	Kernel thread context (may sleep)
1778 *
1779 *	RETURNS:
1780 *	0 on success, -errno otherwise.
1781 */
1782int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1783		    unsigned int flags, u16 *id)
1784{
1785	struct ata_port *ap = dev->link->ap;
1786	unsigned int class = *p_class;
1787	struct ata_taskfile tf;
1788	unsigned int err_mask = 0;
1789	const char *reason;
1790	bool is_semb = class == ATA_DEV_SEMB;
1791	int may_fallback = 1, tried_spinup = 0;
1792	int rc;
1793
1794	if (ata_msg_ctl(ap))
1795		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1796
1797retry:
1798	ata_tf_init(dev, &tf);
1799
1800	switch (class) {
1801	case ATA_DEV_SEMB:
1802		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1803		fallthrough;
1804	case ATA_DEV_ATA:
1805	case ATA_DEV_ZAC:
1806		tf.command = ATA_CMD_ID_ATA;
1807		break;
1808	case ATA_DEV_ATAPI:
1809		tf.command = ATA_CMD_ID_ATAPI;
1810		break;
1811	default:
1812		rc = -ENODEV;
1813		reason = "unsupported class";
1814		goto err_out;
1815	}
1816
1817	tf.protocol = ATA_PROT_PIO;
1818
1819	/* Some devices choke if TF registers contain garbage.  Make
1820	 * sure those are properly initialized.
1821	 */
1822	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1823
1824	/* Device presence detection is unreliable on some
1825	 * controllers.  Always poll IDENTIFY if available.
1826	 */
1827	tf.flags |= ATA_TFLAG_POLLING;
1828
1829	if (ap->ops->read_id)
1830		err_mask = ap->ops->read_id(dev, &tf, id);
1831	else
1832		err_mask = ata_do_dev_read_id(dev, &tf, id);
1833
1834	if (err_mask) {
1835		if (err_mask & AC_ERR_NODEV_HINT) {
1836			ata_dev_dbg(dev, "NODEV after polling detection\n");
1837			return -ENOENT;
1838		}
1839
1840		if (is_semb) {
1841			ata_dev_info(dev,
1842		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1843			/* SEMB is not supported yet */
1844			*p_class = ATA_DEV_SEMB_UNSUP;
1845			return 0;
1846		}
1847
1848		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1849			/* Device or controller might have reported
1850			 * the wrong device class.  Give a shot at the
1851			 * other IDENTIFY if the current one is
1852			 * aborted by the device.
1853			 */
1854			if (may_fallback) {
1855				may_fallback = 0;
1856
1857				if (class == ATA_DEV_ATA)
1858					class = ATA_DEV_ATAPI;
1859				else
1860					class = ATA_DEV_ATA;
1861				goto retry;
1862			}
1863
1864			/* Control reaches here iff the device aborted
1865			 * both flavors of IDENTIFYs which happens
1866			 * sometimes with phantom devices.
1867			 */
1868			ata_dev_dbg(dev,
1869				    "both IDENTIFYs aborted, assuming NODEV\n");
1870			return -ENOENT;
1871		}
1872
1873		rc = -EIO;
1874		reason = "I/O error";
1875		goto err_out;
1876	}
1877
1878	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1879		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1880			    "class=%d may_fallback=%d tried_spinup=%d\n",
1881			    class, may_fallback, tried_spinup);
1882		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1883			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1884	}
1885
1886	/* Falling back doesn't make sense if ID data was read
1887	 * successfully at least once.
1888	 */
1889	may_fallback = 0;
1890
1891	swap_buf_le16(id, ATA_ID_WORDS);
1892
1893	/* sanity check */
1894	rc = -EINVAL;
1895	reason = "device reports invalid type";
1896
1897	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1898		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1899			goto err_out;
1900		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1901							ata_id_is_ata(id)) {
1902			ata_dev_dbg(dev,
1903				"host indicates ignore ATA devices, ignored\n");
1904			return -ENOENT;
1905		}
1906	} else {
1907		if (ata_id_is_ata(id))
1908			goto err_out;
1909	}
1910
1911	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1912		tried_spinup = 1;
1913		/*
1914		 * Drive powered-up in standby mode, and requires a specific
1915		 * SET_FEATURES spin-up subcommand before it will accept
1916		 * anything other than the original IDENTIFY command.
1917		 */
1918		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1919		if (err_mask && id[2] != 0x738c) {
1920			rc = -EIO;
1921			reason = "SPINUP failed";
1922			goto err_out;
1923		}
1924		/*
1925		 * If the drive initially returned incomplete IDENTIFY info,
1926		 * we now must reissue the IDENTIFY command.
1927		 */
1928		if (id[2] == 0x37c8)
1929			goto retry;
1930	}
1931
1932	if ((flags & ATA_READID_POSTRESET) &&
1933	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1934		/*
1935		 * The exact sequence expected by certain pre-ATA4 drives is:
1936		 * SRST RESET
1937		 * IDENTIFY (optional in early ATA)
1938		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1939		 * anything else..
1940		 * Some drives were very specific about that exact sequence.
1941		 *
1942		 * Note that ATA4 says lba is mandatory so the second check
1943		 * should never trigger.
1944		 */
1945		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1946			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1947			if (err_mask) {
1948				rc = -EIO;
1949				reason = "INIT_DEV_PARAMS failed";
1950				goto err_out;
1951			}
1952
1953			/* current CHS translation info (id[53-58]) might be
1954			 * changed. reread the identify device info.
1955			 */
1956			flags &= ~ATA_READID_POSTRESET;
1957			goto retry;
1958		}
1959	}
1960
1961	*p_class = class;
1962
1963	return 0;
1964
1965 err_out:
1966	if (ata_msg_warn(ap))
1967		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1968			     reason, err_mask);
1969	return rc;
1970}
1971
1972/**
1973 *	ata_read_log_page - read a specific log page
1974 *	@dev: target device
1975 *	@log: log to read
1976 *	@page: page to read
1977 *	@buf: buffer to store read page
1978 *	@sectors: number of sectors to read
1979 *
1980 *	Read log page using READ_LOG_EXT command.
1981 *
1982 *	LOCKING:
1983 *	Kernel thread context (may sleep).
1984 *
1985 *	RETURNS:
1986 *	0 on success, AC_ERR_* mask otherwise.
1987 */
1988unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1989			       u8 page, void *buf, unsigned int sectors)
1990{
1991	unsigned long ap_flags = dev->link->ap->flags;
1992	struct ata_taskfile tf;
1993	unsigned int err_mask;
1994	bool dma = false;
1995
1996	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1997
1998	/*
1999	 * Return error without actually issuing the command on controllers
 2000	 * which, e.g., lock up on a read log page.
2001	 */
2002	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2003		return AC_ERR_DEV;
2004
2005retry:
2006	ata_tf_init(dev, &tf);
2007	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
2008	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2009		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2010		tf.protocol = ATA_PROT_DMA;
2011		dma = true;
2012	} else {
2013		tf.command = ATA_CMD_READ_LOG_EXT;
2014		tf.protocol = ATA_PROT_PIO;
2015		dma = false;
2016	}
2017	tf.lbal = log;
2018	tf.lbam = page;
2019	tf.nsect = sectors;
2020	tf.hob_nsect = sectors >> 8;
2021	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2022
2023	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2024				     buf, sectors * ATA_SECT_SIZE, 0);
2025
2026	if (err_mask && dma) {
2027		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2028		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
2029		goto retry;
2030	}
2031
2032	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2033	return err_mask;
2034}
2035
2036static bool ata_log_supported(struct ata_device *dev, u8 log)
2037{
2038	struct ata_port *ap = dev->link->ap;
2039
2040	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2041		return false;
2042	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2043}
2044
2045static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2046{
2047	struct ata_port *ap = dev->link->ap;
2048	unsigned int err, i;
2049
2050	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2051		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2052		return false;
2053	}
2054
2055	/*
2056	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2057	 * supported.
2058	 */
2059	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2060				1);
2061	if (err) {
2062		ata_dev_info(dev,
2063			     "failed to get Device Identify Log Emask 0x%x\n",
2064			     err);
2065		return false;
2066	}
2067
2068	for (i = 0; i < ap->sector_buf[8]; i++) {
2069		if (ap->sector_buf[9 + i] == page)
2070			return true;
2071	}
2072
2073	return false;
2074}
2075
2076static int ata_do_link_spd_horkage(struct ata_device *dev)
2077{
2078	struct ata_link *plink = ata_dev_phys_link(dev);
2079	u32 target, target_limit;
2080
2081	if (!sata_scr_valid(plink))
2082		return 0;
2083
2084	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2085		target = 1;
2086	else
2087		return 0;
2088
2089	target_limit = (1 << target) - 1;
2090
2091	/* if already on stricter limit, no need to push further */
2092	if (plink->sata_spd_limit <= target_limit)
2093		return 0;
2094
2095	plink->sata_spd_limit = target_limit;
2096
2097	/* Request another EH round by returning -EAGAIN if link is
2098	 * going faster than the target speed.  Forward progress is
2099	 * guaranteed by setting sata_spd_limit to target_limit above.
2100	 */
2101	if (plink->sata_spd > target) {
2102		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2103			     sata_spd_string(target));
2104		return -EAGAIN;
2105	}
2106	return 0;
2107}
2108
2109static inline u8 ata_dev_knobble(struct ata_device *dev)
2110{
2111	struct ata_port *ap = dev->link->ap;
2112
2113	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2114		return 0;
2115
2116	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2117}
2118
2119static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2120{
2121	struct ata_port *ap = dev->link->ap;
2122	unsigned int err_mask;
2123
2124	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2125		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2126		return;
2127	}
2128	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2129				     0, ap->sector_buf, 1);
2130	if (err_mask) {
2131		ata_dev_dbg(dev,
2132			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2133			    err_mask);
2134	} else {
2135		u8 *cmds = dev->ncq_send_recv_cmds;
2136
2137		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2138		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2139
2140		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2141			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2142			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2143				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2144		}
2145	}
2146}
2147
2148static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2149{
2150	struct ata_port *ap = dev->link->ap;
2151	unsigned int err_mask;
2152
2153	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2154		ata_dev_warn(dev,
2155			     "NCQ Non-Data Log not supported\n");
2156		return;
2157	}
2158	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2159				     0, ap->sector_buf, 1);
2160	if (err_mask) {
2161		ata_dev_dbg(dev,
2162			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
2163			    err_mask);
2164	} else {
2165		u8 *cmds = dev->ncq_non_data_cmds;
2166
2167		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2168	}
2169}
2170
2171static void ata_dev_config_ncq_prio(struct ata_device *dev)
2172{
2173	struct ata_port *ap = dev->link->ap;
2174	unsigned int err_mask;
2175
2176	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2177		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2178		return;
2179	}
2180
2181	err_mask = ata_read_log_page(dev,
2182				     ATA_LOG_IDENTIFY_DEVICE,
2183				     ATA_LOG_SATA_SETTINGS,
2184				     ap->sector_buf,
2185				     1);
2186	if (err_mask) {
2187		ata_dev_dbg(dev,
2188			    "failed to get Identify Device data, Emask 0x%x\n",
2189			    err_mask);
2190		return;
2191	}
2192
2193	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2194		dev->flags |= ATA_DFLAG_NCQ_PRIO;
2195	} else {
2196		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2197		ata_dev_dbg(dev, "SATA page does not support priority\n");
2198	}
2199
2200}
2201
2202static bool ata_dev_check_adapter(struct ata_device *dev,
2203				  unsigned short vendor_id)
2204{
2205	struct pci_dev *pcidev = NULL;
2206	struct device *parent_dev = NULL;
2207
2208	for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2209	     parent_dev = parent_dev->parent) {
2210		if (dev_is_pci(parent_dev)) {
2211			pcidev = to_pci_dev(parent_dev);
2212			if (pcidev->vendor == vendor_id)
2213				return true;
2214			break;
2215		}
2216	}
2217
2218	return false;
2219}
2220
2221static int ata_dev_config_ncq(struct ata_device *dev,
2222			       char *desc, size_t desc_sz)
2223{
2224	struct ata_port *ap = dev->link->ap;
2225	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2226	unsigned int err_mask;
2227	char *aa_desc = "";
2228
2229	if (!ata_id_has_ncq(dev->id)) {
2230		desc[0] = '\0';
2231		return 0;
2232	}
2233	if (!IS_ENABLED(CONFIG_SATA_HOST))
2234		return 0;
2235	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2236		snprintf(desc, desc_sz, "NCQ (not used)");
2237		return 0;
2238	}
2239
2240	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
2241	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
2242		snprintf(desc, desc_sz, "NCQ (not used)");
2243		return 0;
2244	}
2245
2246	if (ap->flags & ATA_FLAG_NCQ) {
2247		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2248		dev->flags |= ATA_DFLAG_NCQ;
2249	}
2250
2251	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2252		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2253		ata_id_has_fpdma_aa(dev->id)) {
2254		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2255			SATA_FPDMA_AA);
2256		if (err_mask) {
2257			ata_dev_err(dev,
2258				    "failed to enable AA (error_mask=0x%x)\n",
2259				    err_mask);
2260			if (err_mask != AC_ERR_DEV) {
2261				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2262				return -EIO;
2263			}
2264		} else
2265			aa_desc = ", AA";
2266	}
2267
2268	if (hdepth >= ddepth)
2269		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2270	else
2271		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2272			ddepth, aa_desc);
2273
2274	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2275		if (ata_id_has_ncq_send_and_recv(dev->id))
2276			ata_dev_config_ncq_send_recv(dev);
2277		if (ata_id_has_ncq_non_data(dev->id))
2278			ata_dev_config_ncq_non_data(dev);
2279		if (ata_id_has_ncq_prio(dev->id))
2280			ata_dev_config_ncq_prio(dev);
2281	}
2282
2283	return 0;
2284}
2285
2286static void ata_dev_config_sense_reporting(struct ata_device *dev)
2287{
2288	unsigned int err_mask;
2289
2290	if (!ata_id_has_sense_reporting(dev->id))
2291		return;
2292
2293	if (ata_id_sense_reporting_enabled(dev->id))
2294		return;
2295
2296	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2297	if (err_mask) {
2298		ata_dev_dbg(dev,
2299			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2300			    err_mask);
2301	}
2302}
2303
2304static void ata_dev_config_zac(struct ata_device *dev)
2305{
2306	struct ata_port *ap = dev->link->ap;
2307	unsigned int err_mask;
2308	u8 *identify_buf = ap->sector_buf;
2309
2310	dev->zac_zones_optimal_open = U32_MAX;
2311	dev->zac_zones_optimal_nonseq = U32_MAX;
2312	dev->zac_zones_max_open = U32_MAX;
2313
2314	/*
2315	 * Always set the 'ZAC' flag for Host-managed devices.
2316	 */
2317	if (dev->class == ATA_DEV_ZAC)
2318		dev->flags |= ATA_DFLAG_ZAC;
2319	else if (ata_id_zoned_cap(dev->id) == 0x01)
2320		/*
2321		 * Check for host-aware devices.
2322		 */
2323		dev->flags |= ATA_DFLAG_ZAC;
2324
2325	if (!(dev->flags & ATA_DFLAG_ZAC))
2326		return;
2327
2328	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2329		ata_dev_warn(dev,
2330			     "ATA Zoned Information Log not supported\n");
2331		return;
2332	}
2333
2334	/*
2335	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2336	 */
2337	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2338				     ATA_LOG_ZONED_INFORMATION,
2339				     identify_buf, 1);
2340	if (!err_mask) {
2341		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2342
2343		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2344		if ((zoned_cap >> 63))
2345			dev->zac_zoned_cap = (zoned_cap & 1);
2346		opt_open = get_unaligned_le64(&identify_buf[24]);
2347		if ((opt_open >> 63))
2348			dev->zac_zones_optimal_open = (u32)opt_open;
2349		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2350		if ((opt_nonseq >> 63))
2351			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2352		max_open = get_unaligned_le64(&identify_buf[40]);
2353		if ((max_open >> 63))
2354			dev->zac_zones_max_open = (u32)max_open;
2355	}
2356}
2357
2358static void ata_dev_config_trusted(struct ata_device *dev)
2359{
2360	struct ata_port *ap = dev->link->ap;
2361	u64 trusted_cap;
2362	unsigned int err;
2363
2364	if (!ata_id_has_trusted(dev->id))
2365		return;
2366
2367	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2368		ata_dev_warn(dev,
2369			     "Security Log not supported\n");
2370		return;
2371	}
2372
2373	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2374			ap->sector_buf, 1);
2375	if (err) {
2376		ata_dev_dbg(dev,
2377			    "failed to read Security Log, Emask 0x%x\n", err);
2378		return;
2379	}
2380
2381	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2382	if (!(trusted_cap & (1ULL << 63))) {
2383		ata_dev_dbg(dev,
2384			    "Trusted Computing capability qword not valid!\n");
2385		return;
2386	}
2387
2388	if (trusted_cap & (1 << 0))
2389		dev->flags |= ATA_DFLAG_TRUSTED;
2390}
2391
2392/**
2393 *	ata_dev_configure - Configure the specified ATA/ATAPI device
2394 *	@dev: Target device to configure
2395 *
2396 *	Configure @dev according to @dev->id.  Generic and low-level
2397 *	driver specific fixups are also applied.
2398 *
2399 *	LOCKING:
2400 *	Kernel thread context (may sleep)
2401 *
2402 *	RETURNS:
2403 *	0 on success, -errno otherwise
2404 */
2405int ata_dev_configure(struct ata_device *dev)
2406{
2407	struct ata_port *ap = dev->link->ap;
2408	struct ata_eh_context *ehc = &dev->link->eh_context;
2409	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2410	const u16 *id = dev->id;
2411	unsigned long xfer_mask;
2412	unsigned int err_mask;
2413	char revbuf[7];		/* XYZ-99\0 */
2414	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2415	char modelbuf[ATA_ID_PROD_LEN+1];
2416	int rc;
2417
2418	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2419		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2420		return 0;
2421	}
2422
2423	if (ata_msg_probe(ap))
2424		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2425
2426	/* set horkage */
2427	dev->horkage |= ata_dev_blacklisted(dev);
2428	ata_force_horkage(dev);
2429
2430	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2431		ata_dev_info(dev, "unsupported device, disabling\n");
2432		ata_dev_disable(dev);
2433		return 0;
2434	}
2435
2436	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2437	    dev->class == ATA_DEV_ATAPI) {
2438		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2439			     atapi_enabled ? "not supported with this driver"
2440			     : "disabled");
2441		ata_dev_disable(dev);
2442		return 0;
2443	}
2444
2445	rc = ata_do_link_spd_horkage(dev);
2446	if (rc)
2447		return rc;
2448
2449	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2450	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2451	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2452		dev->horkage |= ATA_HORKAGE_NOLPM;
2453
2454	if (ap->flags & ATA_FLAG_NO_LPM)
2455		dev->horkage |= ATA_HORKAGE_NOLPM;
2456
2457	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2458		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2459		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2460	}
2461
2462	/* let ACPI work its magic */
2463	rc = ata_acpi_on_devcfg(dev);
2464	if (rc)
2465		return rc;
2466
2467	/* massage HPA, do it early as it might change IDENTIFY data */
2468	rc = ata_hpa_resize(dev);
2469	if (rc)
2470		return rc;
2471
2472	/* print device capabilities */
2473	if (ata_msg_probe(ap))
2474		ata_dev_dbg(dev,
2475			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2476			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2477			    __func__,
2478			    id[49], id[82], id[83], id[84],
2479			    id[85], id[86], id[87], id[88]);
2480
2481	/* initialize to-be-configured parameters */
2482	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2483	dev->max_sectors = 0;
2484	dev->cdb_len = 0;
2485	dev->n_sectors = 0;
2486	dev->cylinders = 0;
2487	dev->heads = 0;
2488	dev->sectors = 0;
2489	dev->multi_count = 0;
2490
2491	/*
2492	 * common ATA, ATAPI feature tests
2493	 */
2494
2495	/* find max transfer mode; for printk only */
2496	xfer_mask = ata_id_xfermask(id);
2497
2498	if (ata_msg_probe(ap))
2499		ata_dump_id(id);
2500
2501	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2502	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2503			sizeof(fwrevbuf));
2504
2505	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2506			sizeof(modelbuf));
2507
2508	/* ATA-specific feature tests */
2509	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2510		if (ata_id_is_cfa(id)) {
2511			/* CPRM may make this media unusable */
2512			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2513				ata_dev_warn(dev,
2514	"supports DRM functions and may not be fully accessible\n");
2515			snprintf(revbuf, 7, "CFA");
2516		} else {
2517			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2518			/* Warn the user if the device has TPM extensions */
2519			if (ata_id_has_tpm(id))
2520				ata_dev_warn(dev,
2521	"supports DRM functions and may not be fully accessible\n");
2522		}
2523
2524		dev->n_sectors = ata_id_n_sectors(id);
2525
2526		/* get current R/W Multiple count setting */
2527		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2528			unsigned int max = dev->id[47] & 0xff;
2529			unsigned int cnt = dev->id[59] & 0xff;
2530			/* only recognize/allow powers of two here */
2531			if (is_power_of_2(max) && is_power_of_2(cnt))
2532				if (cnt <= max)
2533					dev->multi_count = cnt;
2534		}
2535
2536		if (ata_id_has_lba(id)) {
2537			const char *lba_desc;
2538			char ncq_desc[24];
2539
2540			lba_desc = "LBA";
2541			dev->flags |= ATA_DFLAG_LBA;
2542			if (ata_id_has_lba48(id)) {
2543				dev->flags |= ATA_DFLAG_LBA48;
2544				lba_desc = "LBA48";
2545
2546				if (dev->n_sectors >= (1UL << 28) &&
2547				    ata_id_has_flush_ext(id))
2548					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2549			}
2550
2551			/* config NCQ */
2552			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2553			if (rc)
2554				return rc;
2555
2556			/* print device info to dmesg */
2557			if (ata_msg_drv(ap) && print_info) {
2558				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2559					     revbuf, modelbuf, fwrevbuf,
2560					     ata_mode_string(xfer_mask));
2561				ata_dev_info(dev,
2562					     "%llu sectors, multi %u: %s %s\n",
2563					(unsigned long long)dev->n_sectors,
2564					dev->multi_count, lba_desc, ncq_desc);
2565			}
2566		} else {
2567			/* CHS */
2568
2569			/* Default translation */
2570			dev->cylinders	= id[1];
2571			dev->heads	= id[3];
2572			dev->sectors	= id[6];
2573
2574			if (ata_id_current_chs_valid(id)) {
2575				/* Current CHS translation is valid. */
2576				dev->cylinders = id[54];
2577				dev->heads     = id[55];
2578				dev->sectors   = id[56];
2579			}
2580
2581			/* print device info to dmesg */
2582			if (ata_msg_drv(ap) && print_info) {
2583				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2584					     revbuf,	modelbuf, fwrevbuf,
2585					     ata_mode_string(xfer_mask));
2586				ata_dev_info(dev,
2587					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2588					     (unsigned long long)dev->n_sectors,
2589					     dev->multi_count, dev->cylinders,
2590					     dev->heads, dev->sectors);
2591			}
2592		}
2593
2594		/* Check and mark DevSlp capability. Get DevSlp timing variables
2595		 * from SATA Settings page of Identify Device Data Log.
2596		 */
2597		if (ata_id_has_devslp(dev->id)) {
2598			u8 *sata_setting = ap->sector_buf;
2599			int i, j;
2600
2601			dev->flags |= ATA_DFLAG_DEVSLP;
2602			err_mask = ata_read_log_page(dev,
2603						     ATA_LOG_IDENTIFY_DEVICE,
2604						     ATA_LOG_SATA_SETTINGS,
2605						     sata_setting,
2606						     1);
2607			if (err_mask)
2608				ata_dev_dbg(dev,
2609					    "failed to get Identify Device Data, Emask 0x%x\n",
2610					    err_mask);
2611			else
2612				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2613					j = ATA_LOG_DEVSLP_OFFSET + i;
2614					dev->devslp_timing[i] = sata_setting[j];
2615				}
2616		}
2617		ata_dev_config_sense_reporting(dev);
2618		ata_dev_config_zac(dev);
2619		ata_dev_config_trusted(dev);
2620		dev->cdb_len = 32;
2621	}
2622
2623	/* ATAPI-specific feature tests */
2624	else if (dev->class == ATA_DEV_ATAPI) {
2625		const char *cdb_intr_string = "";
2626		const char *atapi_an_string = "";
2627		const char *dma_dir_string = "";
2628		u32 sntf;
2629
2630		rc = atapi_cdb_len(id);
2631		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2632			if (ata_msg_warn(ap))
2633				ata_dev_warn(dev, "unsupported CDB len\n");
2634			rc = -EINVAL;
2635			goto err_out_nosup;
2636		}
2637		dev->cdb_len = (unsigned int) rc;
2638
2639		/* Enable ATAPI AN if both the host and device have
2640		 * the support.  If PMP is attached, SNTF is required
2641		 * to enable ATAPI AN to discern between PHY status
2642		 * changed notifications and ATAPI ANs.
2643		 */
2644		if (atapi_an &&
2645		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2646		    (!sata_pmp_attached(ap) ||
2647		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2648			/* issue SET feature command to turn this on */
2649			err_mask = ata_dev_set_feature(dev,
2650					SETFEATURES_SATA_ENABLE, SATA_AN);
2651			if (err_mask)
2652				ata_dev_err(dev,
2653					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2654					    err_mask);
2655			else {
2656				dev->flags |= ATA_DFLAG_AN;
2657				atapi_an_string = ", ATAPI AN";
2658			}
2659		}
2660
2661		if (ata_id_cdb_intr(dev->id)) {
2662			dev->flags |= ATA_DFLAG_CDB_INTR;
2663			cdb_intr_string = ", CDB intr";
2664		}
2665
2666		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2667			dev->flags |= ATA_DFLAG_DMADIR;
2668			dma_dir_string = ", DMADIR";
2669		}
2670
2671		if (ata_id_has_da(dev->id)) {
2672			dev->flags |= ATA_DFLAG_DA;
2673			zpodd_init(dev);
2674		}
2675
2676		/* print device info to dmesg */
2677		if (ata_msg_drv(ap) && print_info)
2678			ata_dev_info(dev,
2679				     "ATAPI: %s, %s, max %s%s%s%s\n",
2680				     modelbuf, fwrevbuf,
2681				     ata_mode_string(xfer_mask),
2682				     cdb_intr_string, atapi_an_string,
2683				     dma_dir_string);
2684	}
2685
2686	/* determine max_sectors */
2687	dev->max_sectors = ATA_MAX_SECTORS;
2688	if (dev->flags & ATA_DFLAG_LBA48)
2689		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2690
2691	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2692	   ATA_MAX_SECTORS sectors */
2693	if (ata_dev_knobble(dev)) {
2694		if (ata_msg_drv(ap) && print_info)
2695			ata_dev_info(dev, "applying bridge limits\n");
2696		dev->udma_mask &= ATA_UDMA5;
2697		dev->max_sectors = ATA_MAX_SECTORS;
2698	}
2699
2700	if ((dev->class == ATA_DEV_ATAPI) &&
2701	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2702		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2703		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2704	}
2705
2706	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2707		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2708					 dev->max_sectors);
2709
2710	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2711		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2712					 dev->max_sectors);
2713
2714	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2715		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2716
2717	if (ap->ops->dev_config)
2718		ap->ops->dev_config(dev);
2719
2720	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2721		/* Let the user know. We don't want to disallow opens for
2722		   rescue purposes, or in case the vendor is just a blithering
2723		   idiot. Do this after the dev_config call as some controllers
2724		   with buggy firmware may want to avoid reporting false device
2725		   bugs */
2726
2727		if (print_info) {
2728			ata_dev_warn(dev,
2729"Drive reports diagnostics failure. This may indicate a drive\n");
2730			ata_dev_warn(dev,
2731"fault or invalid emulation. Contact drive vendor for information.\n");
2732		}
2733	}
2734
2735	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2736		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2737		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2738	}
2739
2740	return 0;
2741
2742err_out_nosup:
2743	if (ata_msg_probe(ap))
2744		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2745	return rc;
2746}
2747
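/*
 * Note: low-level driver fixups run through ap->ops->dev_config above.
 * A sketch of such a callback (the driver name and the 128-sector cap
 * are purely illustrative, not taken from any real driver):
 *
 *	static void example_dev_config(struct ata_device *dev)
 *	{
 *		if (dev->class == ATA_DEV_ATAPI)
 *			dev->max_sectors = min_t(unsigned int,
 *						 dev->max_sectors, 128);
 *	}
 */
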
2748/**
2749 *	ata_cable_40wire	-	return 40 wire cable type
2750 *	@ap: port
2751 *
2752 *	Helper method for drivers which want to hardwire 40 wire cable
2753 *	detection.
2754 */
2755
2756int ata_cable_40wire(struct ata_port *ap)
2757{
2758	return ATA_CBL_PATA40;
2759}
2760EXPORT_SYMBOL_GPL(ata_cable_40wire);
2761
2762/**
2763 *	ata_cable_80wire	-	return 80 wire cable type
2764 *	@ap: port
2765 *
2766 *	Helper method for drivers which want to hardwire 80 wire cable
2767 *	detection.
2768 */
2769
2770int ata_cable_80wire(struct ata_port *ap)
2771{
2772	return ATA_CBL_PATA80;
2773}
2774EXPORT_SYMBOL_GPL(ata_cable_80wire);
2775
2776/**
2777 *	ata_cable_unknown	-	return unknown PATA cable.
2778 *	@ap: port
2779 *
2780 *	Helper method for drivers which have no PATA cable detection.
2781 */
2782
2783int ata_cable_unknown(struct ata_port *ap)
2784{
2785	return ATA_CBL_PATA_UNK;
2786}
2787EXPORT_SYMBOL_GPL(ata_cable_unknown);
2788
2789/**
2790 *	ata_cable_ignore	-	return ignored PATA cable.
2791 *	@ap: port
2792 *
2793 *	Helper method for drivers which don't use cable type to limit
2794 *	transfer mode.
2795 */
2796int ata_cable_ignore(struct ata_port *ap)
2797{
2798	return ATA_CBL_PATA_IGN;
2799}
2800EXPORT_SYMBOL_GPL(ata_cable_ignore);
2801
2802/**
2803 *	ata_cable_sata	-	return SATA cable type
2804 *	@ap: port
2805 *
2806 *	Helper method for drivers which have SATA cables
2807 */
2808
2809int ata_cable_sata(struct ata_port *ap)
2810{
2811	return ATA_CBL_SATA;
2812}
2813EXPORT_SYMBOL_GPL(ata_cable_sata);
2814
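/*
 * The cable helpers above are meant to be wired into a driver's
 * struct ata_port_operations.  Sketch (the ops name is illustrative;
 * ata_bmdma_port_ops is the usual base for BMDMA PATA drivers):
 *
 *	static struct ata_port_operations example_pata_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */
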
2815/**
2816 *	ata_bus_probe - Reset and probe ATA bus
2817 *	@ap: Bus to probe
2818 *
2819 *	Master ATA bus probing function.  Initiates a hardware-dependent
2820 *	bus reset, then attempts to identify any devices found on
2821 *	the bus.
2822 *
2823 *	LOCKING:
2824 *	PCI/etc. bus probe sem.
2825 *
2826 *	RETURNS:
2827 *	Zero on success, negative errno otherwise.
2828 */
2829
2830int ata_bus_probe(struct ata_port *ap)
2831{
2832	unsigned int classes[ATA_MAX_DEVICES];
2833	int tries[ATA_MAX_DEVICES];
2834	int rc;
2835	struct ata_device *dev;
2836
2837	ata_for_each_dev(dev, &ap->link, ALL)
2838		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2839
2840 retry:
2841	ata_for_each_dev(dev, &ap->link, ALL) {
2842		/* If we issue an SRST then an ATA drive (not ATAPI)
2843		 * may change configuration and be in PIO0 timing. If
2844		 * we do a hard reset (or are coming from power on)
2845		 * this is true for ATA or ATAPI. Until we've set a
2846		 * suitable controller mode we should not touch the
2847		 * bus as we may be talking too fast.
2848		 */
2849		dev->pio_mode = XFER_PIO_0;
2850		dev->dma_mode = 0xff;
2851
2852		/* If the controller has a pio mode setup function
2853		 * then use it to set the chipset to rights. Don't
2854		 * touch the DMA setup as that will be dealt with when
2855		 * configuring devices.
2856		 */
2857		if (ap->ops->set_piomode)
2858			ap->ops->set_piomode(ap, dev);
2859	}
2860
2861	/* reset and determine device classes */
2862	ap->ops->phy_reset(ap);
2863
2864	ata_for_each_dev(dev, &ap->link, ALL) {
2865		if (dev->class != ATA_DEV_UNKNOWN)
2866			classes[dev->devno] = dev->class;
2867		else
2868			classes[dev->devno] = ATA_DEV_NONE;
2869
2870		dev->class = ATA_DEV_UNKNOWN;
2871	}
2872
2873	/* read IDENTIFY page and configure devices. We have to do the identify
2874	   specific sequence bass-ackwards so that PDIAG- is released by
2875	   the slave device */
2876
2877	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2878		if (tries[dev->devno])
2879			dev->class = classes[dev->devno];
2880
2881		if (!ata_dev_enabled(dev))
2882			continue;
2883
2884		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2885				     dev->id);
2886		if (rc)
2887			goto fail;
2888	}
2889
2890	/* Now ask for the cable type as PDIAG- should have been released */
2891	if (ap->ops->cable_detect)
2892		ap->cbl = ap->ops->cable_detect(ap);
2893
2894	/* We may have SATA bridge glue hiding here irrespective of
2895	 * the reported cable types and sensed types.  When SATA
2896	 * drives indicate we have a bridge, we don't know which end
2897	 * of the link the bridge is, which is a problem.
2898	 */
2899	ata_for_each_dev(dev, &ap->link, ENABLED)
2900		if (ata_id_is_sata(dev->id))
2901			ap->cbl = ATA_CBL_SATA;
2902
2903	/* After the identify sequence we can now set up the devices. We do
2904	   this in the normal order so that the user doesn't get confused */
2905
2906	ata_for_each_dev(dev, &ap->link, ENABLED) {
2907		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2908		rc = ata_dev_configure(dev);
2909		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2910		if (rc)
2911			goto fail;
2912	}
2913
2914	/* configure transfer mode */
2915	rc = ata_set_mode(&ap->link, &dev);
2916	if (rc)
2917		goto fail;
2918
2919	ata_for_each_dev(dev, &ap->link, ENABLED)
2920		return 0;
2921
2922	return -ENODEV;
2923
2924 fail:
2925	tries[dev->devno]--;
2926
2927	switch (rc) {
2928	case -EINVAL:
2929		/* eeek, something went very wrong, give up */
2930		tries[dev->devno] = 0;
2931		break;
2932
2933	case -ENODEV:
2934		/* give it just one more chance */
2935		tries[dev->devno] = min(tries[dev->devno], 1);
2936		fallthrough;
2937	case -EIO:
2938		if (tries[dev->devno] == 1) {
2939			/* This is the last chance, better to slow
2940			 * down than lose it.
2941			 */
2942			sata_down_spd_limit(&ap->link, 0);
2943			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2944		}
2945	}
2946
2947	if (!tries[dev->devno])
2948		ata_dev_disable(dev);
2949
2950	goto retry;
2951}
2952
2953/**
2954 *	sata_print_link_status - Print SATA link status
2955 *	@link: SATA link to printk link status about
2956 *
2957 *	This function prints link speed and status of a SATA link.
2958 *
2959 *	LOCKING:
2960 *	None.
2961 */
2962static void sata_print_link_status(struct ata_link *link)
2963{
2964	u32 sstatus, scontrol, tmp;
2965
2966	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2967		return;
2968	sata_scr_read(link, SCR_CONTROL, &scontrol);
2969
2970	if (ata_phys_link_online(link)) {
2971		tmp = (sstatus >> 4) & 0xf;
2972		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2973			      sata_spd_string(tmp), sstatus, scontrol);
2974	} else {
2975		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2976			      sstatus, scontrol);
2977	}
2978}
2979
2980/**
2981 *	ata_dev_pair		-	return other device on cable
2982 *	@adev: device
2983 *
2984 *	Obtain the other device on the same cable, or if none is
2985 *	present NULL is returned
2986 */
2987
2988struct ata_device *ata_dev_pair(struct ata_device *adev)
2989{
2990	struct ata_link *link = adev->link;
2991	struct ata_device *pair = &link->device[1 - adev->devno];
2992	if (!ata_dev_enabled(pair))
2993		return NULL;
2994	return pair;
2995}
2996EXPORT_SYMBOL_GPL(ata_dev_pair);
2997
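/*
 * Sketch of a typical ata_dev_pair() caller: PATA timing code that
 * must honour the slower device sharing the cable (variable names are
 * illustrative only):
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *	u8 pio = adev->pio_mode;
 *
 *	if (pair && pair->pio_mode < pio)
 *		pio = pair->pio_mode;
 */
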
2998/**
2999 *	sata_down_spd_limit - adjust SATA spd limit downward
3000 *	@link: Link to adjust SATA spd limit for
3001 *	@spd_limit: Additional limit
3002 *
3003 *	Adjust SATA spd limit of @link downward.  Note that this
3004 *	function only adjusts the limit.  The change must be applied
3005 *	using sata_set_spd().
3006 *
3007 *	If @spd_limit is non-zero, the speed is limited to equal to or
3008 *	lower than @spd_limit if such speed is supported.  If
3009 *	@spd_limit is slower than any supported speed, only the lowest
3010 *	supported speed is allowed.
3011 *
3012 *	LOCKING:
3013 *	Inherited from caller.
3014 *
3015 *	RETURNS:
3016 *	0 on success, negative errno on failure
3017 */
3018int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3019{
3020	u32 sstatus, spd, mask;
3021	int rc, bit;
3022
3023	if (!sata_scr_valid(link))
3024		return -EOPNOTSUPP;
3025
3026	/* If SCR can be read, use it to determine the current SPD.
3027	 * If not, use cached value in link->sata_spd.
3028	 */
3029	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3030	if (rc == 0 && ata_sstatus_online(sstatus))
3031		spd = (sstatus >> 4) & 0xf;
3032	else
3033		spd = link->sata_spd;
3034
3035	mask = link->sata_spd_limit;
3036	if (mask <= 1)
3037		return -EINVAL;
3038
3039	/* unconditionally mask off the highest bit */
3040	bit = fls(mask) - 1;
3041	mask &= ~(1 << bit);
3042
3043	/*
3044	 * Mask off all speeds higher than or equal to the current one.  At
3045	 * this point, if current SPD is not available and we previously
3046	 * recorded the link speed from SStatus, the driver has already
3047	 * masked off the highest bit so mask should already be 1 or 0.
3048	 * Otherwise, we should not force 1.5Gbps on a link where we have
3049	 * not previously recorded speed from SStatus.  Just return in this
3050	 * case.
3051	 */
3052	if (spd > 1)
3053		mask &= (1 << (spd - 1)) - 1;
3054	else
3055		return -EINVAL;
3056
3057	/* were we already at the bottom? */
3058	if (!mask)
3059		return -EINVAL;
3060
3061	if (spd_limit) {
3062		if (mask & ((1 << spd_limit) - 1))
3063			mask &= (1 << spd_limit) - 1;
3064		else {
3065			bit = ffs(mask) - 1;
3066			mask = 1 << bit;
3067		}
3068	}
3069
3070	link->sata_spd_limit = mask;
3071
3072	ata_link_warn(link, "limiting SATA link speed to %s\n",
3073		      sata_spd_string(fls(mask)));
3074
3075	return 0;
3076}
3077
3078#ifdef CONFIG_ATA_ACPI
3079/**
3080 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3081 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3082 *	@cycle: cycle duration in ns
3083 *
3084 *	Return matching xfer mode for @cycle.  The returned mode is of
3085 *	the transfer type specified by @xfer_shift.  If @cycle is too
3086 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3087 *	than the fastest known mode, the fasted mode is returned.
3088 *
3089 *	LOCKING:
3090 *	None.
3091 *
3092 *	RETURNS:
3093 *	Matching xfer_mode, 0xff if no match found.
3094 */
3095u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3096{
3097	u8 base_mode = 0xff, last_mode = 0xff;
3098	const struct ata_xfer_ent *ent;
3099	const struct ata_timing *t;
3100
3101	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3102		if (ent->shift == xfer_shift)
3103			base_mode = ent->base;
3104
3105	for (t = ata_timing_find_mode(base_mode);
3106	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3107		unsigned short this_cycle;
3108
3109		switch (xfer_shift) {
3110		case ATA_SHIFT_PIO:
3111		case ATA_SHIFT_MWDMA:
3112			this_cycle = t->cycle;
3113			break;
3114		case ATA_SHIFT_UDMA:
3115			this_cycle = t->udma;
3116			break;
3117		default:
3118			return 0xff;
3119		}
3120
3121		if (cycle > this_cycle)
3122			break;
3123
3124		last_mode = t->mode;
3125	}
3126
3127	return last_mode;
3128}
3129#endif
3130
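/*
 * Sketch of a conversion through ata_timing_cycle2mode(): given a
 * minimum cycle time in ns (t_min here is hypothetical, e.g. derived
 * from firmware-provided timings), pick the fastest modes that are
 * still slow enough; 0xff means no mode of that type fits:
 *
 *	u8 udma = ata_timing_cycle2mode(ATA_SHIFT_UDMA, t_min);
 *	u8 pio  = ata_timing_cycle2mode(ATA_SHIFT_PIO, t_min);
 */
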
3131/**
3132 *	ata_down_xfermask_limit - adjust dev xfer masks downward
3133 *	@dev: Device to adjust xfer masks
3134 *	@sel: ATA_DNXFER_* selector
3135 *
3136 *	Adjust xfer masks of @dev downward.  Note that this function
3137 *	does not apply the change.  Invoking ata_set_mode() afterwards
3138 *	will apply the limit.
3139 *
3140 *	LOCKING:
3141 *	Inherited from caller.
3142 *
3143 *	RETURNS:
3144 *	0 on success, negative errno on failure
3145 */
3146int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3147{
3148	char buf[32];
3149	unsigned long orig_mask, xfer_mask;
3150	unsigned long pio_mask, mwdma_mask, udma_mask;
3151	int quiet, highbit;
3152
3153	quiet = !!(sel & ATA_DNXFER_QUIET);
3154	sel &= ~ATA_DNXFER_QUIET;
3155
3156	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3157						  dev->mwdma_mask,
3158						  dev->udma_mask);
3159	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3160
3161	switch (sel) {
3162	case ATA_DNXFER_PIO:
3163		highbit = fls(pio_mask) - 1;
3164		pio_mask &= ~(1 << highbit);
3165		break;
3166
3167	case ATA_DNXFER_DMA:
3168		if (udma_mask) {
3169			highbit = fls(udma_mask) - 1;
3170			udma_mask &= ~(1 << highbit);
3171			if (!udma_mask)
3172				return -ENOENT;
3173		} else if (mwdma_mask) {
3174			highbit = fls(mwdma_mask) - 1;
3175			mwdma_mask &= ~(1 << highbit);
3176			if (!mwdma_mask)
3177				return -ENOENT;
3178		}
3179		break;
3180
3181	case ATA_DNXFER_40C:
3182		udma_mask &= ATA_UDMA_MASK_40C;
3183		break;
3184
3185	case ATA_DNXFER_FORCE_PIO0:
3186		pio_mask &= 1;
3187		fallthrough;
3188	case ATA_DNXFER_FORCE_PIO:
3189		mwdma_mask = 0;
3190		udma_mask = 0;
3191		break;
3192
3193	default:
3194		BUG();
3195	}
3196
3197	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3198
3199	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3200		return -ENOENT;
3201
3202	if (!quiet) {
3203		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3204			snprintf(buf, sizeof(buf), "%s:%s",
3205				 ata_mode_string(xfer_mask),
3206				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3207		else
3208			snprintf(buf, sizeof(buf), "%s",
3209				 ata_mode_string(xfer_mask));
3210
3211		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3212	}
3213
3214	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3215			    &dev->udma_mask);
3216
3217	return 0;
3218}
3219
3220static int ata_dev_set_mode(struct ata_device *dev)
3221{
3222	struct ata_port *ap = dev->link->ap;
3223	struct ata_eh_context *ehc = &dev->link->eh_context;
3224	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3225	const char *dev_err_whine = "";
3226	int ign_dev_err = 0;
3227	unsigned int err_mask = 0;
3228	int rc;
3229
3230	dev->flags &= ~ATA_DFLAG_PIO;
3231	if (dev->xfer_shift == ATA_SHIFT_PIO)
3232		dev->flags |= ATA_DFLAG_PIO;
3233
3234	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3235		dev_err_whine = " (SET_XFERMODE skipped)";
3236	else {
3237		if (nosetxfer)
3238			ata_dev_warn(dev,
3239				     "NOSETXFER but PATA detected - can't "
3240				     "skip SETXFER, might malfunction\n");
3241		err_mask = ata_dev_set_xfermode(dev);
3242	}
3243
3244	if (err_mask & ~AC_ERR_DEV)
3245		goto fail;
3246
3247	/* revalidate */
3248	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3249	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3250	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3251	if (rc)
3252		return rc;
3253
3254	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3255		/* Old CFA may refuse this command, which is just fine */
3256		if (ata_id_is_cfa(dev->id))
3257			ign_dev_err = 1;
3258		/* Catch several broken garbage emulations plus some pre
3259		   ATA devices */
3260		if (ata_id_major_version(dev->id) == 0 &&
3261					dev->pio_mode <= XFER_PIO_2)
3262			ign_dev_err = 1;
3263		/* Some very old devices and some bad newer ones fail
3264		   any kind of SET_XFERMODE request but support PIO0-2
3265		   timings and no IORDY */
3266		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3267			ign_dev_err = 1;
3268	}
3269	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3270	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3271	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3272	    dev->dma_mode == XFER_MW_DMA_0 &&
3273	    (dev->id[63] >> 8) & 1)
3274		ign_dev_err = 1;
3275
3276	/* if the device is actually configured correctly, ignore dev err */
3277	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3278		ign_dev_err = 1;
3279
3280	if (err_mask & AC_ERR_DEV) {
3281		if (!ign_dev_err)
3282			goto fail;
3283		else
3284			dev_err_whine = " (device error ignored)";
3285	}
3286
3287	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3288		dev->xfer_shift, (int)dev->xfer_mode);
3289
3290	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3291	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
3292		ata_dev_info(dev, "configured for %s%s\n",
3293			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3294			     dev_err_whine);
3295
3296	return 0;
3297
3298 fail:
3299	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3300	return -EIO;
3301}
3302
3303/**
3304 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3305 *	@link: link on which timings will be programmed
3306 *	@r_failed_dev: out parameter for failed device
3307 *
3308 *	Standard implementation of the function used to tune and set
3309 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3310 *	ata_dev_set_mode() fails, pointer to the failing device is
3311 *	returned in @r_failed_dev.
3312 *
3313 *	LOCKING:
3314 *	PCI/etc. bus probe sem.
3315 *
3316 *	RETURNS:
3317 *	0 on success, negative errno otherwise
3318 */
3319
3320int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3321{
3322	struct ata_port *ap = link->ap;
3323	struct ata_device *dev;
3324	int rc = 0, used_dma = 0, found = 0;
3325
3326	/* step 1: calculate xfer_mask */
3327	ata_for_each_dev(dev, link, ENABLED) {
3328		unsigned long pio_mask, dma_mask;
3329		unsigned int mode_mask;
3330
3331		mode_mask = ATA_DMA_MASK_ATA;
3332		if (dev->class == ATA_DEV_ATAPI)
3333			mode_mask = ATA_DMA_MASK_ATAPI;
3334		else if (ata_id_is_cfa(dev->id))
3335			mode_mask = ATA_DMA_MASK_CFA;
3336
3337		ata_dev_xfermask(dev);
3338		ata_force_xfermask(dev);
3339
3340		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3341
3342		if (libata_dma_mask & mode_mask)
3343			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3344						     dev->udma_mask);
3345		else
3346			dma_mask = 0;
3347
3348		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3349		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3350
3351		found = 1;
3352		if (ata_dma_enabled(dev))
3353			used_dma = 1;
3354	}
3355	if (!found)
3356		goto out;
3357
3358	/* step 2: always set host PIO timings */
3359	ata_for_each_dev(dev, link, ENABLED) {
3360		if (dev->pio_mode == 0xff) {
3361			ata_dev_warn(dev, "no PIO support\n");
3362			rc = -EINVAL;
3363			goto out;
3364		}
3365
3366		dev->xfer_mode = dev->pio_mode;
3367		dev->xfer_shift = ATA_SHIFT_PIO;
3368		if (ap->ops->set_piomode)
3369			ap->ops->set_piomode(ap, dev);
3370	}
3371
3372	/* step 3: set host DMA timings */
3373	ata_for_each_dev(dev, link, ENABLED) {
3374		if (!ata_dma_enabled(dev))
3375			continue;
3376
3377		dev->xfer_mode = dev->dma_mode;
3378		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3379		if (ap->ops->set_dmamode)
3380			ap->ops->set_dmamode(ap, dev);
3381	}
3382
3383	/* step 4: update devices' xfer mode */
3384	ata_for_each_dev(dev, link, ENABLED) {
3385		rc = ata_dev_set_mode(dev);
3386		if (rc)
3387			goto out;
3388	}
3389
3390	/* Record simplex status. If we selected DMA then the other
3391	 * host channels are not permitted to do so.
3392	 */
3393	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3394		ap->host->simplex_claimed = ap;
3395
3396 out:
3397	if (rc)
3398		*r_failed_dev = dev;
3399	return rc;
3400}
3401EXPORT_SYMBOL_GPL(ata_do_set_mode);
3402
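/*
 * Drivers needing chip-specific work around mode programming usually
 * wrap this helper from their own ->set_mode callback.  Sketch (the
 * callback and the quirk hook are hypothetical):
 *
 *	static int example_set_mode(struct ata_link *link,
 *				    struct ata_device **r_failed)
 *	{
 *		example_apply_quirks(link);
 *		return ata_do_set_mode(link, r_failed);
 *	}
 */
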
3403/**
3404 *	ata_wait_ready - wait for link to become ready
3405 *	@link: link to be waited on
3406 *	@deadline: deadline jiffies for the operation
3407 *	@check_ready: callback to check link readiness
3408 *
3409 *	Wait for @link to become ready.  @check_ready should return
3410 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3411 *	link doesn't seem to be occupied, other errno for other error
3412 *	conditions.
3413 *
3414 *	Transient -ENODEV conditions are allowed for
3415 *	ATA_TMOUT_FF_WAIT.
3416 *
3417 *	LOCKING:
3418 *	EH context.
3419 *
3420 *	RETURNS:
3421 *	0 if @link is ready before @deadline; otherwise, -errno.
3422 */
3423int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3424		   int (*check_ready)(struct ata_link *link))
3425{
3426	unsigned long start = jiffies;
3427	unsigned long nodev_deadline;
3428	int warned = 0;
3429
3430	/* choose which 0xff timeout to use, read comment in libata.h */
3431	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3432		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3433	else
3434		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3435
3436	/* Slave readiness can't be tested separately from master.  On
3437	 * M/S emulation configuration, this function should be called
3438	 * only on the master and it will handle both master and slave.
3439	 */
3440	WARN_ON(link == link->ap->slave_link);
3441
3442	if (time_after(nodev_deadline, deadline))
3443		nodev_deadline = deadline;
3444
3445	while (1) {
3446		unsigned long now = jiffies;
3447		int ready, tmp;
3448
3449		ready = tmp = check_ready(link);
3450		if (ready > 0)
3451			return 0;
3452
3453		/*
3454		 * -ENODEV could be transient.  Ignore -ENODEV if link
3455		 * is online.  Also, some SATA devices take a long
3456		 * time to clear 0xff after reset.  Wait for
3457		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3458		 * offline.
3459		 *
3460		 * Note that some PATA controllers (pata_ali) explode
3461		 * if status register is read more than once when
3462		 * there's no device attached.
3463		 */
3464		if (ready == -ENODEV) {
3465			if (ata_link_online(link))
3466				ready = 0;
3467			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3468				 !ata_link_offline(link) &&
3469				 time_before(now, nodev_deadline))
3470				ready = 0;
3471		}
3472
3473		if (ready)
3474			return ready;
3475		if (time_after(now, deadline))
3476			return -EBUSY;
3477
3478		if (!warned && time_after(now, start + 5 * HZ) &&
3479		    (deadline - now > 3 * HZ)) {
3480			ata_link_warn(link,
3481				"link is slow to respond, please be patient "
3482				"(ready=%d)\n", tmp);
3483			warned = 1;
3484		}
3485
3486		ata_msleep(link->ap, 50);
3487	}
3488}
3489
3490/**
3491 *	ata_wait_after_reset - wait for link to become ready after reset
3492 *	@link: link to be waited on
3493 *	@deadline: deadline jiffies for the operation
3494 *	@check_ready: callback to check link readiness
3495 *
3496 *	Wait for @link to become ready after reset.
3497 *
3498 *	LOCKING:
3499 *	EH context.
3500 *
3501 *	RETURNS:
3502 *	0 if @link is ready before @deadline; otherwise, -errno.
3503 */
3504int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3505				int (*check_ready)(struct ata_link *link))
3506{
3507	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3508
3509	return ata_wait_ready(link, deadline, check_ready);
3510}
3511EXPORT_SYMBOL_GPL(ata_wait_after_reset);
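
/*
 * Typical use is in a driver reset method after the reset signalling
 * has been performed, with a controller-specific readiness check that
 * returns a positive value once the device responds.  Sketch (the
 * check_ready callback is hypothetical):
 *
 *	rc = ata_wait_after_reset(link, deadline, example_check_ready);
 *	if (rc)
 *		ata_link_err(link, "device not ready after reset\n");
 */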
3512
3513/**
3514 *	ata_std_prereset - prepare for reset
3515 *	@link: ATA link to be reset
3516 *	@deadline: deadline jiffies for the operation
3517 *
3518 *	@link is about to be reset.  Initialize it.  Failure from
3519 *	prereset makes libata abort whole reset sequence and give up
3520 *	that port, so prereset should be best-effort.  It does its
3521 *	best to prepare for reset sequence but if things go wrong, it
3522 *	should just whine, not fail.
3523 *
3524 *	LOCKING:
3525 *	Kernel thread context (may sleep)
3526 *
3527 *	RETURNS:
3528 *	0 on success, -errno otherwise.
3529 */
3530int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3531{
3532	struct ata_port *ap = link->ap;
3533	struct ata_eh_context *ehc = &link->eh_context;
3534	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3535	int rc;
3536
3537	/* if we're about to do hardreset, nothing more to do */
3538	if (ehc->i.action & ATA_EH_HARDRESET)
3539		return 0;
3540
3541	/* if SATA, resume link */
3542	if (ap->flags & ATA_FLAG_SATA) {
3543		rc = sata_link_resume(link, timing, deadline);
3544		/* whine about phy resume failure but proceed */
3545		if (rc && rc != -EOPNOTSUPP)
3546			ata_link_warn(link,
3547				      "failed to resume link for reset (errno=%d)\n",
3548				      rc);
3549	}
3550
3551	/* no point in trying softreset on offline link */
3552	if (ata_phys_link_offline(link))
3553		ehc->i.action &= ~ATA_EH_SOFTRESET;
3554
3555	return 0;
3556}
3557EXPORT_SYMBOL_GPL(ata_std_prereset);
3558
3559/**
3560 *	sata_std_hardreset - COMRESET w/o waiting or classification
3561 *	@link: link to reset
3562 *	@class: resulting class of attached device
3563 *	@deadline: deadline jiffies for the operation
3564 *
3565 *	Standard SATA COMRESET w/o waiting or classification.
3566 *
3567 *	LOCKING:
3568 *	Kernel thread context (may sleep)
3569 *
3570 *	RETURNS:
3571 *	0 if link offline, -EAGAIN if link online, -errno on errors.
3572 */
3573int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3574		       unsigned long deadline)
3575{
3576	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3577	bool online;
3578	int rc;
3579
3580	/* do hardreset */
3581	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3582	return online ? -EAGAIN : rc;
3583}
3584EXPORT_SYMBOL_GPL(sata_std_hardreset);
3585
3586/**
3587 *	ata_std_postreset - standard postreset callback
3588 *	@link: the target ata_link
3589 *	@classes: classes of attached devices
3590 *
3591 *	This function is invoked after a successful reset.  Note that
3592 *	the device might have been reset more than once using
3593 *	different reset methods before postreset is invoked.
3594 *
3595 *	LOCKING:
3596 *	Kernel thread context (may sleep)
3597 */
3598void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3599{
3600	u32 serror;
3601
3602	DPRINTK("ENTER\n");
3603
3604	/* reset complete, clear SError */
3605	if (!sata_scr_read(link, SCR_ERROR, &serror))
3606		sata_scr_write(link, SCR_ERROR, serror);
3607
3608	/* print link status */
3609	sata_print_link_status(link);
3610
3611	DPRINTK("EXIT\n");
3612}
3613EXPORT_SYMBOL_GPL(ata_std_postreset);
3614
3615/**
3616 *	ata_dev_same_device - Determine whether new ID matches configured device
3617 *	@dev: device to compare against
3618 *	@new_class: class of the new device
3619 *	@new_id: IDENTIFY page of the new device
3620 *
3621 *	Compare @new_class and @new_id against @dev and determine
3622 *	whether @dev is the device indicated by @new_class and
3623 *	@new_id.
3624 *
3625 *	LOCKING:
3626 *	None.
3627 *
3628 *	RETURNS:
3629 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3630 */
3631static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3632			       const u16 *new_id)
3633{
3634	const u16 *old_id = dev->id;
3635	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3636	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3637
3638	if (dev->class != new_class) {
3639		ata_dev_info(dev, "class mismatch %d != %d\n",
3640			     dev->class, new_class);
3641		return 0;
3642	}
3643
3644	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3645	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3646	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3647	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3648
3649	if (strcmp(model[0], model[1])) {
3650		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3651			     model[0], model[1]);
3652		return 0;
3653	}
3654
3655	if (strcmp(serial[0], serial[1])) {
3656		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3657			     serial[0], serial[1]);
3658		return 0;
3659	}
3660
3661	return 1;
3662}
3663
3664/**
3665 *	ata_dev_reread_id - Re-read IDENTIFY data
3666 *	@dev: target ATA device
3667 *	@readid_flags: read ID flags
3668 *
3669 *	Re-read IDENTIFY page and make sure @dev is still attached to
3670 *	the port.
3671 *
3672 *	LOCKING:
3673 *	Kernel thread context (may sleep)
3674 *
3675 *	RETURNS:
3676 *	0 on success, negative errno otherwise
3677 */
3678int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3679{
3680	unsigned int class = dev->class;
3681	u16 *id = (void *)dev->link->ap->sector_buf;
3682	int rc;
3683
3684	/* read ID data */
3685	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3686	if (rc)
3687		return rc;
3688
3689	/* is the device still there? */
3690	if (!ata_dev_same_device(dev, class, id))
3691		return -ENODEV;
3692
3693	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3694	return 0;
3695}
3696
3697/**
3698 *	ata_dev_revalidate - Revalidate ATA device
3699 *	@dev: device to revalidate
3700 *	@new_class: new class code
3701 *	@readid_flags: read ID flags
3702 *
3703 *	Re-read IDENTIFY page, make sure @dev is still attached to the
3704 *	port and reconfigure it according to the new IDENTIFY page.
3705 *
3706 *	LOCKING:
3707 *	Kernel thread context (may sleep)
3708 *
3709 *	RETURNS:
3710 *	0 on success, negative errno otherwise
3711 */
3712int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3713		       unsigned int readid_flags)
3714{
3715	u64 n_sectors = dev->n_sectors;
3716	u64 n_native_sectors = dev->n_native_sectors;
3717	int rc;
3718
3719	if (!ata_dev_enabled(dev))
3720		return -ENODEV;
3721
3722	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3723	if (ata_class_enabled(new_class) &&
3724	    new_class != ATA_DEV_ATA &&
3725	    new_class != ATA_DEV_ATAPI &&
3726	    new_class != ATA_DEV_ZAC &&
3727	    new_class != ATA_DEV_SEMB) {
3728		ata_dev_info(dev, "class mismatch %u != %u\n",
3729			     dev->class, new_class);
3730		rc = -ENODEV;
3731		goto fail;
3732	}
3733
3734	/* re-read ID */
3735	rc = ata_dev_reread_id(dev, readid_flags);
3736	if (rc)
3737		goto fail;
3738
3739	/* configure device according to the new ID */
3740	rc = ata_dev_configure(dev);
3741	if (rc)
3742		goto fail;
3743
3744	/* verify n_sectors hasn't changed */
3745	if (dev->class != ATA_DEV_ATA || !n_sectors ||
3746	    dev->n_sectors == n_sectors)
3747		return 0;
3748
3749	/* n_sectors has changed */
3750	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3751		     (unsigned long long)n_sectors,
3752		     (unsigned long long)dev->n_sectors);
3753
3754	/*
3755	 * Something could have caused HPA to be unlocked
3756	 * involuntarily.  If n_native_sectors hasn't changed and the
3757	 * new size matches it, keep the device.
3758	 */
3759	if (dev->n_native_sectors == n_native_sectors &&
3760	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3761		ata_dev_warn(dev,
3762			     "new n_sectors matches native, probably "
3763			     "late HPA unlock, n_sectors updated\n");
3764		/* use the larger n_sectors */
3765		return 0;
3766	}
3767
3768	/*
3769	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
3770	 * unlocking HPA in those cases.
3771	 *
3772	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3773	 */
3774	if (dev->n_native_sectors == n_native_sectors &&
3775	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3776	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
3777		ata_dev_warn(dev,
3778			     "old n_sectors matches native, probably "
3779			     "late HPA lock, will try to unlock HPA\n");
3780		/* try unlocking HPA */
3781		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
3782		rc = -EIO;
3783	} else
3784		rc = -ENODEV;
3785
3786	/* restore original n_[native_]sectors and fail */
3787	dev->n_native_sectors = n_native_sectors;
3788	dev->n_sectors = n_sectors;
3789 fail:
3790	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
3791	return rc;
3792}
3793
3794struct ata_blacklist_entry {
3795	const char *model_num;
3796	const char *model_rev;
3797	unsigned long horkage;
3798};
3799
3800static const struct ata_blacklist_entry ata_device_blacklist [] = {
3801	/* Devices with DMA related problems under Linux */
3802	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
3803	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
3804	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
3805	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
3806	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
3807	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
3808	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
3809	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
3810	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
3811	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
3812	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
3813	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
3814	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
3815	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
3816	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
3817	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
3818	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
3819	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
3820	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
3821	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
3822	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
3823	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
3824	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
3825	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
3826	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3827	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
3828	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
3829	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
3830	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
3831	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
3832	/* Odd clown on sil3726/4726 PMPs */
3833	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
3834
3835	/* Weird ATAPI devices */
3836	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
3837	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
3838	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
3839	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
3840
3841	/*
3842	 * Causes silent data corruption with higher max sects.
3843	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
3844	 */
3845	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
3846
3847	/*
3848	 * These devices time out with higher max sects.
3849	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
3850	 */
3851	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
3852	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
3853
3854	/* Devices we expect to fail diagnostics */
3855
3856	/* Devices where NCQ should be avoided */
3857	/* NCQ is slow */
3858	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
3859	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3860	/* http://thread.gmane.org/gmane.linux.ide/14907 */
3861	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
3862	/* NCQ is broken */
3863	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
3864	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
3865	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
3866	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
3867	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
3868
3869	/* Seagate NCQ + FLUSH CACHE firmware bug */
3870	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
3871						ATA_HORKAGE_FIRMWARE_WARN },
3872
3873	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
3874						ATA_HORKAGE_FIRMWARE_WARN },
3875
3876	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
3877						ATA_HORKAGE_FIRMWARE_WARN },
3878
3879	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
3880						ATA_HORKAGE_FIRMWARE_WARN },
3881
3882	/* drives which fail FPDMA_AA activation (some may freeze afterwards);
3883	   the ST disks also have LPM issues */
3884	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
3885						ATA_HORKAGE_NOLPM, },
3886	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
3887
3888	/* Blacklist entries taken from Silicon Image 3124/3132
3889	   Windows driver .inf file - also several Linux problem reports */
3890	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
3891	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
3892	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3893
3894	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
3895	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
3896
3897	/* Sandisk SD7/8/9s lock up hard on large trims */
3898	{ "SanDisk SD[789]*",	NULL,		ATA_HORKAGE_MAX_TRIM_128M, },
3899
3900	/* devices which puke on READ_NATIVE_MAX */
3901	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
3902	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3903	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3904	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
3905
3906	/* this one allows HPA unlocking but fails IOs on the area */
3907	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
3908
3909	/* Devices which report 1 sector over size HPA */
3910	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3911	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3912	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3913
3914	/* Devices which get the IVB wrong */
3915	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3916	/* Maybe we should just blacklist TSSTcorp... */
3917	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
3918
3919	/* Devices that do not need bridging limits applied */
3920	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
3921	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
3922
3923	/* Devices which aren't very happy with higher link speeds */
3924	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
3925	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
3926
3927	/*
3928	 * Devices which choke on SETXFER.  Applies only if both the
3929	 * device and controller are SATA.
3930	 */
3931	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
3932	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
3933	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
3934	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
3935	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
3936
3937	/* Crucial BX100 SSD 500GB has broken LPM support */
3938	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
3939
3940	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
3941	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
3942						ATA_HORKAGE_ZERO_AFTER_TRIM |
3943						ATA_HORKAGE_NOLPM, },
3944	/* 512GB MX100 with newer firmware has only LPM issues */
3945	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
3946						ATA_HORKAGE_NOLPM, },
3947
3948	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
3949	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3950						ATA_HORKAGE_ZERO_AFTER_TRIM |
3951						ATA_HORKAGE_NOLPM, },
3952	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3953						ATA_HORKAGE_ZERO_AFTER_TRIM |
3954						ATA_HORKAGE_NOLPM, },
3955
3956	/* These specific Samsung models/firmware-revs do not handle LPM well */
3957	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
3958	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
3959	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
3960	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
3961
3962	/* devices that don't properly handle queued TRIM commands */
3963	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
3964						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3965	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3966						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3967	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3968						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3969	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
3970						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3971	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
3972						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3973	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
3974						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3975	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3976						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3977	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3978						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3979	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3980						ATA_HORKAGE_ZERO_AFTER_TRIM |
3981						ATA_HORKAGE_NO_NCQ_ON_ATI, },
3982	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3983						ATA_HORKAGE_ZERO_AFTER_TRIM |
3984						ATA_HORKAGE_NO_NCQ_ON_ATI, },
3985	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
3986						ATA_HORKAGE_ZERO_AFTER_TRIM, },
3987
3988	/* devices that don't properly handle TRIM commands */
3989	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
3990
3991	/*
3992	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
3993	 * (Return Zero After Trim) flags in the ATA Command Set are
3994	 * unreliable in the sense that they only define what happens if
3995	 * the device successfully executed the DSM TRIM command. TRIM
3996	 * is only advisory, however, and the device is free to silently
3997	 * ignore all or parts of the request.
3998	 *
3999	 * Whitelist drives that are known to reliably return zeroes
4000	 * after TRIM.
4001	 */
4002
4003	/*
4004	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4005	 * that model before whitelisting all other intel SSDs.
4006	 */
4007	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4008
4009	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4010	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4011	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4012	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4013	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4014	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4015	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4016	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4017
4018	/*
4019	 * Some WD SATA-I drives spin up and down erratically when the link
4020 * is put into the slumber mode.  We don't have a full list of the
4021	 * affected devices.  Disable LPM if the device matches one of the
4022	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4023	 * lost too.
4024	 *
4025	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4026	 */
4027	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4028	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4029	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4030	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4031	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4032	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4033	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4034
4035	/* End Marker */
4036	{ }
4037};
4038
4039static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4040{
4041	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4042	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4043	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4044
4045	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4046	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4047
4048	while (ad->model_num) {
4049		if (glob_match(ad->model_num, model_num)) {
4050			if (ad->model_rev == NULL)
4051				return ad->horkage;
4052			if (glob_match(ad->model_rev, model_rev))
4053				return ad->horkage;
4054		}
4055		ad++;
4056	}
4057	return 0;
4058}
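/*
 * Note on matching: the model_num/model_rev fields in the table above are
 * shell-style globs evaluated by glob_match().  For example, the pattern
 * "ST3640[36]23AS" covers both ST3640323AS and ST3640623AS, and a firmware
 * glob such as "SD1[5-9]" covers revisions SD15 through SD19.  A NULL
 * model_rev matches any firmware revision.
 */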
4059
4060static int ata_dma_blacklisted(const struct ata_device *dev)
4061{
4062	/* We don't support polling DMA.  Blacklist DMA (and fall back to
4063	 * PIO) for ATAPI devices with CDB-intr if the LLDD handles
4064	 * interrupts only in the HSM_ST_LAST state.
4065	 */
4066	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4067	    (dev->flags & ATA_DFLAG_CDB_INTR))
4068		return 1;
4069	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4070}
4071
4072/**
4073 *	ata_is_40wire		-	check drive side detection
4074 *	@dev: device
4075 *
4076 *	Perform drive side detection decoding, allowing for device vendors
4077 *	who can't follow the documentation.
4078 */
4079
4080static int ata_is_40wire(struct ata_device *dev)
4081{
4082	if (dev->horkage & ATA_HORKAGE_IVB)
4083		return ata_drive_40wire_relaxed(dev->id);
4084	return ata_drive_40wire(dev->id);
4085}
4086
4087/**
4088 *	cable_is_40wire		-	40/80/SATA decider
4089 *	@ap: port to consider
4090 *
4091 *	This function encapsulates the policy for speed management
4092 *	in one place. At the moment we don't cache the result but
4093 *	there is a good case for setting ap->cbl to the result when
4094 *	we are called with unknown cables (and figuring out if it
4095 *	impacts hotplug at all).
4096 *
4097 *	Return 1 if the cable appears to be 40 wire.
4098 */
4099
4100static int cable_is_40wire(struct ata_port *ap)
4101{
4102	struct ata_link *link;
4103	struct ata_device *dev;
4104
4105	/* If the controller thinks we are 40 wire, we are. */
4106	if (ap->cbl == ATA_CBL_PATA40)
4107		return 1;
4108
4109	/* If the controller thinks we are 80 wire, we are. */
4110	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4111		return 0;
4112
4113	/* If the system is known to be a 40 wire short cable (e.g.
4114	 * a laptop), then we allow 80 wire modes even if the drive
4115	 * isn't sure.
4116	 */
4117	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4118		return 0;
4119
4120	/* If the controller doesn't know, we scan.
4121	 *
4122	 * Note: We look for all 40 wire detects at this point.  Any
4123	 *       80 wire detect is taken to be 80 wire cable because
4124	 * - in many setups only one drive (the slave, if present) will
4125	 *   give a valid detect
4126	 * - if you have a non-detect-capable drive you don't want it
4127	 *   to colour the choice
4128	 */
4129	ata_for_each_link(link, ap, EDGE) {
4130		ata_for_each_dev(dev, link, ENABLED) {
4131			if (!ata_is_40wire(dev))
4132				return 0;
4133		}
4134	}
4135	return 1;
4136}
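/*
 * Worked example of the policy above: with ap->cbl == ATA_CBL_PATA_UNK the
 * enabled devices are scanned; a single device reporting an 80-wire capable
 * detect makes the function return 0 (80 wire assumed), and 1 is returned
 * only when every enabled device looks like 40 wire.
 */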
4137
4138/**
4139 *	ata_dev_xfermask - Compute supported xfermask of the given device
4140 *	@dev: Device to compute xfermask for
4141 *
4142 *	Compute supported xfermask of @dev and store it in
4143 *	dev->*_mask.  This function is responsible for applying all
4144 *	known limits including host controller limits, device
4145 *	blacklist, etc...
4146 *
4147 *	LOCKING:
4148 *	None.
4149 */
4150static void ata_dev_xfermask(struct ata_device *dev)
4151{
4152	struct ata_link *link = dev->link;
4153	struct ata_port *ap = link->ap;
4154	struct ata_host *host = ap->host;
4155	unsigned long xfer_mask;
4156
4157	/* controller modes available */
4158	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4159				      ap->mwdma_mask, ap->udma_mask);
4160
4161	/* drive modes available */
4162	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4163				       dev->mwdma_mask, dev->udma_mask);
4164	xfer_mask &= ata_id_xfermask(dev->id);
4165
4166	/*
4167	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4168	 *	cable
4169	 */
4170	if (ata_dev_pair(dev)) {
4171		/* No PIO5 or PIO6 */
4172		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4173	/* No MWDMA3 or MWDMA4 */
4174		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4175	}
4176
4177	if (ata_dma_blacklisted(dev)) {
4178		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4179		ata_dev_warn(dev,
4180			     "device is on DMA blacklist, disabling DMA\n");
4181	}
4182
4183	if ((host->flags & ATA_HOST_SIMPLEX) &&
4184	    host->simplex_claimed && host->simplex_claimed != ap) {
4185		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4186		ata_dev_warn(dev,
4187			     "simplex DMA is claimed by other device, disabling DMA\n");
4188	}
4189
4190	if (ap->flags & ATA_FLAG_NO_IORDY)
4191		xfer_mask &= ata_pio_mask_no_iordy(dev);
4192
4193	if (ap->ops->mode_filter)
4194		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4195
4196	/* Apply the cable rule here.  Don't apply it early because the
4197	 * cable type can itself change when we handle hotplug.
4198	 * Check this last so that we know if the transfer rate was
4199	 * solely limited by the cable.
4200	 * Unknown or 80-wire cables reported host side are checked on
4201	 * the drive side as well.  Cases where we know a 40-wire cable
4202	 * is safely used for 80-wire modes are not checked here.
4203	 */
4204	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4205		/* UDMA/44 or higher would be available */
4206		if (cable_is_40wire(ap)) {
4207			ata_dev_warn(dev,
4208				     "limited to UDMA/33 due to 40-wire cable\n");
4209			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4210		}
4211
4212	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4213			    &dev->mwdma_mask, &dev->udma_mask);
4214}
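/*
 * On the cable check above: 0xF8 << ATA_SHIFT_UDMA covers the UDMA3-UDMA7
 * bits (UDMA/44, /66, /100 and /133), so clearing those bits on a 40-wire
 * cable leaves at most UDMA/33 (UDMA0-UDMA2) selectable.
 */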
4215
4216/**
4217 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4218 *	@dev: Device to which command will be sent
4219 *
4220 *	Issue SET FEATURES - XFER MODE command to device @dev
4221 *	on its port.
4222 *
4223 *	LOCKING:
4224 *	PCI/etc. bus probe sem.
4225 *
4226 *	RETURNS:
4227 *	0 on success, AC_ERR_* mask otherwise.
4228 */
4229
4230static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4231{
4232	struct ata_taskfile tf;
4233	unsigned int err_mask;
4234
4235	/* set up set-features taskfile */
4236	DPRINTK("set features - xfer mode\n");
4237
4238	/* Some controllers and ATAPI devices show flaky interrupt
4239	 * behavior after setting xfer mode.  Use polling instead.
4240	 */
4241	ata_tf_init(dev, &tf);
4242	tf.command = ATA_CMD_SET_FEATURES;
4243	tf.feature = SETFEATURES_XFER;
4244	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4245	tf.protocol = ATA_PROT_NODATA;
4246	/* If we are using IORDY we must send the mode setting command */
4247	if (ata_pio_need_iordy(dev))
4248		tf.nsect = dev->xfer_mode;
4249	/* If the device has IORDY and the controller does not - turn it off */
4250 	else if (ata_id_has_iordy(dev->id))
4251		tf.nsect = 0x01;
4252	else /* In the ancient relic department - skip all of this */
4253		return 0;
4254
4255	/* On some disks, this command causes spin-up, so we need longer timeout */
4256	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4257
4258	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4259	return err_mask;
4260}
4261
4262/**
4263 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4264 *	@dev: Device to which command will be sent
4265 *	@enable: Whether to enable or disable the feature
4266 *	@feature: The feature value to place in the sector count field
4267 *
4268 *	Issue SET FEATURES - SATA FEATURES command to device @dev,
4269 *	passing @feature in the sector count field.
4270 *
4271 *	LOCKING:
4272 *	PCI/etc. bus probe sem.
4273 *
4274 *	RETURNS:
4275 *	0 on success, AC_ERR_* mask otherwise.
4276 */
4277unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4278{
4279	struct ata_taskfile tf;
4280	unsigned int err_mask;
4281	unsigned long timeout = 0;
4282
4283	/* set up set-features taskfile */
4284	DPRINTK("set features - SATA features\n");
4285
4286	ata_tf_init(dev, &tf);
4287	tf.command = ATA_CMD_SET_FEATURES;
4288	tf.feature = enable;
4289	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4290	tf.protocol = ATA_PROT_NODATA;
4291	tf.nsect = feature;
4292
4293	if (enable == SETFEATURES_SPINUP)
4294		timeout = ata_probe_timeout ?
4295			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4296	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4297
4298	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4299	return err_mask;
4300}
4301EXPORT_SYMBOL_GPL(ata_dev_set_feature);
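/*
 * For illustration, a caller asking the device to spin up via SET FEATURES
 * might do (minimal sketch; locking and error handling context omitted):
 *
 *	unsigned int err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
 *
 *	if (err_mask)
 *		ata_dev_warn(dev, "SET FEATURES failed (err_mask=0x%x)\n",
 *			     err_mask);
 */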
4302
4303/**
4304 *	ata_dev_init_params - Issue INIT DEV PARAMS command
4305 *	@dev: Device to which command will be sent
4306 *	@heads: Number of heads (taskfile parameter)
4307 *	@sectors: Number of sectors (taskfile parameter)
4308 *
4309 *	LOCKING:
4310 *	Kernel thread context (may sleep)
4311 *
4312 *	RETURNS:
4313 *	0 on success, AC_ERR_* mask otherwise.
4314 */
4315static unsigned int ata_dev_init_params(struct ata_device *dev,
4316					u16 heads, u16 sectors)
4317{
4318	struct ata_taskfile tf;
4319	unsigned int err_mask;
4320
4321	/* Number of sectors per track 1-255. Number of heads 1-16 */
4322	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4323		return AC_ERR_INVALID;
4324
4325	/* set up init dev params taskfile */
4326	DPRINTK("init dev params \n");
4327
4328	ata_tf_init(dev, &tf);
4329	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4330	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4331	tf.protocol = ATA_PROT_NODATA;
4332	tf.nsect = sectors;
4333	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4334
4335	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4336	/* A clean abort indicates an original or just out-of-spec drive,
4337	   so continue: the setup is issued based on the geometry the
4338	   drive reports as working */
4339	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4340		err_mask = 0;
4341
4342	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4343	return err_mask;
4344}
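/*
 * Worked example of the encoding above: for a legacy CHS geometry of
 * 16 heads and 63 sectors per track, the taskfile carries tf.nsect == 63
 * and tf.device |= 0x0f, i.e. the maximum head number (heads - 1).
 */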
4345
4346/**
4347 *	atapi_check_dma - Check whether ATAPI DMA can be supported
4348 *	@qc: Metadata associated with taskfile to check
4349 *
4350 *	Allow low-level driver to filter ATA PACKET commands, returning
4351 *	a status indicating whether or not it is OK to use DMA for the
4352 *	supplied PACKET command.
4353 *
4354 *	LOCKING:
4355 *	spin_lock_irqsave(host lock)
4356 *
4357 *	RETURNS: 0 when ATAPI DMA can be used
4358 *               nonzero otherwise
4359 */
4360int atapi_check_dma(struct ata_queued_cmd *qc)
4361{
4362	struct ata_port *ap = qc->ap;
4363
4364	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4365	 * few ATAPI devices choke on such DMA requests.
4366	 */
4367	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4368	    unlikely(qc->nbytes & 15))
4369		return 1;
4370
4371	if (ap->ops->check_atapi_dma)
4372		return ap->ops->check_atapi_dma(qc);
4373
4374	return 0;
4375}
4376
4377/**
4378 *	ata_std_qc_defer - Check whether a qc needs to be deferred
4379 *	@qc: ATA command in question
4380 *
4381 *	Non-NCQ commands cannot run with any other command, NCQ or
4382 *	not.  As the upper layer only knows the queue depth, we are
4383 *	responsible for maintaining exclusion.  This function checks
4384 *	whether a new command @qc can be issued.
4385 *
4386 *	LOCKING:
4387 *	spin_lock_irqsave(host lock)
4388 *
4389 *	RETURNS:
4390 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4391 */
4392int ata_std_qc_defer(struct ata_queued_cmd *qc)
4393{
4394	struct ata_link *link = qc->dev->link;
4395
4396	if (ata_is_ncq(qc->tf.protocol)) {
4397		if (!ata_tag_valid(link->active_tag))
4398			return 0;
4399	} else {
4400		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4401			return 0;
4402	}
4403
4404	return ATA_DEFER_LINK;
4405}
4406EXPORT_SYMBOL_GPL(ata_std_qc_defer);
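/*
 * Deferral examples for the helper above: an NCQ command is deferred only
 * while a non-NCQ command is active (link->active_tag valid); a non-NCQ
 * command is deferred while anything is outstanding, whether another
 * non-NCQ command or queued NCQ commands (link->sactive non-zero).
 */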
4407
4408enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4409{
4410	return AC_ERR_OK;
4411}
4412EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4413
4414/**
4415 *	ata_sg_init - Associate command with scatter-gather table.
4416 *	@qc: Command to be associated
4417 *	@sg: Scatter-gather table.
4418 *	@n_elem: Number of elements in s/g table.
4419 *
4420 *	Initialize the data-related elements of queued_cmd @qc
4421 *	to point to a scatter-gather table @sg, containing @n_elem
4422 *	elements.
4423 *
4424 *	LOCKING:
4425 *	spin_lock_irqsave(host lock)
4426 */
4427void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4428		 unsigned int n_elem)
4429{
4430	qc->sg = sg;
4431	qc->n_elem = n_elem;
4432	qc->cursg = qc->sg;
4433}
4434
4435#ifdef CONFIG_HAS_DMA
4436
4437/**
4438 *	ata_sg_clean - Unmap DMA memory associated with command
4439 *	@qc: Command containing DMA memory to be released
4440 *
4441 *	Unmap all mapped DMA memory associated with this command.
4442 *
4443 *	LOCKING:
4444 *	spin_lock_irqsave(host lock)
4445 */
4446static void ata_sg_clean(struct ata_queued_cmd *qc)
4447{
4448	struct ata_port *ap = qc->ap;
4449	struct scatterlist *sg = qc->sg;
4450	int dir = qc->dma_dir;
4451
4452	WARN_ON_ONCE(sg == NULL);
4453
4454	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4455
4456	if (qc->n_elem)
4457		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4458
4459	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4460	qc->sg = NULL;
4461}
4462
4463/**
4464 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4465 *	@qc: Command with scatter-gather table to be mapped.
4466 *
4467 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4468 *
4469 *	LOCKING:
4470 *	spin_lock_irqsave(host lock)
4471 *
4472 *	RETURNS:
4473 *	Zero on success, negative on error.
4474 *
4475 */
4476static int ata_sg_setup(struct ata_queued_cmd *qc)
4477{
4478	struct ata_port *ap = qc->ap;
4479	unsigned int n_elem;
4480
4481	VPRINTK("ENTER, ata%u\n", ap->print_id);
4482
4483	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4484	if (n_elem < 1)
4485		return -1;
4486
4487	VPRINTK("%d sg elements mapped\n", n_elem);
4488	qc->orig_n_elem = qc->n_elem;
4489	qc->n_elem = n_elem;
4490	qc->flags |= ATA_QCFLAG_DMAMAP;
4491
4492	return 0;
4493}
4494
4495#else /* !CONFIG_HAS_DMA */
4496
4497static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4498static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4499
4500#endif /* !CONFIG_HAS_DMA */
4501
4502/**
4503 *	swap_buf_le16 - swap halves of 16-bit words in place
4504 *	@buf:  Buffer to swap
4505 *	@buf_words:  Number of 16-bit words in buffer.
4506 *
4507 *	Swap halves of 16-bit words if needed to convert from
4508 *	little-endian byte order to native cpu byte order, or
4509 *	vice-versa.
4510 *
4511 *	LOCKING:
4512 *	Inherited from caller.
4513 */
4514void swap_buf_le16(u16 *buf, unsigned int buf_words)
4515{
4516#ifdef __BIG_ENDIAN
4517	unsigned int i;
4518
4519	for (i = 0; i < buf_words; i++)
4520		buf[i] = le16_to_cpu(buf[i]);
4521#endif /* __BIG_ENDIAN */
4522}
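/*
 * Example: IDENTIFY DEVICE data is little-endian 16-bit words.  A word
 * whose on-wire bytes are 0x34 0x12 (value 0x1234) is read as 0x3412 by a
 * big-endian CPU, so each word is swapped back to 0x1234 here; on
 * little-endian hosts the loop above compiles to nothing.
 */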
4523
4524/**
4525 *	ata_qc_new_init - Request an available ATA command, and initialize it
4526 *	@dev: Device from whom we request an available command structure
4527 *	@tag: tag
4528 *
4529 *	LOCKING:
4530 *	None.
4531 */
4532
4533struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4534{
4535	struct ata_port *ap = dev->link->ap;
4536	struct ata_queued_cmd *qc;
4537
4538	/* no command while frozen */
4539	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4540		return NULL;
4541
4542	/* libsas case */
4543	if (ap->flags & ATA_FLAG_SAS_HOST) {
4544		tag = ata_sas_allocate_tag(ap);
4545		if (tag < 0)
4546			return NULL;
4547	}
4548
4549	qc = __ata_qc_from_tag(ap, tag);
4550	qc->tag = qc->hw_tag = tag;
4551	qc->scsicmd = NULL;
4552	qc->ap = ap;
4553	qc->dev = dev;
4554
4555	ata_qc_reinit(qc);
4556
4557	return qc;
4558}
4559
4560/**
4561 *	ata_qc_free - free unused ata_queued_cmd
4562 *	@qc: Command to complete
4563 *
4564 *	Designed to free unused ata_queued_cmd object
4565 *	in case something prevents using it.
4566 *
4567 *	LOCKING:
4568 *	spin_lock_irqsave(host lock)
4569 */
4570void ata_qc_free(struct ata_queued_cmd *qc)
4571{
4572	struct ata_port *ap;
4573	unsigned int tag;
4574
4575	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4576	ap = qc->ap;
4577
4578	qc->flags = 0;
4579	tag = qc->tag;
4580	if (ata_tag_valid(tag)) {
4581		qc->tag = ATA_TAG_POISON;
4582		if (ap->flags & ATA_FLAG_SAS_HOST)
4583			ata_sas_free_tag(tag, ap);
4584	}
4585}
4586
4587void __ata_qc_complete(struct ata_queued_cmd *qc)
4588{
4589	struct ata_port *ap;
4590	struct ata_link *link;
4591
4592	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4593	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4594	ap = qc->ap;
4595	link = qc->dev->link;
4596
4597	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4598		ata_sg_clean(qc);
4599
4600	/* command should be marked inactive atomically with qc completion */
4601	if (ata_is_ncq(qc->tf.protocol)) {
4602		link->sactive &= ~(1 << qc->hw_tag);
4603		if (!link->sactive)
4604			ap->nr_active_links--;
4605	} else {
4606		link->active_tag = ATA_TAG_POISON;
4607		ap->nr_active_links--;
4608	}
4609
4610	/* clear exclusive status */
4611	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4612		     ap->excl_link == link))
4613		ap->excl_link = NULL;
4614
4615	/* atapi: mark qc as inactive to prevent the interrupt handler
4616	 * from completing the command twice later, before the error handler
4617	 * is called. (when rc != 0 and atapi request sense is needed)
4618	 */
4619	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4620	ap->qc_active &= ~(1ULL << qc->tag);
4621
4622	/* call completion callback */
4623	qc->complete_fn(qc);
4624}
4625
4626static void fill_result_tf(struct ata_queued_cmd *qc)
4627{
4628	struct ata_port *ap = qc->ap;
4629
4630	qc->result_tf.flags = qc->tf.flags;
4631	ap->ops->qc_fill_rtf(qc);
4632}
4633
4634static void ata_verify_xfer(struct ata_queued_cmd *qc)
4635{
4636	struct ata_device *dev = qc->dev;
4637
4638	if (!ata_is_data(qc->tf.protocol))
4639		return;
4640
4641	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4642		return;
4643
4644	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4645}
4646
4647/**
4648 *	ata_qc_complete - Complete an active ATA command
4649 *	@qc: Command to complete
4650 *
4651 *	Indicate to the mid and upper layers that an ATA command has
4652 *	completed, with either an ok or not-ok status.
4653 *
4654 *	Refrain from calling this function multiple times when
4655 *	successfully completing multiple NCQ commands.
4656 *	ata_qc_complete_multiple() should be used instead, which will
4657 *	properly update IRQ expect state.
4658 *
4659 *	LOCKING:
4660 *	spin_lock_irqsave(host lock)
4661 */
4662void ata_qc_complete(struct ata_queued_cmd *qc)
4663{
4664	struct ata_port *ap = qc->ap;
4665
4666	/* Trigger the LED (if available) */
4667	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
4668
4669	/* XXX: New EH and old EH use different mechanisms to
4670	 * synchronize EH with regular execution path.
4671	 *
4672	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4673	 * Normal execution path is responsible for not accessing a
4674	 * failed qc.  libata core enforces the rule by returning NULL
4675	 * from ata_qc_from_tag() for failed qcs.
4676	 *
4677	 * Old EH depends on ata_qc_complete() nullifying completion
4678	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4679	 * not synchronize with interrupt handler.  Only PIO task is
4680	 * taken care of.
4681	 */
4682	if (ap->ops->error_handler) {
4683		struct ata_device *dev = qc->dev;
4684		struct ata_eh_info *ehi = &dev->link->eh_info;
4685
4686		if (unlikely(qc->err_mask))
4687			qc->flags |= ATA_QCFLAG_FAILED;
4688
4689		/*
4690		 * Finish internal commands without any further processing
4691		 * and always with the result TF filled.
4692		 */
4693		if (unlikely(ata_tag_internal(qc->tag))) {
4694			fill_result_tf(qc);
4695			trace_ata_qc_complete_internal(qc);
4696			__ata_qc_complete(qc);
4697			return;
4698		}
4699
4700		/*
4701		 * Non-internal qc has failed.  Fill the result TF and
4702		 * summon EH.
4703		 */
4704		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4705			fill_result_tf(qc);
4706			trace_ata_qc_complete_failed(qc);
4707			ata_qc_schedule_eh(qc);
4708			return;
4709		}
4710
4711		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4712
4713		/* read result TF if requested */
4714		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4715			fill_result_tf(qc);
4716
4717		trace_ata_qc_complete_done(qc);
4718		/* Some commands need post-processing after successful
4719		 * completion.
4720		 */
4721		switch (qc->tf.command) {
4722		case ATA_CMD_SET_FEATURES:
4723			if (qc->tf.feature != SETFEATURES_WC_ON &&
4724			    qc->tf.feature != SETFEATURES_WC_OFF &&
4725			    qc->tf.feature != SETFEATURES_RA_ON &&
4726			    qc->tf.feature != SETFEATURES_RA_OFF)
4727				break;
4728			fallthrough;
4729		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4730		case ATA_CMD_SET_MULTI: /* multi_count changed */
4731			/* revalidate device */
4732			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4733			ata_port_schedule_eh(ap);
4734			break;
4735
4736		case ATA_CMD_SLEEP:
4737			dev->flags |= ATA_DFLAG_SLEEPING;
4738			break;
4739		}
4740
4741		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4742			ata_verify_xfer(qc);
4743
4744		__ata_qc_complete(qc);
4745	} else {
4746		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4747			return;
4748
4749		/* read result TF if failed or requested */
4750		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4751			fill_result_tf(qc);
4752
4753		__ata_qc_complete(qc);
4754	}
4755}
4756EXPORT_SYMBOL_GPL(ata_qc_complete);
4757
4758/**
4759 *	ata_qc_get_active - get bitmask of active qcs
4760 *	@ap: port in question
4761 *
4762 *	LOCKING:
4763 *	spin_lock_irqsave(host lock)
4764 *
4765 *	RETURNS:
4766 *	Bitmask of active qcs
4767 */
4768u64 ata_qc_get_active(struct ata_port *ap)
4769{
4770	u64 qc_active = ap->qc_active;
4771
4772	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4773	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4774		qc_active |= (1 << 0);
4775		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4776	}
4777
4778	return qc_active;
4779}
4780EXPORT_SYMBOL_GPL(ata_qc_get_active);
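/*
 * Example of the remapping above: with only the internal command
 * outstanding, ap->qc_active == (1ULL << ATA_TAG_INTERNAL) and the value
 * returned to the LLD is 0x1, i.e. the internal command is presented as
 * hardware tag 0.
 */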
4781
4782/**
4783 *	ata_qc_issue - issue taskfile to device
4784 *	@qc: command to issue to device
4785 *
4786 *	Prepare an ATA command for submission to the device.
4787 *	This includes mapping the data into a DMA-able
4788 *	area, filling in the S/G table, and finally
4789 *	writing the taskfile to hardware, starting the command.
4790 *
4791 *	LOCKING:
4792 *	spin_lock_irqsave(host lock)
4793 */
4794void ata_qc_issue(struct ata_queued_cmd *qc)
4795{
4796	struct ata_port *ap = qc->ap;
4797	struct ata_link *link = qc->dev->link;
4798	u8 prot = qc->tf.protocol;
4799
4800	/* Make sure only one non-NCQ command is outstanding.  The
4801	 * check is skipped for old EH because it reuses active qc to
4802	 * request ATAPI sense.
4803	 */
4804	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4805
4806	if (ata_is_ncq(prot)) {
4807		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
4808
4809		if (!link->sactive)
4810			ap->nr_active_links++;
4811		link->sactive |= 1 << qc->hw_tag;
4812	} else {
4813		WARN_ON_ONCE(link->sactive);
4814
4815		ap->nr_active_links++;
4816		link->active_tag = qc->tag;
4817	}
4818
4819	qc->flags |= ATA_QCFLAG_ACTIVE;
4820	ap->qc_active |= 1ULL << qc->tag;
4821
4822	/*
4823	 * We guarantee to LLDs that they will have at least one
4824	 * non-zero sg if the command is a data command.
4825	 */
4826	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
4827		goto sys_err;
4828
4829	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4830				 (ap->flags & ATA_FLAG_PIO_DMA)))
4831		if (ata_sg_setup(qc))
4832			goto sys_err;
4833
4834	/* if device is sleeping, schedule reset and abort the link */
4835	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4836		link->eh_info.action |= ATA_EH_RESET;
4837		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4838		ata_link_abort(link);
4839		return;
4840	}
4841
4842	qc->err_mask |= ap->ops->qc_prep(qc);
4843	if (unlikely(qc->err_mask))
4844		goto err;
4845	trace_ata_qc_issue(qc);
4846	qc->err_mask |= ap->ops->qc_issue(qc);
4847	if (unlikely(qc->err_mask))
4848		goto err;
4849	return;
4850
4851sys_err:
4852	qc->err_mask |= AC_ERR_SYSTEM;
4853err:
4854	ata_qc_complete(qc);
4855}
4856
4857/**
4858 *	ata_phys_link_online - test whether the given link is online
4859 *	@link: ATA link to test
4860 *
4861 *	Test whether @link is online.  Note that this function returns
4862 *	0 if online status of @link cannot be obtained, so
4863 *	ata_link_online(link) != !ata_link_offline(link).
4864 *
4865 *	LOCKING:
4866 *	None.
4867 *
4868 *	RETURNS:
4869 *	True if the port online status is available and online.
4870 */
4871bool ata_phys_link_online(struct ata_link *link)
4872{
4873	u32 sstatus;
4874
4875	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4876	    ata_sstatus_online(sstatus))
4877		return true;
4878	return false;
4879}
4880
4881/**
4882 *	ata_phys_link_offline - test whether the given link is offline
4883 *	@link: ATA link to test
4884 *
4885 *	Test whether @link is offline.  Note that this function
4886 *	returns 0 if offline status of @link cannot be obtained, so
4887 *	ata_link_online(link) != !ata_link_offline(link).
4888 *
4889 *	LOCKING:
4890 *	None.
4891 *
4892 *	RETURNS:
4893 *	True if the port offline status is available and offline.
4894 */
4895bool ata_phys_link_offline(struct ata_link *link)
4896{
4897	u32 sstatus;
4898
4899	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4900	    !ata_sstatus_online(sstatus))
4901		return true;
4902	return false;
4903}
4904
4905/**
4906 *	ata_link_online - test whether the given link is online
4907 *	@link: ATA link to test
4908 *
4909 *	Test whether @link is online.  This is identical to
4910 *	ata_phys_link_online() when there's no slave link.  When
4911 *	there's a slave link, this function should only be called on
4912 *	the master link and will return true if any of M/S links is
4913 *	online.
4914 *
4915 *	LOCKING:
4916 *	None.
4917 *
4918 *	RETURNS:
4919 *	True if the port online status is available and online.
4920 */
4921bool ata_link_online(struct ata_link *link)
4922{
4923	struct ata_link *slave = link->ap->slave_link;
4924
4925	WARN_ON(link == slave);	/* shouldn't be called on slave link */
4926
4927	return ata_phys_link_online(link) ||
4928		(slave && ata_phys_link_online(slave));
4929}
4930EXPORT_SYMBOL_GPL(ata_link_online);
4931
4932/**
4933 *	ata_link_offline - test whether the given link is offline
4934 *	@link: ATA link to test
4935 *
4936 *	Test whether @link is offline.  This is identical to
4937 *	ata_phys_link_offline() when there's no slave link.  When
4938 *	there's a slave link, this function should only be called on
4939 *	the master link and will return true if both M/S links are
4940 *	offline.
4941 *
4942 *	LOCKING:
4943 *	None.
4944 *
4945 *	RETURNS:
4946 *	True if the port offline status is available and offline.
4947 */
4948bool ata_link_offline(struct ata_link *link)
4949{
4950	struct ata_link *slave = link->ap->slave_link;
4951
4952	WARN_ON(link == slave);	/* shouldn't be called on slave link */
4953
4954	return ata_phys_link_offline(link) &&
4955		(!slave || ata_phys_link_offline(slave));
4956}
4957EXPORT_SYMBOL_GPL(ata_link_offline);
4958
4959#ifdef CONFIG_PM
4960static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
4961				unsigned int action, unsigned int ehi_flags,
4962				bool async)
4963{
4964	struct ata_link *link;
4965	unsigned long flags;
4966
4967	/* Previous resume operation might still be in
4968	 * progress.  Wait for PM_PENDING to clear.
4969	 */
4970	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4971		ata_port_wait_eh(ap);
4972		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4973	}
4974
4975	/* request PM ops to EH */
4976	spin_lock_irqsave(ap->lock, flags);
4977
4978	ap->pm_mesg = mesg;
4979	ap->pflags |= ATA_PFLAG_PM_PENDING;
4980	ata_for_each_link(link, ap, HOST_FIRST) {
4981		link->eh_info.action |= action;
4982		link->eh_info.flags |= ehi_flags;
4983	}
4984
4985	ata_port_schedule_eh(ap);
4986
4987	spin_unlock_irqrestore(ap->lock, flags);
4988
4989	if (!async) {
4990		ata_port_wait_eh(ap);
4991		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4992	}
4993}
4994
4995/*
4996 * On some hardware, the device fails to respond after being spun down for suspend.  As
4997 * the device won't be used before being resumed, we don't need to touch the
4998 * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
4999 *
5000 * http://thread.gmane.org/gmane.linux.ide/46764
5001 */
5002static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5003						 | ATA_EHI_NO_AUTOPSY
5004						 | ATA_EHI_NO_RECOVERY;
5005
5006static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5007{
5008	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5009}
5010
5011static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5012{
5013	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5014}
5015
5016static int ata_port_pm_suspend(struct device *dev)
5017{
5018	struct ata_port *ap = to_ata_port(dev);
5019
5020	if (pm_runtime_suspended(dev))
5021		return 0;
5022
5023	ata_port_suspend(ap, PMSG_SUSPEND);
5024	return 0;
5025}
5026
5027static int ata_port_pm_freeze(struct device *dev)
5028{
5029	struct ata_port *ap = to_ata_port(dev);
5030
5031	if (pm_runtime_suspended(dev))
5032		return 0;
5033
5034	ata_port_suspend(ap, PMSG_FREEZE);
5035	return 0;
5036}
5037
5038static int ata_port_pm_poweroff(struct device *dev)
5039{
5040	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5041	return 0;
5042}
5043
5044static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5045						| ATA_EHI_QUIET;
5046
5047static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5048{
5049	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5050}
5051
5052static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5053{
5054	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5055}
5056
5057static int ata_port_pm_resume(struct device *dev)
5058{
5059	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5060	pm_runtime_disable(dev);
5061	pm_runtime_set_active(dev);
5062	pm_runtime_enable(dev);
5063	return 0;
5064}
5065
5066/*
5067 * For ODDs, the upper layer will poll for media change every few seconds,
5068 * which will make it enter and leave the suspend state every few seconds.
5069 * As each suspend causes a hard/soft reset, the gain from runtime suspend
5070 * is very small and the ODD may malfunction after being constantly reset.
5071 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5072 * ODD is attached to the port.
5073 */
5074static int ata_port_runtime_idle(struct device *dev)
5075{
5076	struct ata_port *ap = to_ata_port(dev);
5077	struct ata_link *link;
5078	struct ata_device *adev;
5079
5080	ata_for_each_link(link, ap, HOST_FIRST) {
5081		ata_for_each_dev(adev, link, ENABLED)
5082			if (adev->class == ATA_DEV_ATAPI &&
5083			    !zpodd_dev_enabled(adev))
5084				return -EBUSY;
5085	}
5086
5087	return 0;
5088}
5089
5090static int ata_port_runtime_suspend(struct device *dev)
5091{
5092	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5093	return 0;
5094}
5095
5096static int ata_port_runtime_resume(struct device *dev)
5097{
5098	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5099	return 0;
5100}
5101
5102static const struct dev_pm_ops ata_port_pm_ops = {
5103	.suspend = ata_port_pm_suspend,
5104	.resume = ata_port_pm_resume,
5105	.freeze = ata_port_pm_freeze,
5106	.thaw = ata_port_pm_resume,
5107	.poweroff = ata_port_pm_poweroff,
5108	.restore = ata_port_pm_resume,
5109
5110	.runtime_suspend = ata_port_runtime_suspend,
5111	.runtime_resume = ata_port_runtime_resume,
5112	.runtime_idle = ata_port_runtime_idle,
5113};
5114
5115/* sas ports don't participate in pm runtime management of ata_ports,
5116 * and need to resume ata devices at the domain level, not the per-port
5117 * level. sas suspend/resume is async to allow parallel port recovery
5118 * since sas has multiple ata_port instances per Scsi_Host.
5119 */
5120void ata_sas_port_suspend(struct ata_port *ap)
5121{
5122	ata_port_suspend_async(ap, PMSG_SUSPEND);
5123}
5124EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5125
5126void ata_sas_port_resume(struct ata_port *ap)
5127{
5128	ata_port_resume_async(ap, PMSG_RESUME);
5129}
5130EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5131
5132/**
5133 *	ata_host_suspend - suspend host
5134 *	@host: host to suspend
5135 *	@mesg: PM message
5136 *
5137 *	Suspend @host.  Actual operation is performed by port suspend.
5138 */
5139int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5140{
5141	host->dev->power.power_state = mesg;
5142	return 0;
5143}
5144EXPORT_SYMBOL_GPL(ata_host_suspend);
5145
5146/**
5147 *	ata_host_resume - resume host
5148 *	@host: host to resume
5149 *
5150 *	Resume @host.  Actual operation is performed by port resume.
5151 */
5152void ata_host_resume(struct ata_host *host)
5153{
5154	host->dev->power.power_state = PMSG_ON;
5155}
5156EXPORT_SYMBOL_GPL(ata_host_resume);
5157#endif
5158
5159const struct device_type ata_port_type = {
5160	.name = "ata_port",
5161#ifdef CONFIG_PM
5162	.pm = &ata_port_pm_ops,
5163#endif
5164};
5165
5166/**
5167 *	ata_dev_init - Initialize an ata_device structure
5168 *	@dev: Device structure to initialize
5169 *
5170 *	Initialize @dev in preparation for probing.
5171 *
5172 *	LOCKING:
5173 *	Inherited from caller.
5174 */
5175void ata_dev_init(struct ata_device *dev)
5176{
5177	struct ata_link *link = ata_dev_phys_link(dev);
5178	struct ata_port *ap = link->ap;
5179	unsigned long flags;
5180
5181	/* SATA spd limit is bound to the attached device, reset together */
5182	link->sata_spd_limit = link->hw_sata_spd_limit;
5183	link->sata_spd = 0;
5184
5185	/* High bits of dev->flags are used to record warm plug
5186	 * requests which occur asynchronously.  Synchronize using
5187	 * host lock.
5188	 */
5189	spin_lock_irqsave(ap->lock, flags);
5190	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5191	dev->horkage = 0;
5192	spin_unlock_irqrestore(ap->lock, flags);
5193
5194	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5195	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5196	dev->pio_mask = UINT_MAX;
5197	dev->mwdma_mask = UINT_MAX;
5198	dev->udma_mask = UINT_MAX;
5199}
5200
5201/**
5202 *	ata_link_init - Initialize an ata_link structure
5203 *	@ap: ATA port link is attached to
5204 *	@link: Link structure to initialize
5205 *	@pmp: Port multiplier port number
5206 *
5207 *	Initialize @link.
5208 *
5209 *	LOCKING:
5210 *	Kernel thread context (may sleep)
5211 */
5212void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5213{
5214	int i;
5215
5216	/* clear everything except for devices */
5217	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5218	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5219
5220	link->ap = ap;
5221	link->pmp = pmp;
5222	link->active_tag = ATA_TAG_POISON;
5223	link->hw_sata_spd_limit = UINT_MAX;
5224
5225	/* can't use iterator, ap isn't initialized yet */
5226	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5227		struct ata_device *dev = &link->device[i];
5228
5229		dev->link = link;
5230		dev->devno = dev - link->device;
5231#ifdef CONFIG_ATA_ACPI
5232		dev->gtf_filter = ata_acpi_gtf_filter;
5233#endif
5234		ata_dev_init(dev);
5235	}
5236}
5237
5238/**
5239 *	sata_link_init_spd - Initialize link->sata_spd_limit
5240 *	@link: Link to configure sata_spd_limit for
5241 *
5242 *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
5243 *	configured value.
5244 *
5245 *	LOCKING:
5246 *	Kernel thread context (may sleep).
5247 *
5248 *	RETURNS:
5249 *	0 on success, -errno on failure.
5250 */
5251int sata_link_init_spd(struct ata_link *link)
5252{
5253	u8 spd;
5254	int rc;
5255
5256	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5257	if (rc)
5258		return rc;
5259
5260	spd = (link->saved_scontrol >> 4) & 0xf;
5261	if (spd)
5262		link->hw_sata_spd_limit &= (1 << spd) - 1;
5263
5264	ata_force_link_limits(link);
5265
5266	link->sata_spd_limit = link->hw_sata_spd_limit;
5267
5268	return 0;
5269}
5270
5271/**
5272 *	ata_port_alloc - allocate and initialize basic ATA port resources
5273 *	@host: ATA host this allocated port belongs to
5274 *
5275 *	Allocate and initialize basic ATA port resources.
5276 *
5277 *	RETURNS:
5278 *	Allocated ATA port on success, NULL on failure.
5279 *
5280 *	LOCKING:
5281 *	Inherited from calling layer (may sleep).
5282 */
5283struct ata_port *ata_port_alloc(struct ata_host *host)
5284{
5285	struct ata_port *ap;
5286
5287	DPRINTK("ENTER\n");
5288
5289	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5290	if (!ap)
5291		return NULL;
5292
5293	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5294	ap->lock = &host->lock;
5295	ap->print_id = -1;
5296	ap->local_port_no = -1;
5297	ap->host = host;
5298	ap->dev = host->dev;
5299
5300#if defined(ATA_VERBOSE_DEBUG)
5301	/* turn on all debugging levels */
5302	ap->msg_enable = 0x00FF;
5303#elif defined(ATA_DEBUG)
5304	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5305#else
5306	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5307#endif
5308
5309	mutex_init(&ap->scsi_scan_mutex);
5310	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5311	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5312	INIT_LIST_HEAD(&ap->eh_done_q);
5313	init_waitqueue_head(&ap->eh_wait_q);
5314	init_completion(&ap->park_req_pending);
5315	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5316		    TIMER_DEFERRABLE);
5317
5318	ap->cbl = ATA_CBL_NONE;
5319
5320	ata_link_init(ap, &ap->link, 0);
5321
5322#ifdef ATA_IRQ_TRAP
5323	ap->stats.unhandled_irq = 1;
5324	ap->stats.idle_irq = 1;
5325#endif
5326	ata_sff_port_init(ap);
5327
5328	return ap;
5329}
5330
5331static void ata_devres_release(struct device *gendev, void *res)
5332{
5333	struct ata_host *host = dev_get_drvdata(gendev);
5334	int i;
5335
5336	for (i = 0; i < host->n_ports; i++) {
5337		struct ata_port *ap = host->ports[i];
5338
5339		if (!ap)
5340			continue;
5341
5342		if (ap->scsi_host)
5343			scsi_host_put(ap->scsi_host);
5344
5345	}
5346
5347	dev_set_drvdata(gendev, NULL);
5348	ata_host_put(host);
5349}
5350
5351static void ata_host_release(struct kref *kref)
5352{
5353	struct ata_host *host = container_of(kref, struct ata_host, kref);
5354	int i;
5355
5356	for (i = 0; i < host->n_ports; i++) {
5357		struct ata_port *ap = host->ports[i];
5358
5359		kfree(ap->pmp_link);
5360		kfree(ap->slave_link);
5361		kfree(ap);
5362		host->ports[i] = NULL;
5363	}
5364	kfree(host);
5365}
5366
5367void ata_host_get(struct ata_host *host)
5368{
5369	kref_get(&host->kref);
5370}
5371
5372void ata_host_put(struct ata_host *host)
5373{
5374	kref_put(&host->kref, ata_host_release);
5375}
5376EXPORT_SYMBOL_GPL(ata_host_put);
5377
5378/**
5379 *	ata_host_alloc - allocate and init basic ATA host resources
5380 *	@dev: generic device this host is associated with
5381 *	@max_ports: maximum number of ATA ports associated with this host
5382 *
5383 *	Allocate and initialize basic ATA host resources.  LLD calls
5384 *	this function to allocate a host, initialize it fully and
5385 *	attach it using ata_host_register().
5386 *
5387 *	@max_ports ports are allocated and host->n_ports is
5388 *	initialized to @max_ports.  The caller is allowed to decrease
5389 *	host->n_ports before calling ata_host_register().  The unused
5390 *	ports will be automatically freed on registration.
5391 *
5392 *	RETURNS:
5393 *	Allocated ATA host on success, NULL on failure.
5394 *
5395 *	LOCKING:
5396 *	Inherited from calling layer (may sleep).
5397 */
5398struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5399{
5400	struct ata_host *host;
5401	size_t sz;
5402	int i;
5403	void *dr;
5404
5405	DPRINTK("ENTER\n");
5406
5407	/* alloc a container for our list of ATA ports (buses) */
5408	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5409	host = kzalloc(sz, GFP_KERNEL);
5410	if (!host)
5411		return NULL;
5412
5413	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5414		goto err_free;
5415
5416	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
5417	if (!dr)
5418		goto err_out;
5419
5420	devres_add(dev, dr);
5421	dev_set_drvdata(dev, host);
5422
5423	spin_lock_init(&host->lock);
5424	mutex_init(&host->eh_mutex);
5425	host->dev = dev;
5426	host->n_ports = max_ports;
5427	kref_init(&host->kref);
5428
5429	/* allocate ports bound to this host */
5430	for (i = 0; i < max_ports; i++) {
5431		struct ata_port *ap;
5432
5433		ap = ata_port_alloc(host);
5434		if (!ap)
5435			goto err_out;
5436
5437		ap->port_no = i;
5438		host->ports[i] = ap;
5439	}
5440
5441	devres_remove_group(dev, NULL);
5442	return host;
5443
5444 err_out:
5445	devres_release_group(dev, NULL);
5446 err_free:
5447	kfree(host);
5448	return NULL;
5449}
5450EXPORT_SYMBOL_GPL(ata_host_alloc);
5451
5452/**
5453 *	ata_host_alloc_pinfo - alloc host and init with port_info array
5454 *	@dev: generic device this host is associated with
5455 *	@ppi: array of ATA port_info to initialize host with
5456 *	@n_ports: number of ATA ports attached to this host
5457 *
5458 *	Allocate ATA host and initialize with info from @ppi.  If NULL
5459 *	terminated, @ppi may contain fewer entries than @n_ports.  The
5460 *	last entry will be used for the remaining ports.
5461 *
5462 *	RETURNS:
5463 *	Allocated ATA host on success, NULL on failure.
5464 *
5465 *	LOCKING:
5466 *	Inherited from calling layer (may sleep).
5467 */
5468struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5469				      const struct ata_port_info * const * ppi,
5470				      int n_ports)
5471{
5472	const struct ata_port_info *pi;
5473	struct ata_host *host;
5474	int i, j;
5475
5476	host = ata_host_alloc(dev, n_ports);
5477	if (!host)
5478		return NULL;
5479
5480	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5481		struct ata_port *ap = host->ports[i];
5482
5483		if (ppi[j])
5484			pi = ppi[j++];
5485
5486		ap->pio_mask = pi->pio_mask;
5487		ap->mwdma_mask = pi->mwdma_mask;
5488		ap->udma_mask = pi->udma_mask;
5489		ap->flags |= pi->flags;
5490		ap->link.flags |= pi->link_flags;
5491		ap->ops = pi->port_ops;
5492
5493		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5494			host->ops = pi->port_ops;
5495	}
5496
5497	return host;
5498}
5499EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
5500
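/*
 * Typical use of ata_host_alloc_pinfo() (sketch only; "my_pi" and
 * "my_port_ops" are hypothetical driver symbols):
 *
 *	static const struct ata_port_info my_pi = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_pi, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 * With the ppi array NULL terminated after one entry, both ports inherit
 * the same port_info, as described in the kernel-doc above.
 */
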
5501static void ata_host_stop(struct device *gendev, void *res)
5502{
5503	struct ata_host *host = dev_get_drvdata(gendev);
5504	int i;
5505
5506	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5507
5508	for (i = 0; i < host->n_ports; i++) {
5509		struct ata_port *ap = host->ports[i];
5510
5511		if (ap->ops->port_stop)
5512			ap->ops->port_stop(ap);
5513	}
5514
5515	if (host->ops->host_stop)
5516		host->ops->host_stop(host);
5517}
5518
5519/**
5520 *	ata_finalize_port_ops - finalize ata_port_operations
5521 *	@ops: ata_port_operations to finalize
5522 *
5523 *	An ata_port_operations can inherit from another ops and that
5524 *	ops can again inherit from another.  This can go on as many
5525 *	times as necessary as long as there is no loop in the
5526 *	inheritance chain.
5527 *
5528 *	Ops tables are finalized when the host is started.  NULL or
5529 *	unspecified entries are inherited from the closest ancestor
5530 *	which has the method, and the entry is populated with it.
5531 *	After finalization, the ops table directly points to all the
5532 *	methods and ->inherits is no longer necessary and cleared.
5533 *
5534 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5535 *
5536 *	LOCKING:
5537 *	None.
5538 */
5539static void ata_finalize_port_ops(struct ata_port_operations *ops)
5540{
5541	static DEFINE_SPINLOCK(lock);
5542	const struct ata_port_operations *cur;
5543	void **begin = (void **)ops;
5544	void **end = (void **)&ops->inherits;
5545	void **pp;
5546
5547	if (!ops || !ops->inherits)
5548		return;
5549
5550	spin_lock(&lock);
5551
5552	for (cur = ops->inherits; cur; cur = cur->inherits) {
5553		void **inherit = (void **)cur;
5554
5555		for (pp = begin; pp < end; pp++, inherit++)
5556			if (!*pp)
5557				*pp = *inherit;
5558	}
5559
5560	for (pp = begin; pp < end; pp++)
5561		if (IS_ERR(*pp))
5562			*pp = NULL;
5563
5564	ops->inherits = NULL;
5565
5566	spin_unlock(&lock);
5567}
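/*
 * Inheritance example for the finalization above ("my_ops", "my_parent_ops"
 * and my_hardreset() are hypothetical driver symbols):
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &my_parent_ops,
 *		.softreset	= ATA_OP_NULL,
 *		.hardreset	= my_hardreset,
 *	};
 *
 * After finalization, every slot still NULL in my_ops is copied from the
 * closest ancestor that provides it, ATA_OP_NULL entries are forced to
 * NULL, and .inherits is cleared.
 */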
5568
5569/**
5570 *	ata_host_start - start and freeze ports of an ATA host
5571 *	@host: ATA host to start ports for
5572 *
5573 *	Start and then freeze ports of @host.  Started status is
5574 *	recorded in host->flags, so this function can be called
5575 *	multiple times.  Ports are guaranteed to get started only
5576 *	once.  If host->ops isn't initialized yet, its set to the
5577 *	first non-dummy port ops.
5578 *
5579 *	LOCKING:
5580 *	Inherited from calling layer (may sleep).
5581 *
5582 *	RETURNS:
5583 *	0 if all ports are started successfully, -errno otherwise.
5584 */
5585int ata_host_start(struct ata_host *host)
5586{
5587	int have_stop = 0;
5588	void *start_dr = NULL;
5589	int i, rc;
5590
5591	if (host->flags & ATA_HOST_STARTED)
5592		return 0;
5593
5594	ata_finalize_port_ops(host->ops);
5595
5596	for (i = 0; i < host->n_ports; i++) {
5597		struct ata_port *ap = host->ports[i];
5598
5599		ata_finalize_port_ops(ap->ops);
5600
5601		if (!host->ops && !ata_port_is_dummy(ap))
5602			host->ops = ap->ops;
5603
5604		if (ap->ops->port_stop)
5605			have_stop = 1;
5606	}
5607
5608	if (host->ops && host->ops->host_stop)
5609		have_stop = 1;
5610
5611	if (have_stop) {
5612		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5613		if (!start_dr)
5614			return -ENOMEM;
5615	}
5616
5617	for (i = 0; i < host->n_ports; i++) {
5618		struct ata_port *ap = host->ports[i];
5619
5620		if (ap->ops->port_start) {
5621			rc = ap->ops->port_start(ap);
5622			if (rc) {
5623				if (rc != -ENODEV)
5624					dev_err(host->dev,
5625						"failed to start port %d (errno=%d)\n",
5626						i, rc);
5627				goto err_out;
5628			}
5629		}
5630		ata_eh_freeze_port(ap);
5631	}
5632
5633	if (start_dr)
5634		devres_add(host->dev, start_dr);
5635	host->flags |= ATA_HOST_STARTED;
5636	return 0;
5637
5638 err_out:
5639	while (--i >= 0) {
5640		struct ata_port *ap = host->ports[i];
5641
5642		if (ap->ops->port_stop)
5643			ap->ops->port_stop(ap);
5644	}
5645	devres_free(start_dr);
5646	return rc;
5647}
5648EXPORT_SYMBOL_GPL(ata_host_start);
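/*
 * For context, a typical LLD bring-up around the helper above looks roughly
 * like this (sketch; "my_sht" is a hypothetical scsi_host_template):
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	...
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = ata_host_register(host, &my_sht);
 *
 * Many drivers call ata_host_activate() instead, which wraps the start,
 * IRQ request and registration steps.
 */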
5649
5650/**
5651 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
5652 *	@host:	host to initialize
5653 *	@dev:	device host is attached to
5654 *	@ops:	port_ops
5655 *
5656 */
5657void ata_host_init(struct ata_host *host, struct device *dev,
5658		   struct ata_port_operations *ops)
5659{
5660	spin_lock_init(&host->lock);
5661	mutex_init(&host->eh_mutex);
5662	host->n_tags = ATA_MAX_QUEUE;
5663	host->dev = dev;
5664	host->ops = ops;
5665	kref_init(&host->kref);
5666}
5667EXPORT_SYMBOL_GPL(ata_host_init);
5668
5669void __ata_port_probe(struct ata_port *ap)
5670{
5671	struct ata_eh_info *ehi = &ap->link.eh_info;
5672	unsigned long flags;
5673
5674	/* kick EH for boot probing */
5675	spin_lock_irqsave(ap->lock, flags);
5676
5677	ehi->probe_mask |= ATA_ALL_DEVICES;
5678	ehi->action |= ATA_EH_RESET;
5679	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5680
5681	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5682	ap->pflags |= ATA_PFLAG_LOADING;
5683	ata_port_schedule_eh(ap);
5684
5685	spin_unlock_irqrestore(ap->lock, flags);
5686}
5687
5688int ata_port_probe(struct ata_port *ap)
5689{
5690	int rc = 0;
5691
5692	if (ap->ops->error_handler) {
5693		__ata_port_probe(ap);
5694		ata_port_wait_eh(ap);
5695	} else {
5696		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5697		rc = ata_bus_probe(ap);
5698		DPRINTK("ata%u: bus probe end\n", ap->print_id);
5699	}
5700	return rc;
5701}
5702
5703
5704static void async_port_probe(void *data, async_cookie_t cookie)
5705{
5706	struct ata_port *ap = data;
5707
5708	/*
5709	 * If we're not allowed to scan this host in parallel,
5710	 * we need to wait until all previous scans have completed
5711	 * before going further.
5712	 * Jeff Garzik says this is only within a controller, so we
5713	 * don't need to wait for port 0, only for later ports.
5714	 */
5715	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5716		async_synchronize_cookie(cookie);
5717
5718	(void)ata_port_probe(ap);
5719
5720	/* in order to keep device order, we need to synchronize at this point */
5721	async_synchronize_cookie(cookie);
5722
5723	ata_scsi_scan_host(ap, 1);
5724}
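/*
 * Note: ports of a host are probed sequentially by default.  An LLD whose
 * controller can cope with concurrent probing may opt in before
 * registration (illustrative; whether this is safe is controller
 * specific):
 *
 *	host->flags |= ATA_HOST_PARALLEL_SCAN;
 */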
5725
5726/**
5727 *	ata_host_register - register initialized ATA host
5728 *	@host: ATA host to register
5729 *	@sht: template for SCSI host
5730 *
5731 *	Register initialized ATA host.  @host is allocated using
5732 *	ata_host_alloc() and fully initialized by the LLD.  This function
5733 *	starts ports, registers @host with ATA and SCSI layers and
5734 *	probes registered devices.
5735 *
5736 *	LOCKING:
5737 *	Inherited from calling layer (may sleep).
5738 *
5739 *	RETURNS:
5740 *	0 on success, -errno otherwise.
5741 */
5742int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5743{
5744	int i, rc;
5745
5746	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
5747
5748	/* host must have been started */
5749	if (!(host->flags & ATA_HOST_STARTED)) {
5750		dev_err(host->dev, "BUG: trying to register unstarted host\n");
5751		WARN_ON(1);
5752		return -EINVAL;
5753	}
5754
5755	/* Blow away unused ports.  This happens when LLD can't
5756	 * determine the exact number of ports to allocate at
5757	 * allocation time.
5758	 */
5759	for (i = host->n_ports; host->ports[i]; i++)
5760		kfree(host->ports[i]);
5761
5762	/* give ports names and add SCSI hosts */
5763	for (i = 0; i < host->n_ports; i++) {
5764		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
5765		host->ports[i]->local_port_no = i + 1;
5766	}
5767
5768	/* Create associated sysfs transport objects  */
5769	for (i = 0; i < host->n_ports; i++) {
5770		rc = ata_tport_add(host->dev, host->ports[i]);
5771		if (rc) {
5772			goto err_tadd;
5773		}
5774	}
5775
5776	rc = ata_scsi_add_hosts(host, sht);
5777	if (rc)
5778		goto err_tadd;
5779
5780	/* set cable, sata_spd_limit and report */
5781	for (i = 0; i < host->n_ports; i++) {
5782		struct ata_port *ap = host->ports[i];
5783		unsigned long xfer_mask;
5784
5785		/* set SATA cable type if still unset */
5786		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5787			ap->cbl = ATA_CBL_SATA;
5788
5789		/* init sata_spd_limit to the current value */
5790		sata_link_init_spd(&ap->link);
5791		if (ap->slave_link)
5792			sata_link_init_spd(ap->slave_link);
5793
5794		/* print per-port info to dmesg */
5795		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5796					      ap->udma_mask);
5797
5798		if (!ata_port_is_dummy(ap)) {
5799			ata_port_info(ap, "%cATA max %s %s\n",
5800				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5801				      ata_mode_string(xfer_mask),
5802				      ap->link.eh_info.desc);
5803			ata_ehi_clear_desc(&ap->link.eh_info);
5804		} else
5805			ata_port_info(ap, "DUMMY\n");
5806	}
5807
5808	/* perform each probe asynchronously */
5809	for (i = 0; i < host->n_ports; i++) {
5810		struct ata_port *ap = host->ports[i];
5811		ap->cookie = async_schedule(async_port_probe, ap);
5812	}
5813
5814	return 0;
5815
5816 err_tadd:
5817	while (--i >= 0) {
5818		ata_tport_delete(host->ports[i]);
5819	}
5820	return rc;
5821
5822}
5823EXPORT_SYMBOL_GPL(ata_host_register);
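/*
 * Illustrative sketch (hypothetical driver code, not compiled here) of a
 * probe path for an LLD that sets up its interrupts itself and therefore
 * calls ata_host_start() and ata_host_register() directly instead of
 * going through ata_host_activate():
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	... driver-specific IRQ setup, ata_port_desc() calls ...
 *	return ata_host_register(host, &foo_sht);
 */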
5824
5825/**
5826 *	ata_host_activate - start host, request IRQ and register it
5827 *	@host: target ATA host
5828 *	@irq: IRQ to request
5829 *	@irq_handler: irq_handler used when requesting IRQ
5830 *	@irq_flags: irq_flags used when requesting IRQ
5831 *	@sht: scsi_host_template to use when registering the host
5832 *
5833 *	After allocating an ATA host and initializing it, most libata
5834 *	LLDs perform three steps to activate the host - start the host,
5835 *	request the IRQ and register it.  This helper takes the necessary
5836 *	arguments and performs the three steps in one go.
5837 *
5838 *	An invalid IRQ skips the IRQ registration and expects the host to
5839 *	have set polling mode on the port. In this case, @irq_handler
5840 *	should be NULL.
5841 *
5842 *	LOCKING:
5843 *	Inherited from calling layer (may sleep).
5844 *
5845 *	RETURNS:
5846 *	0 on success, -errno otherwise.
5847 */
5848int ata_host_activate(struct ata_host *host, int irq,
5849		      irq_handler_t irq_handler, unsigned long irq_flags,
5850		      struct scsi_host_template *sht)
5851{
5852	int i, rc;
5853	char *irq_desc;
5854
5855	rc = ata_host_start(host);
5856	if (rc)
5857		return rc;
5858
5859	/* Special case for polling mode */
5860	if (!irq) {
5861		WARN_ON(irq_handler);
5862		return ata_host_register(host, sht);
5863	}
5864
5865	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
5866				  dev_driver_string(host->dev),
5867				  dev_name(host->dev));
5868	if (!irq_desc)
5869		return -ENOMEM;
5870
5871	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5872			      irq_desc, host);
5873	if (rc)
5874		return rc;
5875
5876	for (i = 0; i < host->n_ports; i++)
5877		ata_port_desc(host->ports[i], "irq %d", irq);
5878
5879	rc = ata_host_register(host, sht);
5880	/* if failed, just free the IRQ and leave ports alone */
5881	if (rc)
5882		devm_free_irq(host->dev, irq, host);
5883
5884	return rc;
5885}
5886EXPORT_SYMBOL_GPL(ata_host_activate);
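/*
 * Illustrative sketch of a typical PCI LLD probe tail.  The foo_sht name
 * and the port count are hypothetical; ata_sff_interrupt is the generic
 * SFF interrupt handler exported by libata-sff:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	... ioremap BARs and fill in each port's ap->ioaddr ...
 *	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 */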
5887
5888/**
5889 *	ata_port_detach - Detach ATA port in preparation of device removal
5890 *	@ap: ATA port to be detached
5891 *
5892 *	Detach all ATA devices and the associated SCSI devices of @ap;
5893 *	then, remove the associated SCSI host.  @ap is guaranteed to
5894 *	be quiescent on return from this function.
5895 *
5896 *	LOCKING:
5897 *	Kernel thread context (may sleep).
5898 */
5899static void ata_port_detach(struct ata_port *ap)
5900{
5901	unsigned long flags;
5902	struct ata_link *link;
5903	struct ata_device *dev;
5904
5905	if (!ap->ops->error_handler)
5906		goto skip_eh;
5907
5908	/* tell EH we're leaving & flush EH */
5909	spin_lock_irqsave(ap->lock, flags);
5910	ap->pflags |= ATA_PFLAG_UNLOADING;
5911	ata_port_schedule_eh(ap);
5912	spin_unlock_irqrestore(ap->lock, flags);
5913
5914	/* wait till EH commits suicide */
5915	ata_port_wait_eh(ap);
5916
5917	/* it better be dead now */
5918	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
5919
5920	cancel_delayed_work_sync(&ap->hotplug_task);
5921
5922 skip_eh:
5923	/* clean up zpodd on port removal */
5924	ata_for_each_link(link, ap, HOST_FIRST) {
5925		ata_for_each_dev(dev, link, ALL) {
5926			if (zpodd_dev_enabled(dev))
5927				zpodd_exit(dev);
5928		}
5929	}
5930	if (ap->pmp_link) {
5931		int i;
5932		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
5933			ata_tlink_delete(&ap->pmp_link[i]);
5934	}
5935	/* remove the associated SCSI host */
5936	scsi_remove_host(ap->scsi_host);
5937	ata_tport_delete(ap);
5938}
5939
5940/**
5941 *	ata_host_detach - Detach all ports of an ATA host
5942 *	@host: Host to detach
5943 *
5944 *	Detach all ports of @host.
5945 *
5946 *	LOCKING:
5947 *	Kernel thread context (may sleep).
5948 */
5949void ata_host_detach(struct ata_host *host)
5950{
5951	int i;
5952
5953	for (i = 0; i < host->n_ports; i++) {
5954		/* Ensure ata_port probe has completed */
5955		async_synchronize_cookie(host->ports[i]->cookie + 1);
5956		ata_port_detach(host->ports[i]);
5957	}
5958
5959	/* the host is dead now, dissociate ACPI */
5960	ata_acpi_dissociate(host);
5961}
5962EXPORT_SYMBOL_GPL(ata_host_detach);
5963
5964#ifdef CONFIG_PCI
5965
5966/**
5967 *	ata_pci_remove_one - PCI layer callback for device removal
5968 *	@pdev: PCI device that was removed
5969 *
5970 *	The PCI layer indicates to libata via this hook that a hot-unplug or
5971 *	module unload event has occurred.  Detach all ports.  Resource
5972 *	release is handled via devres.
5973 *
5974 *	LOCKING:
5975 *	Inherited from PCI layer (may sleep).
5976 */
5977void ata_pci_remove_one(struct pci_dev *pdev)
5978{
5979	struct ata_host *host = pci_get_drvdata(pdev);
5980
5981	ata_host_detach(host);
5982}
5983EXPORT_SYMBOL_GPL(ata_pci_remove_one);
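/*
 * Illustrative sketch: a PCI LLD normally wires this helper (and, under
 * CONFIG_PM, the suspend/resume helpers below) straight into its
 * pci_driver.  The foo_* names are hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */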
5984
5985void ata_pci_shutdown_one(struct pci_dev *pdev)
5986{
5987	struct ata_host *host = pci_get_drvdata(pdev);
5988	int i;
5989
5990	for (i = 0; i < host->n_ports; i++) {
5991		struct ata_port *ap = host->ports[i];
5992
5993		ap->pflags |= ATA_PFLAG_FROZEN;
5994
5995		/* Disable port interrupts */
5996		if (ap->ops->freeze)
5997			ap->ops->freeze(ap);
5998
5999		/* Stop the port DMA engines */
6000		if (ap->ops->port_stop)
6001			ap->ops->port_stop(ap);
6002	}
6003}
6004EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6005
6006/* move to PCI subsystem */
6007int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6008{
6009	unsigned long tmp = 0;
6010
6011	switch (bits->width) {
6012	case 1: {
6013		u8 tmp8 = 0;
6014		pci_read_config_byte(pdev, bits->reg, &tmp8);
6015		tmp = tmp8;
6016		break;
6017	}
6018	case 2: {
6019		u16 tmp16 = 0;
6020		pci_read_config_word(pdev, bits->reg, &tmp16);
6021		tmp = tmp16;
6022		break;
6023	}
6024	case 4: {
6025		u32 tmp32 = 0;
6026		pci_read_config_dword(pdev, bits->reg, &tmp32);
6027		tmp = tmp32;
6028		break;
6029	}
6030
6031	default:
6032		return -EINVAL;
6033	}
6034
6035	tmp &= bits->mask;
6036
6037	return (tmp == bits->val) ? 1 : 0;
6038}
6039EXPORT_SYMBOL_GPL(pci_test_config_bits);
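/*
 * Illustrative sketch: callers describe the config-space test with a
 * struct pci_bits { reg, width, mask, val } and get 1 if the masked value
 * matches, 0 if it does not, or a negative errno for an unsupported
 * width.  The register offsets and enable bits below are hypothetical.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41U, 1U, 0x80UL, 0x80UL },	for port 0
 *		{ 0x43U, 1U, 0x80UL, 0x80UL },	for port 1
 *	};
 *
 *	enabled = pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]);
 */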
6040
6041#ifdef CONFIG_PM
6042void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6043{
6044	pci_save_state(pdev);
6045	pci_disable_device(pdev);
6046
6047	if (mesg.event & PM_EVENT_SLEEP)
6048		pci_set_power_state(pdev, PCI_D3hot);
6049}
6050EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6051
6052int ata_pci_device_do_resume(struct pci_dev *pdev)
6053{
6054	int rc;
6055
6056	pci_set_power_state(pdev, PCI_D0);
6057	pci_restore_state(pdev);
6058
6059	rc = pcim_enable_device(pdev);
6060	if (rc) {
6061		dev_err(&pdev->dev,
6062			"failed to enable device after resume (%d)\n", rc);
6063		return rc;
6064	}
6065
6066	pci_set_master(pdev);
6067	return 0;
6068}
6069EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6070
6071int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6072{
6073	struct ata_host *host = pci_get_drvdata(pdev);
6074	int rc = 0;
6075
6076	rc = ata_host_suspend(host, mesg);
6077	if (rc)
6078		return rc;
6079
6080	ata_pci_device_do_suspend(pdev, mesg);
6081
6082	return 0;
6083}
6084EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6085
6086int ata_pci_device_resume(struct pci_dev *pdev)
6087{
6088	struct ata_host *host = pci_get_drvdata(pdev);
6089	int rc;
6090
6091	rc = ata_pci_device_do_resume(pdev);
6092	if (rc == 0)
6093		ata_host_resume(host);
6094	return rc;
6095}
6096EXPORT_SYMBOL_GPL(ata_pci_device_resume);
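/*
 * Illustrative sketch: a driver that needs controller-specific work around
 * suspend can open-code the sequence instead of using
 * ata_pci_device_suspend() directly:
 *
 *	rc = ata_host_suspend(host, mesg);
 *	if (rc)
 *		return rc;
 *	... quiesce controller-specific state ...
 *	ata_pci_device_do_suspend(pdev, mesg);
 *	return 0;
 */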
6097#endif /* CONFIG_PM */
6098#endif /* CONFIG_PCI */
6099
6100/**
6101 *	ata_platform_remove_one - Platform layer callback for device removal
6102 *	@pdev: Platform device that was removed
6103 *
6104 *	The platform layer indicates to libata via this hook that a hot-unplug
6105 *	or module unload event has occurred.  Detach all ports.  Resource
6106 *	release is handled via devres.
6107 *
6108 *	LOCKING:
6109 *	Inherited from platform layer (may sleep).
6110 */
6111int ata_platform_remove_one(struct platform_device *pdev)
6112{
6113	struct ata_host *host = platform_get_drvdata(pdev);
6114
6115	ata_host_detach(host);
6116
6117	return 0;
6118}
6119EXPORT_SYMBOL_GPL(ata_platform_remove_one);
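/*
 * Illustrative sketch: platform LLDs typically plug this helper straight
 * into their platform_driver (the foo_* names are hypothetical):
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= ata_platform_remove_one,
 *		.driver	= {
 *			.name = DRV_NAME,
 *		},
 *	};
 */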
6120
6121#ifdef CONFIG_ATA_FORCE
6122static int __init ata_parse_force_one(char **cur,
6123				      struct ata_force_ent *force_ent,
6124				      const char **reason)
6125{
6126	static const struct ata_force_param force_tbl[] __initconst = {
6127		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6128		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6129		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6130		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6131		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6132		{ "sata",	.cbl		= ATA_CBL_SATA },
6133		{ "1.5Gbps",	.spd_limit	= 1 },
6134		{ "3.0Gbps",	.spd_limit	= 2 },
6135		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6136		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6137		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
6138		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
6139		{ "noncqati",	.horkage_on	= ATA_HORKAGE_NO_NCQ_ON_ATI },
6140		{ "ncqati",	.horkage_off	= ATA_HORKAGE_NO_NCQ_ON_ATI },
6141		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6142		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6143		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6144		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6145		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6146		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6147		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6148		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6149		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6150		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6151		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6152		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6153		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6154		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6155		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6156		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6157		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6158		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6159		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6160		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6161		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6162		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6163		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6164		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6165		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6166		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6167		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6168		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6169		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6170		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6171		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6172		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6173		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6174		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6175		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6176		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6177		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6178		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6179		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6180		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6181		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6182	};
6183	char *start = *cur, *p = *cur;
6184	char *id, *val, *endp;
6185	const struct ata_force_param *match_fp = NULL;
6186	int nr_matches = 0, i;
6187
6188	/* find where this param ends and update *cur */
6189	while (*p != '\0' && *p != ',')
6190		p++;
6191
6192	if (*p == '\0')
6193		*cur = p;
6194	else
6195		*cur = p + 1;
6196
6197	*p = '\0';
6198
6199	/* parse */
6200	p = strchr(start, ':');
6201	if (!p) {
6202		val = strstrip(start);
6203		goto parse_val;
6204	}
6205	*p = '\0';
6206
6207	id = strstrip(start);
6208	val = strstrip(p + 1);
6209
6210	/* parse id */
6211	p = strchr(id, '.');
6212	if (p) {
6213		*p++ = '\0';
6214		force_ent->device = simple_strtoul(p, &endp, 10);
6215		if (p == endp || *endp != '\0') {
6216			*reason = "invalid device";
6217			return -EINVAL;
6218		}
6219	}
6220
6221	force_ent->port = simple_strtoul(id, &endp, 10);
6222	if (id == endp || *endp != '\0') {
6223		*reason = "invalid port/link";
6224		return -EINVAL;
6225	}
6226
6227 parse_val:
6228	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6229	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6230		const struct ata_force_param *fp = &force_tbl[i];
6231
6232		if (strncasecmp(val, fp->name, strlen(val)))
6233			continue;
6234
6235		nr_matches++;
6236		match_fp = fp;
6237
6238		if (strcasecmp(val, fp->name) == 0) {
6239			nr_matches = 1;
6240			break;
6241		}
6242	}
6243
6244	if (!nr_matches) {
6245		*reason = "unknown value";
6246		return -EINVAL;
6247	}
6248	if (nr_matches > 1) {
6249		*reason = "ambiguous value";
6250		return -EINVAL;
6251	}
6252
6253	force_ent->param = *match_fp;
6254
6255	return 0;
6256}
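/*
 * The accepted syntax is a comma separated list of "[ID:]VAL" entries,
 * where ID is PORT[.DEVICE] as printed by libata on the console and VAL
 * is one of the names in force_tbl above.  A few illustrative examples:
 *
 *	libata.force=1.5Gbps		limit every link to 1.5 Gbps
 *	libata.force=2:noncq		turn off NCQ on all devices of port 2
 *	libata.force=2.00:disable	disable device ata2.00
 *	libata.force=80c,1:norst	multiple entries, comma separated
 */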
6257
6258static void __init ata_parse_force_param(void)
6259{
6260	int idx = 0, size = 1;
6261	int last_port = -1, last_device = -1;
6262	char *p, *cur, *next;
6263
6264	/* calculate maximum number of params and allocate force_tbl */
6265	for (p = ata_force_param_buf; *p; p++)
6266		if (*p == ',')
6267			size++;
6268
6269	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6270	if (!ata_force_tbl) {
6271		printk(KERN_WARNING "ata: failed to extend force table, "
6272		       "libata.force ignored\n");
6273		return;
6274	}
6275
6276	/* parse and populate the table */
6277	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6278		const char *reason = "";
6279		struct ata_force_ent te = { .port = -1, .device = -1 };
6280
6281		next = cur;
6282		if (ata_parse_force_one(&next, &te, &reason)) {
6283			printk(KERN_WARNING "ata: failed to parse force "
6284			       "parameter \"%s\" (%s)\n",
6285			       cur, reason);
6286			continue;
6287		}
6288
6289		if (te.port == -1) {
6290			te.port = last_port;
6291			te.device = last_device;
6292		}
6293
6294		ata_force_tbl[idx++] = te;
6295
6296		last_port = te.port;
6297		last_device = te.device;
6298	}
6299
6300	ata_force_tbl_size = idx;
6301}
6302
6303static void ata_free_force_param(void)
6304{
6305	kfree(ata_force_tbl);
6306}
6307#else
6308static inline void ata_parse_force_param(void) { }
6309static inline void ata_free_force_param(void) { }
6310#endif
6311
6312static int __init ata_init(void)
6313{
6314	int rc;
6315
6316	ata_parse_force_param();
6317
6318	rc = ata_sff_init();
6319	if (rc) {
6320		ata_free_force_param();
6321		return rc;
6322	}
6323
6324	libata_transport_init();
6325	ata_scsi_transport_template = ata_attach_transport();
6326	if (!ata_scsi_transport_template) {
6327		ata_sff_exit();
6328		rc = -ENOMEM;
6329		goto err_out;
6330	}
6331
6332	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6333	return 0;
6334
6335err_out:
6336	return rc;
6337}
6338
6339static void __exit ata_exit(void)
6340{
6341	ata_release_transport(ata_scsi_transport_template);
6342	libata_transport_exit();
6343	ata_sff_exit();
6344	ata_free_force_param();
6345}
6346
6347subsys_initcall(ata_init);
6348module_exit(ata_exit);
6349
6350static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6351
6352int ata_ratelimit(void)
6353{
6354	return __ratelimit(&ratelimit);
6355}
6356EXPORT_SYMBOL_GPL(ata_ratelimit);
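/*
 * Illustrative use: gate noisy, interrupt-time diagnostics so that a
 * misbehaving device cannot flood the log, e.g.
 *
 *	if (ata_ratelimit())
 *		ata_port_warn(ap, "spurious interrupt\n");
 */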
6357
6358/**
6359 *	ata_msleep - ATA EH owner aware msleep
6360 *	@ap: ATA port to attribute the sleep to
6361 *	@msecs: duration to sleep in milliseconds
6362 *
6363 *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6364 *	ownership is released before going to sleep and reacquired
6365 *	after the sleep is complete.  IOW, other ports sharing the
6366 *	@ap->host will be allowed to own the EH while this task is
6367 *	sleeping.
6368 *
6369 *	LOCKING:
6370 *	Might sleep.
6371 */
6372void ata_msleep(struct ata_port *ap, unsigned int msecs)
6373{
6374	bool owns_eh = ap && ap->host->eh_owner == current;
6375
6376	if (owns_eh)
6377		ata_eh_release(ap);
6378
6379	if (msecs < 20) {
6380		unsigned long usecs = msecs * USEC_PER_MSEC;
6381		usleep_range(usecs, usecs + 50);
6382	} else {
6383		msleep(msecs);
6384	}
6385
6386	if (owns_eh)
6387		ata_eh_acquire(ap);
6388}
6389EXPORT_SYMBOL_GPL(ata_msleep);
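/*
 * Illustrative use: EH paths (resets, link debounce and the like) should
 * sleep via this helper rather than plain msleep() so that other ports of
 * the same host can make EH progress in the meantime, e.g.
 *
 *	ata_msleep(ap, 150);
 */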
6390
6391/**
6392 *	ata_wait_register - wait until register value changes
6393 *	@ap: ATA port to wait register for, can be NULL
6394 *	@reg: IO-mapped register
6395 *	@mask: Mask to apply to read register value
6396 *	@val: Wait condition
6397 *	@interval: polling interval in milliseconds
6398 *	@timeout: timeout in milliseconds
6399 *
6400 *	Waiting for some bits of register to change is a common
6401 *	operation for ATA controllers.  This function reads 32bit LE
6402 *	IO-mapped register @reg and tests for the following condition.
6403 *
6404 *	(*@reg & @mask) != @val
6405 *
6406 *	If the condition is met, it returns; otherwise, the process is
6407 *	repeated after @interval until @timeout.
6408 *
6409 *	LOCKING:
6410 *	Kernel thread context (may sleep)
6411 *
6412 *	RETURNS:
6413 *	The final register value.
6414 */
6415u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6416		      unsigned long interval, unsigned long timeout)
6417{
6418	unsigned long deadline;
6419	u32 tmp;
6420
6421	tmp = ioread32(reg);
6422
6423	/* Calculate timeout _after_ the first read to make sure
6424	 * preceding writes reach the controller before starting to
6425	 * eat away the timeout.
6426	 */
6427	deadline = ata_deadline(jiffies, timeout);
6428
6429	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6430		ata_msleep(ap, interval);
6431		tmp = ioread32(reg);
6432	}
6433
6434	return tmp;
6435}
6436EXPORT_SYMBOL_GPL(ata_wait_register);
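/*
 * Illustrative use (the FOO_* register and bit names are hypothetical):
 * wait up to 500ms, polling every 10ms, for a self-clearing reset bit:
 *
 *	tmp = ata_wait_register(ap, mmio + FOO_CTL, FOO_RST, FOO_RST,
 *				10, 500);
 *	if (tmp & FOO_RST)
 *		return -EIO;	reset did not complete in time
 */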
6437
6438/*
6439 * Dummy port_ops
6440 */
6441static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6442{
6443	return AC_ERR_SYSTEM;
6444}
6445
6446static void ata_dummy_error_handler(struct ata_port *ap)
6447{
6448	/* truly dummy */
6449}
6450
6451struct ata_port_operations ata_dummy_port_ops = {
6452	.qc_prep		= ata_noop_qc_prep,
6453	.qc_issue		= ata_dummy_qc_issue,
6454	.error_handler		= ata_dummy_error_handler,
6455	.sched_eh		= ata_std_sched_eh,
6456	.end_eh			= ata_std_end_eh,
6457};
6458EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6459
6460const struct ata_port_info ata_dummy_port_info = {
6461	.port_ops		= &ata_dummy_port_ops,
6462};
6463EXPORT_SYMBOL_GPL(ata_dummy_port_info);
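/*
 * Illustrative use: an LLD whose controller exposes a fixed number of
 * ports can mark the absent or disabled ones as dummies when building its
 * ata_port_info array (foo_port_info is hypothetical):
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info,
 *					      &ata_dummy_port_info };
 *
 * Commands issued to a dummy port fail with AC_ERR_SYSTEM as implemented
 * above.
 */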
6464
6465/*
6466 * Utility print functions
6467 */
6468void ata_port_printk(const struct ata_port *ap, const char *level,
6469		     const char *fmt, ...)
6470{
6471	struct va_format vaf;
6472	va_list args;
6473
6474	va_start(args, fmt);
6475
6476	vaf.fmt = fmt;
6477	vaf.va = &args;
6478
6479	printk("%sata%u: %pV", level, ap->print_id, &vaf);
6480
6481	va_end(args);
6482}
6483EXPORT_SYMBOL(ata_port_printk);
6484
6485void ata_link_printk(const struct ata_link *link, const char *level,
6486		     const char *fmt, ...)
6487{
6488	struct va_format vaf;
6489	va_list args;
6490
6491	va_start(args, fmt);
6492
6493	vaf.fmt = fmt;
6494	vaf.va = &args;
6495
6496	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6497		printk("%sata%u.%02u: %pV",
6498		       level, link->ap->print_id, link->pmp, &vaf);
6499	else
6500		printk("%sata%u: %pV",
6501		       level, link->ap->print_id, &vaf);
6502
6503	va_end(args);
6504}
6505EXPORT_SYMBOL(ata_link_printk);
6506
6507void ata_dev_printk(const struct ata_device *dev, const char *level,
6508		    const char *fmt, ...)
6509{
6510	struct va_format vaf;
6511	va_list args;
6512
6513	va_start(args, fmt);
6514
6515	vaf.fmt = fmt;
6516	vaf.va = &args;
6517
6518	printk("%sata%u.%02u: %pV",
6519	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6520	       &vaf);
6521
6522	va_end(args);
6523}
6524EXPORT_SYMBOL(ata_dev_printk);
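/*
 * These helpers back the ata_port_*(), ata_link_*() and ata_dev_*()
 * message macros in <linux/libata.h>; drivers normally use the macros
 * rather than calling the printk helpers directly, e.g.
 *
 *	ata_dev_warn(dev, "failed to read log page (err_mask=0x%x)\n",
 *		     err_mask);
 */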
6525
6526void ata_print_version(const struct device *dev, const char *version)
6527{
6528	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6529}
6530EXPORT_SYMBOL(ata_print_version);