   1/*
   2 *  libata-core.c - helper library for ATA
   3 *
   4 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
   5 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
   6 *		    on emails.
   7 *
   8 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   9 *  Copyright 2003-2004 Jeff Garzik
  10 *
  11 *
  12 *  This program is free software; you can redistribute it and/or modify
  13 *  it under the terms of the GNU General Public License as published by
  14 *  the Free Software Foundation; either version 2, or (at your option)
  15 *  any later version.
  16 *
  17 *  This program is distributed in the hope that it will be useful,
  18 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 *  GNU General Public License for more details.
  21 *
  22 *  You should have received a copy of the GNU General Public License
  23 *  along with this program; see the file COPYING.  If not, write to
  24 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25 *
  26 *
  27 *  libata documentation is available via 'make {ps|pdf}docs',
  28 *  as Documentation/DocBook/libata.*
  29 *
  30 *  Hardware documentation available from http://www.t13.org/ and
  31 *  http://www.sata-io.org/
  32 *
  33 *  Standards documents from:
  34 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
  35 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
  36 *	http://www.sata-io.org (SATA)
  37 *	http://www.compactflash.org (CF)
  38 *	http://www.qic.org (QIC157 - Tape and DSC)
  39 *	http://www.ce-ata.org (CE-ATA: not supported)
  40 *
  41 */
  42
  43#include <linux/kernel.h>
  44#include <linux/module.h>
  45#include <linux/pci.h>
  46#include <linux/init.h>
  47#include <linux/list.h>
  48#include <linux/mm.h>
  49#include <linux/spinlock.h>
  50#include <linux/blkdev.h>
  51#include <linux/delay.h>
  52#include <linux/timer.h>
  53#include <linux/interrupt.h>
  54#include <linux/completion.h>
  55#include <linux/suspend.h>
  56#include <linux/workqueue.h>
  57#include <linux/scatterlist.h>
  58#include <linux/io.h>
  59#include <linux/async.h>
  60#include <linux/log2.h>
  61#include <linux/slab.h>
  62#include <scsi/scsi.h>
  63#include <scsi/scsi_cmnd.h>
  64#include <scsi/scsi_host.h>
  65#include <linux/libata.h>
  66#include <asm/byteorder.h>
  67#include <linux/cdrom.h>
  68#include <linux/ratelimit.h>
  69
  70#include "libata.h"
  71#include "libata-transport.h"
  72
  73/* debounce timing parameters in msecs { interval, duration, timeout } */
  74const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
  75const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
  76const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
  77
  78const struct ata_port_operations ata_base_port_ops = {
  79	.prereset		= ata_std_prereset,
  80	.postreset		= ata_std_postreset,
  81	.error_handler		= ata_std_error_handler,
  82};
  83
  84const struct ata_port_operations sata_port_ops = {
  85	.inherits		= &ata_base_port_ops,
  86
  87	.qc_defer		= ata_std_qc_defer,
  88	.hardreset		= sata_std_hardreset,
  89};
  90
  91static unsigned int ata_dev_init_params(struct ata_device *dev,
  92					u16 heads, u16 sectors);
  93static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
  94static void ata_dev_xfermask(struct ata_device *dev);
  95static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
  96
  97unsigned int ata_print_id = 1;
  98
  99struct ata_force_param {
 100	const char	*name;
 101	unsigned int	cbl;
 102	int		spd_limit;
 103	unsigned long	xfer_mask;
 104	unsigned int	horkage_on;
 105	unsigned int	horkage_off;
 106	unsigned int	lflags;
 107};
 108
 109struct ata_force_ent {
 110	int			port;
 111	int			device;
 112	struct ata_force_param	param;
 113};
 114
 115static struct ata_force_ent *ata_force_tbl;
 116static int ata_force_tbl_size;
 117
 118static char ata_force_param_buf[PAGE_SIZE] __initdata;
 119/* param_buf is thrown away after initialization, disallow read */
 120module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
 121MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
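/*
 * Illustrative usage only; Documentation/kernel-parameters.txt has the
 * authoritative syntax.  Entries are comma separated and may be prefixed
 * with a port or port.device ID, e.g.
 *
 *	libata.force=udma4		limit every device to UDMA/66
 *	libata.force=1:40c,1.00:udma4	treat port 1 as wired with a 40-wire
 *					cable and limit device 1.00 to UDMA/66
 *
 * For a given port/device the last matching entry wins for cable type,
 * speed limit and transfer mode, while horkage entries apply in order.
 */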
 122
 123static int atapi_enabled = 1;
 124module_param(atapi_enabled, int, 0444);
 125MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
 126
 127static int atapi_dmadir = 0;
 128module_param(atapi_dmadir, int, 0444);
 129MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
 130
 131int atapi_passthru16 = 1;
 132module_param(atapi_passthru16, int, 0444);
 133MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
 134
 135int libata_fua = 0;
 136module_param_named(fua, libata_fua, int, 0444);
 137MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
 138
 139static int ata_ignore_hpa;
 140module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
 141MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
 142
 143static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
 144module_param_named(dma, libata_dma_mask, int, 0444);
 145MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
 146
 147static int ata_probe_timeout;
 148module_param(ata_probe_timeout, int, 0444);
 149MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
 150
 151int libata_noacpi = 0;
 152module_param_named(noacpi, libata_noacpi, int, 0444);
 153MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
 154
 155int libata_allow_tpm = 0;
 156module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
 157MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
 158
 159static int atapi_an;
 160module_param(atapi_an, int, 0444);
 161MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
 162
 163MODULE_AUTHOR("Jeff Garzik");
 164MODULE_DESCRIPTION("Library module for ATA devices");
 165MODULE_LICENSE("GPL");
 166MODULE_VERSION(DRV_VERSION);
 167
 168
 169static bool ata_sstatus_online(u32 sstatus)
 170{
 171	return (sstatus & 0xf) == 0x3;
 172}
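/*
 * Note (illustrative): the low nibble of SStatus is the DET field and the
 * value 3 means "device presence detected, Phy communication established".
 * A healthy 3.0 Gbps link typically reports SStatus == 0x123 (IPM=1,
 * SPD=2, DET=3), for which the helper above returns true.
 */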
 173
 174/**
 175 *	ata_link_next - link iteration helper
 176 *	@link: the previous link, NULL to start
 177 *	@ap: ATA port containing links to iterate
 178 *	@mode: iteration mode, one of ATA_LITER_*
 179 *
 180 *	LOCKING:
 181 *	Host lock or EH context.
 182 *
 183 *	RETURNS:
 184 *	Pointer to the next link.
 185 */
 186struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
 187			       enum ata_link_iter_mode mode)
 188{
 189	BUG_ON(mode != ATA_LITER_EDGE &&
 190	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
 191
 192	/* NULL link indicates start of iteration */
 193	if (!link)
 194		switch (mode) {
 195		case ATA_LITER_EDGE:
 196		case ATA_LITER_PMP_FIRST:
 197			if (sata_pmp_attached(ap))
 198				return ap->pmp_link;
 199			/* fall through */
 200		case ATA_LITER_HOST_FIRST:
 201			return &ap->link;
 202		}
 203
 204	/* we just iterated over the host link, what's next? */
 205	if (link == &ap->link)
 206		switch (mode) {
 207		case ATA_LITER_HOST_FIRST:
 208			if (sata_pmp_attached(ap))
 209				return ap->pmp_link;
 210			/* fall through */
 211		case ATA_LITER_PMP_FIRST:
 212			if (unlikely(ap->slave_link))
 213				return ap->slave_link;
 214			/* fall through */
 215		case ATA_LITER_EDGE:
 216			return NULL;
 217		}
 218
 219	/* slave_link excludes PMP */
 220	if (unlikely(link == ap->slave_link))
 221		return NULL;
 222
 223	/* we were over a PMP link */
 224	if (++link < ap->pmp_link + ap->nr_pmp_links)
 225		return link;
 226
 227	if (mode == ATA_LITER_PMP_FIRST)
 228		return &ap->link;
 229
 230	return NULL;
 231}
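/*
 * Callers normally iterate via the ata_for_each_link() wrapper from
 * <linux/libata.h> rather than calling ata_link_next() directly.  A
 * minimal sketch (handle_link() is a hypothetical callback):
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE) {
 *		if (ata_link_online(link))
 *			handle_link(link);
 *	}
 */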
 232
 233/**
 234 *	ata_dev_next - device iteration helper
 235 *	@dev: the previous device, NULL to start
 236 *	@link: ATA link containing devices to iterate
 237 *	@mode: iteration mode, one of ATA_DITER_*
 238 *
 239 *	LOCKING:
 240 *	Host lock or EH context.
 241 *
 242 *	RETURNS:
 243 *	Pointer to the next device.
 244 */
 245struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
 246				enum ata_dev_iter_mode mode)
 247{
 248	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
 249	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
 250
 251	/* NULL dev indicates start of iteration */
 252	if (!dev)
 253		switch (mode) {
 254		case ATA_DITER_ENABLED:
 255		case ATA_DITER_ALL:
 256			dev = link->device;
 257			goto check;
 258		case ATA_DITER_ENABLED_REVERSE:
 259		case ATA_DITER_ALL_REVERSE:
 260			dev = link->device + ata_link_max_devices(link) - 1;
 261			goto check;
 262		}
 263
 264 next:
 265	/* move to the next one */
 266	switch (mode) {
 267	case ATA_DITER_ENABLED:
 268	case ATA_DITER_ALL:
 269		if (++dev < link->device + ata_link_max_devices(link))
 270			goto check;
 271		return NULL;
 272	case ATA_DITER_ENABLED_REVERSE:
 273	case ATA_DITER_ALL_REVERSE:
 274		if (--dev >= link->device)
 275			goto check;
 276		return NULL;
 277	}
 278
 279 check:
 280	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
 281	    !ata_dev_enabled(dev))
 282		goto next;
 283	return dev;
 284}
 285
 286/**
 287 *	ata_dev_phys_link - find physical link for a device
 288 *	@dev: ATA device to look up physical link for
 289 *
 290 *	Look up physical link which @dev is attached to.  Note that
 291 *	this is different from @dev->link only when @dev is on slave
 292 *	link.  For all other cases, it's the same as @dev->link.
 293 *
 294 *	LOCKING:
 295 *	Don't care.
 296 *
 297 *	RETURNS:
 298 *	Pointer to the found physical link.
 299 */
 300struct ata_link *ata_dev_phys_link(struct ata_device *dev)
 301{
 302	struct ata_port *ap = dev->link->ap;
 303
 304	if (!ap->slave_link)
 305		return dev->link;
 306	if (!dev->devno)
 307		return &ap->link;
 308	return ap->slave_link;
 309}
 310
 311/**
 312 *	ata_force_cbl - force cable type according to libata.force
 313 *	@ap: ATA port of interest
 314 *
 315 *	Force cable type according to libata.force and whine about it.
 316 *	The last entry which has matching port number is used, so it
 317 *	can be specified as part of device force parameters.  For
 318 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 319 *	same effect.
 320 *
 321 *	LOCKING:
 322 *	EH context.
 323 */
 324void ata_force_cbl(struct ata_port *ap)
 325{
 326	int i;
 327
 328	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 329		const struct ata_force_ent *fe = &ata_force_tbl[i];
 330
 331		if (fe->port != -1 && fe->port != ap->print_id)
 332			continue;
 333
 334		if (fe->param.cbl == ATA_CBL_NONE)
 335			continue;
 336
 337		ap->cbl = fe->param.cbl;
 338		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
 339		return;
 340	}
 341}
 342
 343/**
 344 *	ata_force_link_limits - force link limits according to libata.force
 345 *	@link: ATA link of interest
 346 *
 347 *	Force link flags and SATA spd limit according to libata.force
 348 *	and whine about it.  When only the port part is specified
 349 *	(e.g. 1:), the limit applies to all links connected to both
 350 *	the host link and all fan-out ports connected via PMP.  If the
 351 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 352 *	first fan-out link not the host link.  Device number 15 always
 353 *	points to the host link whether PMP is attached or not.  If the
 354 *	controller has a slave link, device number 16 points to it.
 355 *
 356 *	LOCKING:
 357 *	EH context.
 358 */
 359static void ata_force_link_limits(struct ata_link *link)
 360{
 361	bool did_spd = false;
 362	int linkno = link->pmp;
 363	int i;
 364
 365	if (ata_is_host_link(link))
 366		linkno += 15;
 367
 368	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 369		const struct ata_force_ent *fe = &ata_force_tbl[i];
 370
 371		if (fe->port != -1 && fe->port != link->ap->print_id)
 372			continue;
 373
 374		if (fe->device != -1 && fe->device != linkno)
 375			continue;
 376
 377		/* only honor the first spd limit */
 378		if (!did_spd && fe->param.spd_limit) {
 379			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
 380			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
 381					fe->param.name);
 382			did_spd = true;
 383		}
 384
 385		/* let lflags stack */
 386		if (fe->param.lflags) {
 387			link->flags |= fe->param.lflags;
 388			ata_link_notice(link,
 389					"FORCE: link flag 0x%x forced -> 0x%x\n",
 390					fe->param.lflags, link->flags);
 391		}
 392	}
 393}
 394
 395/**
 396 *	ata_force_xfermask - force xfermask according to libata.force
 397 *	@dev: ATA device of interest
 398 *
 399 *	Force xfer_mask according to libata.force and whine about it.
 400 *	For consistency with link selection, device number 15 selects
 401 *	the first device connected to the host link.
 402 *
 403 *	LOCKING:
 404 *	EH context.
 405 */
 406static void ata_force_xfermask(struct ata_device *dev)
 407{
 408	int devno = dev->link->pmp + dev->devno;
 409	int alt_devno = devno;
 410	int i;
 411
 412	/* allow n.15/16 for devices attached to host port */
 413	if (ata_is_host_link(dev->link))
 414		alt_devno += 15;
 415
 416	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 417		const struct ata_force_ent *fe = &ata_force_tbl[i];
 418		unsigned long pio_mask, mwdma_mask, udma_mask;
 419
 420		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 421			continue;
 422
 423		if (fe->device != -1 && fe->device != devno &&
 424		    fe->device != alt_devno)
 425			continue;
 426
 427		if (!fe->param.xfer_mask)
 428			continue;
 429
 430		ata_unpack_xfermask(fe->param.xfer_mask,
 431				    &pio_mask, &mwdma_mask, &udma_mask);
 432		if (udma_mask)
 433			dev->udma_mask = udma_mask;
 434		else if (mwdma_mask) {
 435			dev->udma_mask = 0;
 436			dev->mwdma_mask = mwdma_mask;
 437		} else {
 438			dev->udma_mask = 0;
 439			dev->mwdma_mask = 0;
 440			dev->pio_mask = pio_mask;
 441		}
 442
 443		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
 444			       fe->param.name);
 445		return;
 446	}
 447}
 448
 449/**
 450 *	ata_force_horkage - force horkage according to libata.force
 451 *	@dev: ATA device of interest
 452 *
 453 *	Force horkage according to libata.force and whine about it.
 454 *	For consistency with link selection, device number 15 selects
 455 *	the first device connected to the host link.
 456 *
 457 *	LOCKING:
 458 *	EH context.
 459 */
 460static void ata_force_horkage(struct ata_device *dev)
 461{
 462	int devno = dev->link->pmp + dev->devno;
 463	int alt_devno = devno;
 464	int i;
 465
 466	/* allow n.15/16 for devices attached to host port */
 467	if (ata_is_host_link(dev->link))
 468		alt_devno += 15;
 469
 470	for (i = 0; i < ata_force_tbl_size; i++) {
 471		const struct ata_force_ent *fe = &ata_force_tbl[i];
 472
 473		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 474			continue;
 475
 476		if (fe->device != -1 && fe->device != devno &&
 477		    fe->device != alt_devno)
 478			continue;
 479
 480		if (!(~dev->horkage & fe->param.horkage_on) &&
 481		    !(dev->horkage & fe->param.horkage_off))
 482			continue;
 483
 484		dev->horkage |= fe->param.horkage_on;
 485		dev->horkage &= ~fe->param.horkage_off;
 486
 487		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
 488			       fe->param.name);
 489	}
 490}
 491
 492/**
 493 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 494 *	@opcode: SCSI opcode
 495 *
 496 *	Determine ATAPI command type from @opcode.
 497 *
 498 *	LOCKING:
 499 *	None.
 500 *
 501 *	RETURNS:
 502 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 503 */
 504int atapi_cmd_type(u8 opcode)
 505{
 506	switch (opcode) {
 507	case GPCMD_READ_10:
 508	case GPCMD_READ_12:
 509		return ATAPI_READ;
 510
 511	case GPCMD_WRITE_10:
 512	case GPCMD_WRITE_12:
 513	case GPCMD_WRITE_AND_VERIFY_10:
 514		return ATAPI_WRITE;
 515
 516	case GPCMD_READ_CD:
 517	case GPCMD_READ_CD_MSF:
 518		return ATAPI_READ_CD;
 519
 520	case ATA_16:
 521	case ATA_12:
 522		if (atapi_passthru16)
 523			return ATAPI_PASS_THRU;
 524		/* fall thru */
 525	default:
 526		return ATAPI_MISC;
 527	}
 528}
 529
 530/**
 531 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 532 *	@tf: Taskfile to convert
 533 *	@pmp: Port multiplier port
 534 *	@is_cmd: This FIS is for command
 535 *	@fis: Buffer into which data will be output
 536 *
 537 *	Converts a standard ATA taskfile to a Serial ATA
 538 *	FIS structure (Register - Host to Device).
 539 *
 540 *	LOCKING:
 541 *	Inherited from caller.
 542 */
 543void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
 544{
 545	fis[0] = 0x27;			/* Register - Host to Device FIS */
 546	fis[1] = pmp & 0xf;		/* Port multiplier number */
 547	if (is_cmd)
 548		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
 549
 550	fis[2] = tf->command;
 551	fis[3] = tf->feature;
 552
 553	fis[4] = tf->lbal;
 554	fis[5] = tf->lbam;
 555	fis[6] = tf->lbah;
 556	fis[7] = tf->device;
 557
 558	fis[8] = tf->hob_lbal;
 559	fis[9] = tf->hob_lbam;
 560	fis[10] = tf->hob_lbah;
 561	fis[11] = tf->hob_feature;
 562
 563	fis[12] = tf->nsect;
 564	fis[13] = tf->hob_nsect;
 565	fis[14] = 0;
 566	fis[15] = tf->ctl;
 567
 568	fis[16] = 0;
 569	fis[17] = 0;
 570	fis[18] = 0;
 571	fis[19] = 0;
 572}
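/*
 * Worked example (illustrative): for READ DMA EXT (0x25) of 8 sectors at
 * LBA 0x12345678 on the host port with @is_cmd set, the bytes built above
 * are
 *
 *	fis[0..3]   = 27 80 25 00   (type, C bit + PMP 0, command, feature)
 *	fis[4..7]   = 78 56 34 40   (LBA 7:0, 15:8, 23:16, device w/ ATA_LBA)
 *	fis[8..11]  = 12 00 00 00   (LBA 31:24, 39:32, 47:40, feature exp)
 *	fis[12..13] = 08 00         (sector count 7:0, 15:8)
 */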
 573
 574/**
 575 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 576 *	@fis: Buffer from which data will be input
 577 *	@tf: Taskfile to output
 578 *
 579 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 580 *
 581 *	LOCKING:
 582 *	Inherited from caller.
 583 */
 584
 585void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
 586{
 587	tf->command	= fis[2];	/* status */
 588	tf->feature	= fis[3];	/* error */
 589
 590	tf->lbal	= fis[4];
 591	tf->lbam	= fis[5];
 592	tf->lbah	= fis[6];
 593	tf->device	= fis[7];
 594
 595	tf->hob_lbal	= fis[8];
 596	tf->hob_lbam	= fis[9];
 597	tf->hob_lbah	= fis[10];
 598
 599	tf->nsect	= fis[12];
 600	tf->hob_nsect	= fis[13];
 601}
 602
 603static const u8 ata_rw_cmds[] = {
 604	/* pio multi */
 605	ATA_CMD_READ_MULTI,
 606	ATA_CMD_WRITE_MULTI,
 607	ATA_CMD_READ_MULTI_EXT,
 608	ATA_CMD_WRITE_MULTI_EXT,
 609	0,
 610	0,
 611	0,
 612	ATA_CMD_WRITE_MULTI_FUA_EXT,
 613	/* pio */
 614	ATA_CMD_PIO_READ,
 615	ATA_CMD_PIO_WRITE,
 616	ATA_CMD_PIO_READ_EXT,
 617	ATA_CMD_PIO_WRITE_EXT,
 618	0,
 619	0,
 620	0,
 621	0,
 622	/* dma */
 623	ATA_CMD_READ,
 624	ATA_CMD_WRITE,
 625	ATA_CMD_READ_EXT,
 626	ATA_CMD_WRITE_EXT,
 627	0,
 628	0,
 629	0,
 630	ATA_CMD_WRITE_FUA_EXT
 631};
 632
 633/**
 634 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 635 *	@tf: command to examine and configure
 636 *	@dev: device tf belongs to
 637 *
 638 *	Examine the device configuration and tf->flags to calculate
 639 *	the proper read/write commands and protocol to use.
 640 *
 641 *	LOCKING:
 642 *	caller.
 643 */
 644static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
 645{
 646	u8 cmd;
 647
 648	int index, fua, lba48, write;
 649
 650	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
 651	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
 652	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
 653
 654	if (dev->flags & ATA_DFLAG_PIO) {
 655		tf->protocol = ATA_PROT_PIO;
 656		index = dev->multi_count ? 0 : 8;
 657	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
 658		/* Unable to use DMA due to host limitation */
 659		tf->protocol = ATA_PROT_PIO;
 660		index = dev->multi_count ? 0 : 8;
 661	} else {
 662		tf->protocol = ATA_PROT_DMA;
 663		index = 16;
 664	}
 665
 666	cmd = ata_rw_cmds[index + fua + lba48 + write];
 667	if (cmd) {
 668		tf->command = cmd;
 669		return 0;
 670	}
 671	return -1;
 672}
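/*
 * The command is picked by simple arithmetic on ata_rw_cmds[] above:
 * index selects the block (0 = PIO multi, 8 = PIO, 16 = DMA) and
 * fua/lba48/write select the entry within it.  For example, an LBA48 DMA
 * write (write = 1, lba48 = 2, fua = 0) yields ata_rw_cmds[16 + 3], i.e.
 * ATA_CMD_WRITE_EXT.
 */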
 673
 674/**
 675 *	ata_tf_read_block - Read block address from ATA taskfile
 676 *	@tf: ATA taskfile of interest
 677 *	@dev: ATA device @tf belongs to
 678 *
 679 *	LOCKING:
 680 *	None.
 681 *
 682 *	Read block address from @tf.  This function can handle all
 683 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 684 *	flags select the address format to use.
 685 *
 686 *	RETURNS:
 687 *	Block address read from @tf.
 688 */
 689u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 690{
 691	u64 block = 0;
 692
 693	if (tf->flags & ATA_TFLAG_LBA) {
 694		if (tf->flags & ATA_TFLAG_LBA48) {
 695			block |= (u64)tf->hob_lbah << 40;
 696			block |= (u64)tf->hob_lbam << 32;
 697			block |= (u64)tf->hob_lbal << 24;
 698		} else
 699			block |= (tf->device & 0xf) << 24;
 700
 701		block |= tf->lbah << 16;
 702		block |= tf->lbam << 8;
 703		block |= tf->lbal;
 704	} else {
 705		u32 cyl, head, sect;
 706
 707		cyl = tf->lbam | (tf->lbah << 8);
 708		head = tf->device & 0xf;
 709		sect = tf->lbal;
 710
 711		if (!sect) {
 712			ata_dev_warn(dev,
 713				     "device reported invalid CHS sector 0\n");
 714			sect = 1; /* oh well */
 715		}
 716
 717		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
 718	}
 719
 720	return block;
 721}
 722
 723/**
 724 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 725 *	@tf: Target ATA taskfile
 726 *	@dev: ATA device @tf belongs to
 727 *	@block: Block address
 728 *	@n_block: Number of blocks
 729 *	@tf_flags: RW/FUA etc...
 730 *	@tag: NCQ command tag (encoded into the taskfile when NCQ is used)
 731 *
 732 *	LOCKING:
 733 *	None.
 734 *
 735 *	Build ATA taskfile @tf for read/write request described by
 736 *	@block, @n_block, @tf_flags and @tag on @dev.
 737 *
 738 *	RETURNS:
 739 *
 740 *	0 on success, -ERANGE if the request is too large for @dev,
 741 *	-EINVAL if the request is invalid.
 742 */
 743int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 744		    u64 block, u32 n_block, unsigned int tf_flags,
 745		    unsigned int tag)
 746{
 747	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 748	tf->flags |= tf_flags;
 749
 750	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
 751		/* yay, NCQ */
 752		if (!lba_48_ok(block, n_block))
 753			return -ERANGE;
 754
 755		tf->protocol = ATA_PROT_NCQ;
 756		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 757
 758		if (tf->flags & ATA_TFLAG_WRITE)
 759			tf->command = ATA_CMD_FPDMA_WRITE;
 760		else
 761			tf->command = ATA_CMD_FPDMA_READ;
 762
 763		tf->nsect = tag << 3;
 764		tf->hob_feature = (n_block >> 8) & 0xff;
 765		tf->feature = n_block & 0xff;
 766
 767		tf->hob_lbah = (block >> 40) & 0xff;
 768		tf->hob_lbam = (block >> 32) & 0xff;
 769		tf->hob_lbal = (block >> 24) & 0xff;
 770		tf->lbah = (block >> 16) & 0xff;
 771		tf->lbam = (block >> 8) & 0xff;
 772		tf->lbal = block & 0xff;
 773
 774		tf->device = 1 << 6;
 775		if (tf->flags & ATA_TFLAG_FUA)
 776			tf->device |= 1 << 7;
 777	} else if (dev->flags & ATA_DFLAG_LBA) {
 778		tf->flags |= ATA_TFLAG_LBA;
 779
 780		if (lba_28_ok(block, n_block)) {
 781			/* use LBA28 */
 782			tf->device |= (block >> 24) & 0xf;
 783		} else if (lba_48_ok(block, n_block)) {
 784			if (!(dev->flags & ATA_DFLAG_LBA48))
 785				return -ERANGE;
 786
 787			/* use LBA48 */
 788			tf->flags |= ATA_TFLAG_LBA48;
 789
 790			tf->hob_nsect = (n_block >> 8) & 0xff;
 791
 792			tf->hob_lbah = (block >> 40) & 0xff;
 793			tf->hob_lbam = (block >> 32) & 0xff;
 794			tf->hob_lbal = (block >> 24) & 0xff;
 795		} else
 796			/* request too large even for LBA48 */
 797			return -ERANGE;
 798
 799		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
 800			return -EINVAL;
 801
 802		tf->nsect = n_block & 0xff;
 803
 804		tf->lbah = (block >> 16) & 0xff;
 805		tf->lbam = (block >> 8) & 0xff;
 806		tf->lbal = block & 0xff;
 807
 808		tf->device |= ATA_LBA;
 809	} else {
 810		/* CHS */
 811		u32 sect, head, cyl, track;
 812
 813		/* The request -may- be too large for CHS addressing. */
 814		if (!lba_28_ok(block, n_block))
 815			return -ERANGE;
 816
 817		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
 818			return -EINVAL;
 819
 820		/* Convert LBA to CHS */
 821		track = (u32)block / dev->sectors;
 822		cyl   = track / dev->heads;
 823		head  = track % dev->heads;
 824		sect  = (u32)block % dev->sectors + 1;
 825
 826		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
 827			(u32)block, track, cyl, head, sect);
 828
 829		/* Check whether the converted CHS can fit.
 830		   Cylinder: 0-65535
 831		   Head: 0-15
 832		   Sector: 1-255 */
 833		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
 834			return -ERANGE;
 835
 836		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
 837		tf->lbal = sect;
 838		tf->lbam = cyl;
 839		tf->lbah = cyl >> 8;
 840		tf->device |= head;
 841	}
 842
 843	return 0;
 844}
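/*
 * CHS example (illustrative): with dev->sectors == 63 and dev->heads == 16,
 * block 4000 converts to track = 63, cyl = 3, head = 15 and
 * sect = 4000 % 63 + 1 = 32, all within the limits checked above.
 */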
 845
 846/**
 847 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 848 *	@pio_mask: pio_mask
 849 *	@mwdma_mask: mwdma_mask
 850 *	@udma_mask: udma_mask
 851 *
 852 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 853 *	unsigned int xfer_mask.
 854 *
 855 *	LOCKING:
 856 *	None.
 857 *
 858 *	RETURNS:
 859 *	Packed xfer_mask.
 860 */
 861unsigned long ata_pack_xfermask(unsigned long pio_mask,
 862				unsigned long mwdma_mask,
 863				unsigned long udma_mask)
 864{
 865	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
 866		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
 867		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 868}
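/*
 * Example (illustrative): PIO modes 0-4, MWDMA modes 0-2 and UDMA modes
 * 0-5 pack as
 *
 *	ata_pack_xfermask(0x1f, 0x07, 0x3f)
 *		== (0x1f << ATA_SHIFT_PIO) | (0x07 << ATA_SHIFT_MWDMA) |
 *		   (0x3f << ATA_SHIFT_UDMA)
 *
 * and ata_unpack_xfermask() below recovers the three masks again.
 */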
 869
 870/**
 871 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 872 *	@xfer_mask: xfer_mask to unpack
 873 *	@pio_mask: resulting pio_mask
 874 *	@mwdma_mask: resulting mwdma_mask
 875 *	@udma_mask: resulting udma_mask
 876 *
 877 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 878 *	Any NULL destination masks will be ignored.
 879 */
 880void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
 881			 unsigned long *mwdma_mask, unsigned long *udma_mask)
 882{
 883	if (pio_mask)
 884		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
 885	if (mwdma_mask)
 886		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
 887	if (udma_mask)
 888		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 889}
 890
 891static const struct ata_xfer_ent {
 892	int shift, bits;
 893	u8 base;
 894} ata_xfer_tbl[] = {
 895	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
 896	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
 897	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
 898	{ -1, },
 899};
 900
 901/**
 902 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 903 *	@xfer_mask: xfer_mask of interest
 904 *
 905 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 906 *	bit of @xfer_mask is considered.
 907 *
 908 *	LOCKING:
 909 *	None.
 910 *
 911 *	RETURNS:
 912 *	Matching XFER_* value, 0xff if no match found.
 913 */
 914u8 ata_xfer_mask2mode(unsigned long xfer_mask)
 915{
 916	int highbit = fls(xfer_mask) - 1;
 917	const struct ata_xfer_ent *ent;
 918
 919	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 920		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
 921			return ent->base + highbit - ent->shift;
 922	return 0xff;
 923}
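/*
 * For example, a mask whose highest set bit is ATA_SHIFT_UDMA + 5
 * (UDMA/100) maps to XFER_UDMA_5, and ata_xfer_mode2mask() below turns
 * XFER_UDMA_5 back into the mask of UDMA modes 0-5.
 */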
 924
 925/**
 926 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 927 *	@xfer_mode: XFER_* of interest
 928 *
 929 *	Return matching xfer_mask for @xfer_mode.
 930 *
 931 *	LOCKING:
 932 *	None.
 933 *
 934 *	RETURNS:
 935 *	Matching xfer_mask, 0 if no match found.
 936 */
 937unsigned long ata_xfer_mode2mask(u8 xfer_mode)
 938{
 939	const struct ata_xfer_ent *ent;
 940
 941	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 942		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 943			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
 944				& ~((1 << ent->shift) - 1);
 945	return 0;
 946}
 947
 948/**
 949 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 950 *	@xfer_mode: XFER_* of interest
 951 *
 952 *	Return matching xfer_shift for @xfer_mode.
 953 *
 954 *	LOCKING:
 955 *	None.
 956 *
 957 *	RETURNS:
 958 *	Matching xfer_shift, -1 if no match found.
 959 */
 960int ata_xfer_mode2shift(unsigned long xfer_mode)
 961{
 962	const struct ata_xfer_ent *ent;
 963
 964	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 965		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 966			return ent->shift;
 967	return -1;
 968}
 969
 970/**
 971 *	ata_mode_string - convert xfer_mask to string
 972 *	@xfer_mask: mask of bits supported; only highest bit counts.
 973 *
 974 *	Determine string which represents the highest speed
 975 *	(highest bit in @xfer_mask).
 976 *
 977 *	LOCKING:
 978 *	None.
 979 *
 980 *	RETURNS:
 981 *	Constant C string representing highest speed listed in
 982 *	@xfer_mask, or the constant C string "<n/a>".
 983 */
 984const char *ata_mode_string(unsigned long xfer_mask)
 985{
 986	static const char * const xfer_mode_str[] = {
 987		"PIO0",
 988		"PIO1",
 989		"PIO2",
 990		"PIO3",
 991		"PIO4",
 992		"PIO5",
 993		"PIO6",
 994		"MWDMA0",
 995		"MWDMA1",
 996		"MWDMA2",
 997		"MWDMA3",
 998		"MWDMA4",
 999		"UDMA/16",
1000		"UDMA/25",
1001		"UDMA/33",
1002		"UDMA/44",
1003		"UDMA/66",
1004		"UDMA/100",
1005		"UDMA/133",
1006		"UDMA7",
1007	};
1008	int highbit;
1009
1010	highbit = fls(xfer_mask) - 1;
1011	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1012		return xfer_mode_str[highbit];
1013	return "<n/a>";
1014}
1015
1016const char *sata_spd_string(unsigned int spd)
1017{
1018	static const char * const spd_str[] = {
1019		"1.5 Gbps",
1020		"3.0 Gbps",
1021		"6.0 Gbps",
1022	};
1023
1024	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1025		return "<unknown>";
1026	return spd_str[spd - 1];
1027}
1028
1029/**
1030 *	ata_dev_classify - determine device type based on ATA-spec signature
1031 *	@tf: ATA taskfile register set for device to be identified
1032 *
1033 *	Determine from taskfile register contents whether a device is
1034 *	ATA or ATAPI, as per "Signature and persistence" section
1035 *	of ATA/PI spec (volume 1, sect 5.14).
1036 *
1037 *	LOCKING:
1038 *	None.
1039 *
1040 *	RETURNS:
1041 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1042 *	%ATA_DEV_UNKNOWN in the event of failure.
1043 */
1044unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1045{
1046	/* Apple's open source Darwin code hints that some devices only
1047	 * put a proper signature into the LBA mid/high registers,
1048	 * so we check only those.  It's sufficient for uniqueness.
1049	 *
1050	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1051	 * signatures for ATA and ATAPI devices attached on SerialATA,
1052	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1053	 * spec never mentioned using different signatures
1054	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1055	 * Multiplier specification began to use 0x69/0x96 to identify
1056	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1057	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
1058	 * 0x69/0x96 shortly afterwards and described them as reserved
1059	 * for SerialATA.
1060	 *
1061	 * We follow the current spec and consider that 0x69/0x96
1062	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1063	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1064	 * SEMB signature.  This is worked around in
1065	 * ata_dev_read_id().
1066	 */
1067	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1068		DPRINTK("found ATA device by sig\n");
1069		return ATA_DEV_ATA;
1070	}
1071
1072	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1073		DPRINTK("found ATAPI device by sig\n");
1074		return ATA_DEV_ATAPI;
1075	}
1076
1077	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1078		DPRINTK("found PMP device by sig\n");
1079		return ATA_DEV_PMP;
1080	}
1081
1082	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1083		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1084		return ATA_DEV_SEMB;
1085	}
1086
1087	DPRINTK("unknown device\n");
1088	return ATA_DEV_UNKNOWN;
1089}
1090
1091/**
1092 *	ata_id_string - Convert IDENTIFY DEVICE page into string
1093 *	@id: IDENTIFY DEVICE results we will examine
1094 *	@s: string into which data is output
1095 *	@ofs: offset into identify device page
1096 *	@len: length of string to return. must be an even number.
1097 *
1098 *	The strings in the IDENTIFY DEVICE page are broken up into
1099 *	16-bit chunks.  Run through the string, and output each
1100 *	8-bit chunk linearly, regardless of platform.
1101 *
1102 *	LOCKING:
1103 *	caller.
1104 */
1105
1106void ata_id_string(const u16 *id, unsigned char *s,
1107		   unsigned int ofs, unsigned int len)
1108{
1109	unsigned int c;
1110
1111	BUG_ON(len & 1);
1112
1113	while (len > 0) {
1114		c = id[ofs] >> 8;
1115		*s = c;
1116		s++;
1117
1118		c = id[ofs] & 0xff;
1119		*s = c;
1120		s++;
1121
1122		ofs++;
1123		len -= 2;
1124	}
1125}
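/*
 * Example (illustrative): a model string beginning "ST31000" is stored in
 * the IDENTIFY data as the 16-bit words 0x5354 0x3331 0x3030 ..., i.e.
 * high byte first within each word, which is why the high byte is emitted
 * before the low byte above.
 */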
1126
1127/**
1128 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1129 *	@id: IDENTIFY DEVICE results we will examine
1130 *	@s: string into which data is output
1131 *	@ofs: offset into identify device page
1132 *	@len: length of string to return. must be an odd number.
1133 *
1134 *	This function is identical to ata_id_string except that it
1135 *	trims trailing spaces and terminates the resulting string with
1136 *	null.  @len must be actual maximum length (even number) + 1.
1137 *
1138 *	LOCKING:
1139 *	caller.
1140 */
1141void ata_id_c_string(const u16 *id, unsigned char *s,
1142		     unsigned int ofs, unsigned int len)
1143{
1144	unsigned char *p;
1145
1146	ata_id_string(id, s, ofs, len - 1);
1147
1148	p = s + strnlen(s, len - 1);
1149	while (p > s && p[-1] == ' ')
1150		p--;
1151	*p = '\0';
1152}
1153
1154static u64 ata_id_n_sectors(const u16 *id)
1155{
1156	if (ata_id_has_lba(id)) {
1157		if (ata_id_has_lba48(id))
1158			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1159		else
1160			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1161	} else {
1162		if (ata_id_current_chs_valid(id))
1163			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1164			       id[ATA_ID_CUR_SECTORS];
1165		else
1166			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1167			       id[ATA_ID_SECTORS];
1168	}
1169}
1170
1171u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1172{
1173	u64 sectors = 0;
1174
1175	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1176	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1177	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1178	sectors |= (tf->lbah & 0xff) << 16;
1179	sectors |= (tf->lbam & 0xff) << 8;
1180	sectors |= (tf->lbal & 0xff);
1181
1182	return sectors;
1183}
1184
1185u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1186{
1187	u64 sectors = 0;
1188
1189	sectors |= (tf->device & 0x0f) << 24;
1190	sectors |= (tf->lbah & 0xff) << 16;
1191	sectors |= (tf->lbam & 0xff) << 8;
1192	sectors |= (tf->lbal & 0xff);
1193
1194	return sectors;
1195}
1196
1197/**
1198 *	ata_read_native_max_address - Read native max address
1199 *	@dev: target device
1200 *	@max_sectors: out parameter for the result native max address
1201 *
1202 *	Perform an LBA48 or LBA28 native size query upon the device in
1203 *	question.
1204 *
1205 *	RETURNS:
1206 *	0 on success, -EACCES if command is aborted by the drive.
1207 *	-EIO on other errors.
1208 */
1209static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1210{
1211	unsigned int err_mask;
1212	struct ata_taskfile tf;
1213	int lba48 = ata_id_has_lba48(dev->id);
1214
1215	ata_tf_init(dev, &tf);
1216
1217	/* always clear all address registers */
1218	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1219
1220	if (lba48) {
1221		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1222		tf.flags |= ATA_TFLAG_LBA48;
1223	} else
1224		tf.command = ATA_CMD_READ_NATIVE_MAX;
1225
1226	tf.protocol |= ATA_PROT_NODATA;
1227	tf.device |= ATA_LBA;
1228
1229	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1230	if (err_mask) {
1231		ata_dev_warn(dev,
1232			     "failed to read native max address (err_mask=0x%x)\n",
1233			     err_mask);
1234		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1235			return -EACCES;
1236		return -EIO;
1237	}
1238
1239	if (lba48)
1240		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1241	else
1242		*max_sectors = ata_tf_to_lba(&tf) + 1;
1243	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1244		(*max_sectors)--;
1245	return 0;
1246}
1247
1248/**
1249 *	ata_set_max_sectors - Set max sectors
1250 *	@dev: target device
1251 *	@new_sectors: new max sectors value to set for the device
1252 *
1253 *	Set max sectors of @dev to @new_sectors.
1254 *
1255 *	RETURNS:
1256 *	0 on success, -EACCES if command is aborted or denied (due to
1257 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1258 *	errors.
1259 */
1260static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1261{
1262	unsigned int err_mask;
1263	struct ata_taskfile tf;
1264	int lba48 = ata_id_has_lba48(dev->id);
1265
1266	new_sectors--;
1267
1268	ata_tf_init(dev, &tf);
1269
1270	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1271
1272	if (lba48) {
1273		tf.command = ATA_CMD_SET_MAX_EXT;
1274		tf.flags |= ATA_TFLAG_LBA48;
1275
1276		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1277		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1278		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1279	} else {
1280		tf.command = ATA_CMD_SET_MAX;
1281
1282		tf.device |= (new_sectors >> 24) & 0xf;
1283	}
1284
1285	tf.protocol |= ATA_PROT_NODATA;
1286	tf.device |= ATA_LBA;
1287
1288	tf.lbal = (new_sectors >> 0) & 0xff;
1289	tf.lbam = (new_sectors >> 8) & 0xff;
1290	tf.lbah = (new_sectors >> 16) & 0xff;
1291
1292	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1293	if (err_mask) {
1294		ata_dev_warn(dev,
1295			     "failed to set max address (err_mask=0x%x)\n",
1296			     err_mask);
1297		if (err_mask == AC_ERR_DEV &&
1298		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1299			return -EACCES;
1300		return -EIO;
1301	}
1302
1303	return 0;
1304}
1305
1306/**
1307 *	ata_hpa_resize		-	Resize a device with an HPA set
1308 *	@dev: Device to resize
1309 *
1310 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1311 *	it if required to the full size of the media. The caller must check
1312 *	the drive has the HPA feature set enabled.
1313 *
1314 *	RETURNS:
1315 *	0 on success, -errno on failure.
1316 */
1317static int ata_hpa_resize(struct ata_device *dev)
1318{
1319	struct ata_eh_context *ehc = &dev->link->eh_context;
1320	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1321	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1322	u64 sectors = ata_id_n_sectors(dev->id);
1323	u64 native_sectors;
1324	int rc;
1325
1326	/* do we need to do it? */
1327	if (dev->class != ATA_DEV_ATA ||
1328	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1329	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1330		return 0;
1331
1332	/* read native max address */
1333	rc = ata_read_native_max_address(dev, &native_sectors);
1334	if (rc) {
1335		/* If device aborted the command or HPA isn't going to
1336		 * be unlocked, skip HPA resizing.
1337		 */
1338		if (rc == -EACCES || !unlock_hpa) {
1339			ata_dev_warn(dev,
1340				     "HPA support seems broken, skipping HPA handling\n");
1341			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1342
1343			/* we can continue if device aborted the command */
1344			if (rc == -EACCES)
1345				rc = 0;
1346		}
1347
1348		return rc;
1349	}
1350	dev->n_native_sectors = native_sectors;
1351
1352	/* nothing to do? */
1353	if (native_sectors <= sectors || !unlock_hpa) {
1354		if (!print_info || native_sectors == sectors)
1355			return 0;
1356
1357		if (native_sectors > sectors)
1358			ata_dev_info(dev,
1359				"HPA detected: current %llu, native %llu\n",
1360				(unsigned long long)sectors,
1361				(unsigned long long)native_sectors);
1362		else if (native_sectors < sectors)
1363			ata_dev_warn(dev,
1364				"native sectors (%llu) is smaller than sectors (%llu)\n",
1365				(unsigned long long)native_sectors,
1366				(unsigned long long)sectors);
1367		return 0;
1368	}
1369
1370	/* let's unlock HPA */
1371	rc = ata_set_max_sectors(dev, native_sectors);
1372	if (rc == -EACCES) {
1373		/* if device aborted the command, skip HPA resizing */
1374		ata_dev_warn(dev,
1375			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1376			     (unsigned long long)sectors,
1377			     (unsigned long long)native_sectors);
1378		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1379		return 0;
1380	} else if (rc)
1381		return rc;
1382
1383	/* re-read IDENTIFY data */
1384	rc = ata_dev_reread_id(dev, 0);
1385	if (rc) {
1386		ata_dev_err(dev,
1387			    "failed to re-read IDENTIFY data after HPA resizing\n");
1388		return rc;
1389	}
1390
1391	if (print_info) {
1392		u64 new_sectors = ata_id_n_sectors(dev->id);
1393		ata_dev_info(dev,
1394			"HPA unlocked: %llu -> %llu, native %llu\n",
1395			(unsigned long long)sectors,
1396			(unsigned long long)new_sectors,
1397			(unsigned long long)native_sectors);
1398	}
1399
1400	return 0;
1401}
1402
1403/**
1404 *	ata_dump_id - IDENTIFY DEVICE info debugging output
1405 *	@id: IDENTIFY DEVICE page to dump
1406 *
1407 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1408 *	page.
1409 *
1410 *	LOCKING:
1411 *	caller.
1412 */
1413
1414static inline void ata_dump_id(const u16 *id)
1415{
1416	DPRINTK("49==0x%04x  "
1417		"53==0x%04x  "
1418		"63==0x%04x  "
1419		"64==0x%04x  "
1420		"75==0x%04x  \n",
1421		id[49],
1422		id[53],
1423		id[63],
1424		id[64],
1425		id[75]);
1426	DPRINTK("80==0x%04x  "
1427		"81==0x%04x  "
1428		"82==0x%04x  "
1429		"83==0x%04x  "
1430		"84==0x%04x  \n",
1431		id[80],
1432		id[81],
1433		id[82],
1434		id[83],
1435		id[84]);
1436	DPRINTK("88==0x%04x  "
1437		"93==0x%04x\n",
1438		id[88],
1439		id[93]);
1440}
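/*
 * Reading aid (word meanings per the IDENTIFY DEVICE layout): 49 =
 * capabilities, 53 = field validity, 63 = MWDMA modes, 64 = advanced PIO
 * modes, 75 = queue depth, 80/81 = major/minor version, 82-84 = command
 * sets supported, 88 = UDMA modes, 93 = hardware reset result.
 */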
1441
1442/**
1443 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1444 *	@id: IDENTIFY data to compute xfer mask from
1445 *
1446 *	Compute the xfermask for this device. This is not as trivial
1447 *	as it seems if we must consider early devices correctly.
1448 *
1449 *	FIXME: pre IDE drive timing (do we care ?).
1450 *
1451 *	LOCKING:
1452 *	None.
1453 *
1454 *	RETURNS:
1455 *	Computed xfermask
1456 */
1457unsigned long ata_id_xfermask(const u16 *id)
1458{
1459	unsigned long pio_mask, mwdma_mask, udma_mask;
1460
1461	/* Usual case. Word 53 indicates word 64 is valid */
1462	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1463		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1464		pio_mask <<= 3;
1465		pio_mask |= 0x7;
1466	} else {
1467		/* If word 64 isn't valid then Word 51 high byte holds
1468		 * the PIO timing number for the maximum. Turn it into
1469		 * a mask.
1470		 */
1471		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1472		if (mode < 5)	/* Valid PIO range */
1473			pio_mask = (2 << mode) - 1;
1474		else
1475			pio_mask = 1;
1476
1477		/* But wait.. there's more. Design your standards by
1478		 * committee and you too can get a free iordy field to
1479		 * process. However it's the speeds, not the modes, that
1480		 * are supported... Note drivers using the timing API
1481		 * will get this right anyway
1482		 */
1483	}
1484
1485	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1486
1487	if (ata_id_is_cfa(id)) {
1488		/*
1489		 *	Process compact flash extended modes
1490		 */
1491		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1492		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1493
1494		if (pio)
1495			pio_mask |= (1 << 5);
1496		if (pio > 1)
1497			pio_mask |= (1 << 6);
1498		if (dma)
1499			mwdma_mask |= (1 << 3);
1500		if (dma > 1)
1501			mwdma_mask |= (1 << 4);
1502	}
1503
1504	udma_mask = 0;
1505	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1506		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1507
1508	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1509}
1510
1511static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1512{
1513	struct completion *waiting = qc->private_data;
1514
1515	complete(waiting);
1516}
1517
1518/**
1519 *	ata_exec_internal_sg - execute libata internal command
1520 *	@dev: Device to which the command is sent
1521 *	@tf: Taskfile registers for the command and the result
1522 *	@cdb: CDB for packet command
1523 *	@dma_dir: Data transfer direction of the command
1524 *	@sgl: sg list for the data buffer of the command
1525 *	@n_elem: Number of sg entries
1526 *	@timeout: Timeout in msecs (0 for default)
1527 *
1528 *	Executes libata internal command with timeout.  @tf contains
1529 *	command on entry and result on return.  Timeout and error
1530 *	conditions are reported via return value.  No recovery action
1531 *	is taken after a command times out.  It's the caller's duty to
1532 *	clean up after timeout.
1533 *
1534 *	LOCKING:
1535 *	None.  Should be called with kernel context, might sleep.
1536 *
1537 *	RETURNS:
1538 *	Zero on success, AC_ERR_* mask on failure
1539 */
1540unsigned ata_exec_internal_sg(struct ata_device *dev,
1541			      struct ata_taskfile *tf, const u8 *cdb,
1542			      int dma_dir, struct scatterlist *sgl,
1543			      unsigned int n_elem, unsigned long timeout)
1544{
1545	struct ata_link *link = dev->link;
1546	struct ata_port *ap = link->ap;
1547	u8 command = tf->command;
1548	int auto_timeout = 0;
1549	struct ata_queued_cmd *qc;
1550	unsigned int tag, preempted_tag;
1551	u32 preempted_sactive, preempted_qc_active;
1552	int preempted_nr_active_links;
1553	DECLARE_COMPLETION_ONSTACK(wait);
1554	unsigned long flags;
1555	unsigned int err_mask;
1556	int rc;
1557
1558	spin_lock_irqsave(ap->lock, flags);
1559
1560	/* no internal command while frozen */
1561	if (ap->pflags & ATA_PFLAG_FROZEN) {
1562		spin_unlock_irqrestore(ap->lock, flags);
1563		return AC_ERR_SYSTEM;
1564	}
1565
1566	/* initialize internal qc */
1567
1568	/* XXX: Tag 0 is used for drivers with legacy EH as some
1569	 * drivers choke if any other tag is given.  This breaks
1570	 * ata_tag_internal() test for those drivers.  Don't use new
1571	 * EH stuff without converting to it.
1572	 */
1573	if (ap->ops->error_handler)
1574		tag = ATA_TAG_INTERNAL;
1575	else
1576		tag = 0;
1577
1578	if (test_and_set_bit(tag, &ap->qc_allocated))
1579		BUG();
1580	qc = __ata_qc_from_tag(ap, tag);
1581
1582	qc->tag = tag;
1583	qc->scsicmd = NULL;
1584	qc->ap = ap;
1585	qc->dev = dev;
1586	ata_qc_reinit(qc);
1587
1588	preempted_tag = link->active_tag;
1589	preempted_sactive = link->sactive;
1590	preempted_qc_active = ap->qc_active;
1591	preempted_nr_active_links = ap->nr_active_links;
1592	link->active_tag = ATA_TAG_POISON;
1593	link->sactive = 0;
1594	ap->qc_active = 0;
1595	ap->nr_active_links = 0;
1596
1597	/* prepare & issue qc */
1598	qc->tf = *tf;
1599	if (cdb)
1600		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1601	qc->flags |= ATA_QCFLAG_RESULT_TF;
1602	qc->dma_dir = dma_dir;
1603	if (dma_dir != DMA_NONE) {
1604		unsigned int i, buflen = 0;
1605		struct scatterlist *sg;
1606
1607		for_each_sg(sgl, sg, n_elem, i)
1608			buflen += sg->length;
1609
1610		ata_sg_init(qc, sgl, n_elem);
1611		qc->nbytes = buflen;
1612	}
1613
1614	qc->private_data = &wait;
1615	qc->complete_fn = ata_qc_complete_internal;
1616
1617	ata_qc_issue(qc);
1618
1619	spin_unlock_irqrestore(ap->lock, flags);
1620
1621	if (!timeout) {
1622		if (ata_probe_timeout)
1623			timeout = ata_probe_timeout * 1000;
1624		else {
1625			timeout = ata_internal_cmd_timeout(dev, command);
1626			auto_timeout = 1;
1627		}
1628	}
1629
1630	if (ap->ops->error_handler)
1631		ata_eh_release(ap);
1632
1633	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1634
1635	if (ap->ops->error_handler)
1636		ata_eh_acquire(ap);
1637
1638	ata_sff_flush_pio_task(ap);
1639
1640	if (!rc) {
1641		spin_lock_irqsave(ap->lock, flags);
1642
1643		/* We're racing with irq here.  If we lose, the
1644		 * following test prevents us from completing the qc
1645		 * twice.  If we win, the port is frozen and will be
1646		 * cleaned up by ->post_internal_cmd().
1647		 */
1648		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1649			qc->err_mask |= AC_ERR_TIMEOUT;
1650
1651			if (ap->ops->error_handler)
1652				ata_port_freeze(ap);
1653			else
1654				ata_qc_complete(qc);
1655
1656			if (ata_msg_warn(ap))
1657				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1658					     command);
1659		}
1660
1661		spin_unlock_irqrestore(ap->lock, flags);
1662	}
1663
1664	/* do post_internal_cmd */
1665	if (ap->ops->post_internal_cmd)
1666		ap->ops->post_internal_cmd(qc);
1667
1668	/* perform minimal error analysis */
1669	if (qc->flags & ATA_QCFLAG_FAILED) {
1670		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1671			qc->err_mask |= AC_ERR_DEV;
1672
1673		if (!qc->err_mask)
1674			qc->err_mask |= AC_ERR_OTHER;
1675
1676		if (qc->err_mask & ~AC_ERR_OTHER)
1677			qc->err_mask &= ~AC_ERR_OTHER;
1678	}
1679
1680	/* finish up */
1681	spin_lock_irqsave(ap->lock, flags);
1682
1683	*tf = qc->result_tf;
1684	err_mask = qc->err_mask;
1685
1686	ata_qc_free(qc);
1687	link->active_tag = preempted_tag;
1688	link->sactive = preempted_sactive;
1689	ap->qc_active = preempted_qc_active;
1690	ap->nr_active_links = preempted_nr_active_links;
1691
1692	spin_unlock_irqrestore(ap->lock, flags);
1693
1694	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1695		ata_internal_cmd_timed_out(dev, command);
1696
1697	return err_mask;
1698}
1699
1700/**
1701 *	ata_exec_internal - execute libata internal command
1702 *	@dev: Device to which the command is sent
1703 *	@tf: Taskfile registers for the command and the result
1704 *	@cdb: CDB for packet command
1705 *	@dma_dir: Data transfer direction of the command
1706 *	@buf: Data buffer of the command
1707 *	@buflen: Length of data buffer
1708 *	@timeout: Timeout in msecs (0 for default)
1709 *
1710 *	Wrapper around ata_exec_internal_sg() which takes simple
1711 *	buffer instead of sg list.
1712 *
1713 *	LOCKING:
1714 *	None.  Should be called with kernel context, might sleep.
1715 *
1716 *	RETURNS:
1717 *	Zero on success, AC_ERR_* mask on failure
1718 */
1719unsigned ata_exec_internal(struct ata_device *dev,
1720			   struct ata_taskfile *tf, const u8 *cdb,
1721			   int dma_dir, void *buf, unsigned int buflen,
1722			   unsigned long timeout)
1723{
1724	struct scatterlist *psg = NULL, sg;
1725	unsigned int n_elem = 0;
1726
1727	if (dma_dir != DMA_NONE) {
1728		WARN_ON(!buf);
1729		sg_init_one(&sg, buf, buflen);
1730		psg = &sg;
1731		n_elem++;
1732	}
1733
1734	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1735				    timeout);
1736}
1737
1738/**
1739 *	ata_do_simple_cmd - execute simple internal command
1740 *	@dev: Device to which the command is sent
1741 *	@cmd: Opcode to execute
1742 *
1743 *	Execute a 'simple' command that consists only of the opcode
1744 *	'cmd' itself, without filling in any other registers.
1745 *
1746 *	LOCKING:
1747 *	Kernel thread context (may sleep).
1748 *
1749 *	RETURNS:
1750 *	Zero on success, AC_ERR_* mask on failure
1751 */
1752unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1753{
1754	struct ata_taskfile tf;
1755
1756	ata_tf_init(dev, &tf);
1757
1758	tf.command = cmd;
1759	tf.flags |= ATA_TFLAG_DEVICE;
1760	tf.protocol = ATA_PROT_NODATA;
1761
1762	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1763}
1764
1765/**
1766 *	ata_pio_need_iordy	-	check if iordy needed
1767 *	@adev: ATA device
1768 *
1769 *	Check if the current speed of the device requires IORDY. Used
1770 *	by various controllers for chip configuration.
1771 */
1772unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1773{
1774	/* Don't set IORDY if we're preparing for reset.  IORDY may
1775	 * lead to controller lock up on certain controllers if the
1776	 * port is not occupied.  See bko#11703 for details.
1777	 */
1778	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1779		return 0;
1780	/* Controller doesn't support IORDY.  Probably a pointless
1781	 * check as the caller should know this.
1782	 */
1783	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1784		return 0;
1785	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1786	if (ata_id_is_cfa(adev->id)
1787	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1788		return 0;
1789	/* PIO3 and higher it is mandatory */
1790	if (adev->pio_mode > XFER_PIO_2)
1791		return 1;
1792	/* We turn it on when possible */
1793	if (ata_id_has_iordy(adev->id))
1794		return 1;
1795	return 0;
1796}
1797
1798/**
1799 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1800 *	@adev: ATA device
1801 *
1802 *	Compute the highest mode possible if we are not using iordy. Return
1803 *	-1 if no iordy mode is available.
1804 */
1805static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1806{
1807	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1808	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1809		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1810		/* Is the speed faster than the drive allows non IORDY ? */
1811		if (pio) {
1812			/* This is cycle times not frequency - watch the logic! */
1813			if (pio > 240)	/* PIO2 is 240nS per cycle */
1814				return 3 << ATA_SHIFT_PIO;
1815			return 7 << ATA_SHIFT_PIO;
1816		}
1817	}
1818	return 3 << ATA_SHIFT_PIO;
1819}
1820
1821/**
1822 *	ata_do_dev_read_id		-	default ID read method
1823 *	@dev: device
1824 *	@tf: proposed taskfile
1825 *	@id: data buffer
1826 *
1827 *	Issue the identify taskfile and hand back the buffer containing
1828 *	identify data. For some RAID controllers and for pre-ATA devices
1829 *	this function is wrapped or replaced by the driver.
1830 */
1831unsigned int ata_do_dev_read_id(struct ata_device *dev,
1832					struct ata_taskfile *tf, u16 *id)
1833{
1834	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1835				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1836}
1837
1838/**
1839 *	ata_dev_read_id - Read ID data from the specified device
1840 *	@dev: target device
1841 *	@p_class: pointer to class of the target device (may be changed)
1842 *	@flags: ATA_READID_* flags
1843 *	@id: buffer to read IDENTIFY data into
1844 *
1845 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1846 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1847 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1848 *	for pre-ATA4 drives.
1849 *
1850 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1851 *	now we abort if we hit that case.
1852 *
1853 *	LOCKING:
1854 *	Kernel thread context (may sleep)
1855 *
1856 *	RETURNS:
1857 *	0 on success, -errno otherwise.
1858 */
1859int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1860		    unsigned int flags, u16 *id)
1861{
1862	struct ata_port *ap = dev->link->ap;
1863	unsigned int class = *p_class;
1864	struct ata_taskfile tf;
1865	unsigned int err_mask = 0;
1866	const char *reason;
1867	bool is_semb = class == ATA_DEV_SEMB;
1868	int may_fallback = 1, tried_spinup = 0;
1869	int rc;
1870
1871	if (ata_msg_ctl(ap))
1872		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1873
1874retry:
1875	ata_tf_init(dev, &tf);
1876
1877	switch (class) {
1878	case ATA_DEV_SEMB:
1879		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1880	case ATA_DEV_ATA:
1881		tf.command = ATA_CMD_ID_ATA;
1882		break;
1883	case ATA_DEV_ATAPI:
1884		tf.command = ATA_CMD_ID_ATAPI;
1885		break;
1886	default:
1887		rc = -ENODEV;
1888		reason = "unsupported class";
1889		goto err_out;
1890	}
1891
1892	tf.protocol = ATA_PROT_PIO;
1893
1894	/* Some devices choke if TF registers contain garbage.  Make
1895	 * sure those are properly initialized.
1896	 */
1897	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1898
1899	/* Device presence detection is unreliable on some
1900	 * controllers.  Always poll IDENTIFY if available.
1901	 */
1902	tf.flags |= ATA_TFLAG_POLLING;
1903
1904	if (ap->ops->read_id)
1905		err_mask = ap->ops->read_id(dev, &tf, id);
1906	else
1907		err_mask = ata_do_dev_read_id(dev, &tf, id);
1908
1909	if (err_mask) {
1910		if (err_mask & AC_ERR_NODEV_HINT) {
1911			ata_dev_dbg(dev, "NODEV after polling detection\n");
1912			return -ENOENT;
1913		}
1914
1915		if (is_semb) {
1916			ata_dev_info(dev,
1917		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1918			/* SEMB is not supported yet */
1919			*p_class = ATA_DEV_SEMB_UNSUP;
1920			return 0;
1921		}
1922
1923		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1924			/* Device or controller might have reported
1925			 * the wrong device class.  Give a shot at the
1926			 * other IDENTIFY if the current one is
1927			 * aborted by the device.
1928			 */
1929			if (may_fallback) {
1930				may_fallback = 0;
1931
1932				if (class == ATA_DEV_ATA)
1933					class = ATA_DEV_ATAPI;
1934				else
1935					class = ATA_DEV_ATA;
1936				goto retry;
1937			}
1938
1939			/* Control reaches here iff the device aborted
1940			 * both flavors of IDENTIFYs which happens
1941			 * sometimes with phantom devices.
1942			 */
1943			ata_dev_dbg(dev,
1944				    "both IDENTIFYs aborted, assuming NODEV\n");
1945			return -ENOENT;
1946		}
1947
1948		rc = -EIO;
1949		reason = "I/O error";
1950		goto err_out;
1951	}
1952
1953	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1954		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1955			    "class=%d may_fallback=%d tried_spinup=%d\n",
1956			    class, may_fallback, tried_spinup);
1957		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1958			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1959	}
1960
1961	/* Falling back doesn't make sense if ID data was read
1962	 * successfully at least once.
1963	 */
1964	may_fallback = 0;
1965
1966	swap_buf_le16(id, ATA_ID_WORDS);
1967
1968	/* sanity check */
1969	rc = -EINVAL;
1970	reason = "device reports invalid type";
1971
1972	if (class == ATA_DEV_ATA) {
1973		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1974			goto err_out;
1975	} else {
1976		if (ata_id_is_ata(id))
1977			goto err_out;
1978	}
1979
1980	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1981		tried_spinup = 1;
1982		/*
1983		 * Drive powered-up in standby mode, and requires a specific
1984		 * SET_FEATURES spin-up subcommand before it will accept
1985		 * anything other than the original IDENTIFY command.
1986		 */
1987		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1988		if (err_mask && id[2] != 0x738c) {
1989			rc = -EIO;
1990			reason = "SPINUP failed";
1991			goto err_out;
1992		}
1993		/*
1994		 * If the drive initially returned incomplete IDENTIFY info,
1995		 * we now must reissue the IDENTIFY command.
1996		 */
1997		if (id[2] == 0x37c8)
1998			goto retry;
1999	}
2000
2001	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2002		/*
2003		 * The exact sequence expected by certain pre-ATA4 drives is:
2004		 * SRST RESET
2005		 * IDENTIFY (optional in early ATA)
2006		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2007		 * anything else..
2008		 * Some drives were very specific about that exact sequence.
2009		 *
2010		 * Note that ATA4 says LBA is mandatory, so the second check
2011		 * should never trigger.
2012		 */
2013		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2014			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2015			if (err_mask) {
2016				rc = -EIO;
2017				reason = "INIT_DEV_PARAMS failed";
2018				goto err_out;
2019			}
2020
2021			/* current CHS translation info (id[53-58]) might be
2022			 * changed. reread the identify device info.
2023			 */
2024			flags &= ~ATA_READID_POSTRESET;
2025			goto retry;
2026		}
2027	}
2028
2029	*p_class = class;
2030
2031	return 0;
2032
2033 err_out:
2034	if (ata_msg_warn(ap))
2035		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2036			     reason, err_mask);
2037	return rc;
2038}
2039
2040static int ata_do_link_spd_horkage(struct ata_device *dev)
2041{
2042	struct ata_link *plink = ata_dev_phys_link(dev);
2043	u32 target, target_limit;
2044
2045	if (!sata_scr_valid(plink))
2046		return 0;
2047
2048	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2049		target = 1;
2050	else
2051		return 0;
2052
2053	target_limit = (1 << target) - 1;
2054
2055	/* if already on stricter limit, no need to push further */
2056	if (plink->sata_spd_limit <= target_limit)
2057		return 0;
2058
2059	plink->sata_spd_limit = target_limit;
2060
2061	/* Request another EH round by returning -EAGAIN if link is
2062	 * going faster than the target speed.  Forward progress is
2063	 * guaranteed by setting sata_spd_limit to target_limit above.
2064	 */
2065	if (plink->sata_spd > target) {
2066		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2067			     sata_spd_string(target));
2068		return -EAGAIN;
2069	}
2070	return 0;
2071}
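/*
 * Worked example (illustrative): with ATA_HORKAGE_1_5_GBPS set, target = 1
 * and target_limit = (1 << 1) - 1 = 0x1, i.e. only the 1.5 Gbps bit of
 * sata_spd_limit survives.  If the link is currently negotiated faster
 * (sata_spd > 1), returning -EAGAIN asks EH for another pass so that the
 * lowered limit is actually applied by a fresh reset.
 */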
2072
2073static inline u8 ata_dev_knobble(struct ata_device *dev)
2074{
2075	struct ata_port *ap = dev->link->ap;
2076
2077	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2078		return 0;
2079
2080	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2081}
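/*
 * Illustrative note: ata_dev_knobble() flags the "SATA port but non-SATA
 * IDENTIFY data" combination, i.e. a PATA drive sitting behind a
 * SATA-to-PATA bridge.  ata_dev_configure() below uses this to apply the
 * conservative bridge limits (udma_mask capped at ATA_UDMA5 and
 * max_sectors at ATA_MAX_SECTORS).
 */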
2082
2083static int ata_dev_config_ncq(struct ata_device *dev,
2084			       char *desc, size_t desc_sz)
2085{
2086	struct ata_port *ap = dev->link->ap;
2087	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2088	unsigned int err_mask;
2089	char *aa_desc = "";
2090
2091	if (!ata_id_has_ncq(dev->id)) {
2092		desc[0] = '\0';
2093		return 0;
2094	}
2095	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2096		snprintf(desc, desc_sz, "NCQ (not used)");
2097		return 0;
2098	}
2099	if (ap->flags & ATA_FLAG_NCQ) {
2100		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2101		dev->flags |= ATA_DFLAG_NCQ;
2102	}
2103
2104	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2105		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2106		ata_id_has_fpdma_aa(dev->id)) {
2107		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2108			SATA_FPDMA_AA);
2109		if (err_mask) {
2110			ata_dev_err(dev,
2111				    "failed to enable AA (error_mask=0x%x)\n",
2112				    err_mask);
2113			if (err_mask != AC_ERR_DEV) {
2114				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2115				return -EIO;
2116			}
2117		} else
2118			aa_desc = ", AA";
2119	}
2120
2121	if (hdepth >= ddepth)
2122		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2123	else
2124		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2125			ddepth, aa_desc);
2126	return 0;
2127}
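/*
 * Worked example (illustrative, assuming the usual ATA_MAX_QUEUE of 32):
 * an AHCI port with scsi_host->can_queue = 32 gives hdepth =
 * min(32, 32 - 1) = 31, while a drive advertising 32 NCQ tags gives
 * ddepth = 32, producing the familiar "NCQ (depth 31/32)" line in dmesg.
 */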
2128
2129/**
2130 *	ata_dev_configure - Configure the specified ATA/ATAPI device
2131 *	@dev: Target device to configure
2132 *
2133 *	Configure @dev according to @dev->id.  Generic and low-level
2134 *	driver specific fixups are also applied.
2135 *
2136 *	LOCKING:
2137 *	Kernel thread context (may sleep)
2138 *
2139 *	RETURNS:
2140 *	0 on success, -errno otherwise
2141 */
2142int ata_dev_configure(struct ata_device *dev)
2143{
2144	struct ata_port *ap = dev->link->ap;
2145	struct ata_eh_context *ehc = &dev->link->eh_context;
2146	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2147	const u16 *id = dev->id;
2148	unsigned long xfer_mask;
2149	char revbuf[7];		/* XYZ-99\0 */
2150	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2151	char modelbuf[ATA_ID_PROD_LEN+1];
2152	int rc;
2153
2154	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2155		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2156		return 0;
2157	}
2158
2159	if (ata_msg_probe(ap))
2160		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2161
2162	/* set horkage */
2163	dev->horkage |= ata_dev_blacklisted(dev);
2164	ata_force_horkage(dev);
2165
2166	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2167		ata_dev_info(dev, "unsupported device, disabling\n");
2168		ata_dev_disable(dev);
2169		return 0;
2170	}
2171
2172	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2173	    dev->class == ATA_DEV_ATAPI) {
2174		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2175			     atapi_enabled ? "not supported with this driver"
2176			     : "disabled");
2177		ata_dev_disable(dev);
2178		return 0;
2179	}
2180
2181	rc = ata_do_link_spd_horkage(dev);
2182	if (rc)
2183		return rc;
2184
2185	/* let ACPI work its magic */
2186	rc = ata_acpi_on_devcfg(dev);
2187	if (rc)
2188		return rc;
2189
2190	/* massage HPA, do it early as it might change IDENTIFY data */
2191	rc = ata_hpa_resize(dev);
2192	if (rc)
2193		return rc;
2194
2195	/* print device capabilities */
2196	if (ata_msg_probe(ap))
2197		ata_dev_dbg(dev,
2198			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2199			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2200			    __func__,
2201			    id[49], id[82], id[83], id[84],
2202			    id[85], id[86], id[87], id[88]);
2203
2204	/* initialize to-be-configured parameters */
2205	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2206	dev->max_sectors = 0;
2207	dev->cdb_len = 0;
2208	dev->n_sectors = 0;
2209	dev->cylinders = 0;
2210	dev->heads = 0;
2211	dev->sectors = 0;
2212	dev->multi_count = 0;
2213
2214	/*
2215	 * common ATA, ATAPI feature tests
2216	 */
2217
2218	/* find max transfer mode; for printk only */
2219	xfer_mask = ata_id_xfermask(id);
2220
2221	if (ata_msg_probe(ap))
2222		ata_dump_id(id);
2223
2224	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2225	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2226			sizeof(fwrevbuf));
2227
2228	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2229			sizeof(modelbuf));
2230
2231	/* ATA-specific feature tests */
2232	if (dev->class == ATA_DEV_ATA) {
2233		if (ata_id_is_cfa(id)) {
2234			/* CPRM may make this media unusable */
2235			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2236				ata_dev_warn(dev,
2237	"supports DRM functions and may not be fully accessible\n");
2238			snprintf(revbuf, 7, "CFA");
2239		} else {
2240			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2241			/* Warn the user if the device has TPM extensions */
2242			if (ata_id_has_tpm(id))
2243				ata_dev_warn(dev,
2244	"supports DRM functions and may not be fully accessible\n");
2245		}
2246
2247		dev->n_sectors = ata_id_n_sectors(id);
2248
2249		/* get current R/W Multiple count setting */
2250		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2251			unsigned int max = dev->id[47] & 0xff;
2252			unsigned int cnt = dev->id[59] & 0xff;
2253			/* only recognize/allow powers of two here */
2254			if (is_power_of_2(max) && is_power_of_2(cnt))
2255				if (cnt <= max)
2256					dev->multi_count = cnt;
2257		}
2258
2259		if (ata_id_has_lba(id)) {
2260			const char *lba_desc;
2261			char ncq_desc[24];
2262
2263			lba_desc = "LBA";
2264			dev->flags |= ATA_DFLAG_LBA;
2265			if (ata_id_has_lba48(id)) {
2266				dev->flags |= ATA_DFLAG_LBA48;
2267				lba_desc = "LBA48";
2268
2269				if (dev->n_sectors >= (1UL << 28) &&
2270				    ata_id_has_flush_ext(id))
2271					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2272			}
2273
2274			/* config NCQ */
2275			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2276			if (rc)
2277				return rc;
2278
2279			/* print device info to dmesg */
2280			if (ata_msg_drv(ap) && print_info) {
2281				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2282					     revbuf, modelbuf, fwrevbuf,
2283					     ata_mode_string(xfer_mask));
2284				ata_dev_info(dev,
2285					     "%llu sectors, multi %u: %s %s\n",
2286					(unsigned long long)dev->n_sectors,
2287					dev->multi_count, lba_desc, ncq_desc);
2288			}
2289		} else {
2290			/* CHS */
2291
2292			/* Default translation */
2293			dev->cylinders	= id[1];
2294			dev->heads	= id[3];
2295			dev->sectors	= id[6];
2296
2297			if (ata_id_current_chs_valid(id)) {
2298				/* Current CHS translation is valid. */
2299				dev->cylinders = id[54];
2300				dev->heads     = id[55];
2301				dev->sectors   = id[56];
2302			}
2303
2304			/* print device info to dmesg */
2305			if (ata_msg_drv(ap) && print_info) {
2306				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2307					     revbuf,	modelbuf, fwrevbuf,
2308					     ata_mode_string(xfer_mask));
2309				ata_dev_info(dev,
2310					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2311					     (unsigned long long)dev->n_sectors,
2312					     dev->multi_count, dev->cylinders,
2313					     dev->heads, dev->sectors);
2314			}
2315		}
2316
2317		dev->cdb_len = 16;
2318	}
2319
2320	/* ATAPI-specific feature tests */
2321	else if (dev->class == ATA_DEV_ATAPI) {
2322		const char *cdb_intr_string = "";
2323		const char *atapi_an_string = "";
2324		const char *dma_dir_string = "";
2325		u32 sntf;
2326
2327		rc = atapi_cdb_len(id);
2328		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2329			if (ata_msg_warn(ap))
2330				ata_dev_warn(dev, "unsupported CDB len\n");
2331			rc = -EINVAL;
2332			goto err_out_nosup;
2333		}
2334		dev->cdb_len = (unsigned int) rc;
2335
2336		/* Enable ATAPI AN if both the host and device have
2337		 * the support.  If PMP is attached, SNTF is required
2338		 * to enable ATAPI AN to discern between PHY status
2339		 * changed notifications and ATAPI ANs.
2340		 */
2341		if (atapi_an &&
2342		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2343		    (!sata_pmp_attached(ap) ||
2344		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2345			unsigned int err_mask;
2346
2347			/* issue SET feature command to turn this on */
2348			err_mask = ata_dev_set_feature(dev,
2349					SETFEATURES_SATA_ENABLE, SATA_AN);
2350			if (err_mask)
2351				ata_dev_err(dev,
2352					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2353					    err_mask);
2354			else {
2355				dev->flags |= ATA_DFLAG_AN;
2356				atapi_an_string = ", ATAPI AN";
2357			}
2358		}
2359
2360		if (ata_id_cdb_intr(dev->id)) {
2361			dev->flags |= ATA_DFLAG_CDB_INTR;
2362			cdb_intr_string = ", CDB intr";
2363		}
2364
2365		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2366			dev->flags |= ATA_DFLAG_DMADIR;
2367			dma_dir_string = ", DMADIR";
2368		}
2369
2370		/* print device info to dmesg */
2371		if (ata_msg_drv(ap) && print_info)
2372			ata_dev_info(dev,
2373				     "ATAPI: %s, %s, max %s%s%s%s\n",
2374				     modelbuf, fwrevbuf,
2375				     ata_mode_string(xfer_mask),
2376				     cdb_intr_string, atapi_an_string,
2377				     dma_dir_string);
2378	}
2379
2380	/* determine max_sectors */
2381	dev->max_sectors = ATA_MAX_SECTORS;
2382	if (dev->flags & ATA_DFLAG_LBA48)
2383		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2384
2385	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2386	   200 sectors */
2387	if (ata_dev_knobble(dev)) {
2388		if (ata_msg_drv(ap) && print_info)
2389			ata_dev_info(dev, "applying bridge limits\n");
2390		dev->udma_mask &= ATA_UDMA5;
2391		dev->max_sectors = ATA_MAX_SECTORS;
2392	}
2393
2394	if ((dev->class == ATA_DEV_ATAPI) &&
2395	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2396		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2397		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2398	}
2399
2400	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2401		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2402					 dev->max_sectors);
2403
2404	if (ap->ops->dev_config)
2405		ap->ops->dev_config(dev);
2406
2407	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2408		/* Let the user know. We don't want to disallow opens for
2409		   rescue purposes, or in case the vendor is just a blithering
2410		   idiot. Do this after the dev_config call as some controllers
2411		   with buggy firmware may want to avoid reporting false device
2412		   bugs */
2413
2414		if (print_info) {
2415			ata_dev_warn(dev,
2416"Drive reports diagnostics failure. This may indicate a drive\n");
2417			ata_dev_warn(dev,
2418"fault or invalid emulation. Contact drive vendor for information.\n");
2419		}
2420	}
2421
2422	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2423		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2424		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2425	}
2426
2427	return 0;
2428
2429err_out_nosup:
2430	if (ata_msg_probe(ap))
2431		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2432	return rc;
2433}
2434
2435/**
2436 *	ata_cable_40wire	-	return 40 wire cable type
2437 *	@ap: port
2438 *
2439 *	Helper method for drivers which want to hardwire 40 wire cable
2440 *	detection.
2441 */
2442
2443int ata_cable_40wire(struct ata_port *ap)
2444{
2445	return ATA_CBL_PATA40;
2446}
2447
2448/**
2449 *	ata_cable_80wire	-	return 80 wire cable type
2450 *	@ap: port
2451 *
2452 *	Helper method for drivers which want to hardwire 80 wire cable
2453 *	detection.
2454 */
2455
2456int ata_cable_80wire(struct ata_port *ap)
2457{
2458	return ATA_CBL_PATA80;
2459}
2460
2461/**
2462 *	ata_cable_unknown	-	return unknown PATA cable.
2463 *	@ap: port
2464 *
2465 *	Helper method for drivers which have no PATA cable detection.
2466 */
2467
2468int ata_cable_unknown(struct ata_port *ap)
2469{
2470	return ATA_CBL_PATA_UNK;
2471}
2472
2473/**
2474 *	ata_cable_ignore	-	return ignored PATA cable.
2475 *	@ap: port
2476 *
2477 *	Helper method for drivers which don't use cable type to limit
2478 *	transfer mode.
2479 */
2480int ata_cable_ignore(struct ata_port *ap)
2481{
2482	return ATA_CBL_PATA_IGN;
2483}
2484
2485/**
2486 *	ata_cable_sata	-	return SATA cable type
2487 *	@ap: port
2488 *
2489 *	Helper method for drivers which have SATA cables
2490 */
2491
2492int ata_cable_sata(struct ata_port *ap)
2493{
2494	return ATA_CBL_SATA;
2495}
2496
2497/**
2498 *	ata_bus_probe - Reset and probe ATA bus
2499 *	@ap: Bus to probe
2500 *
2501 *	Master ATA bus probing function.  Initiates a hardware-dependent
2502 *	bus reset, then attempts to identify any devices found on
2503 *	the bus.
2504 *
2505 *	LOCKING:
2506 *	PCI/etc. bus probe sem.
2507 *
2508 *	RETURNS:
2509 *	Zero on success, negative errno otherwise.
2510 */
2511
2512int ata_bus_probe(struct ata_port *ap)
2513{
2514	unsigned int classes[ATA_MAX_DEVICES];
2515	int tries[ATA_MAX_DEVICES];
2516	int rc;
2517	struct ata_device *dev;
2518
2519	ata_for_each_dev(dev, &ap->link, ALL)
2520		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2521
2522 retry:
2523	ata_for_each_dev(dev, &ap->link, ALL) {
2524		/* If we issue an SRST then an ATA drive (not ATAPI)
2525		 * may change configuration and be in PIO0 timing. If
2526		 * we do a hard reset (or are coming from power on)
2527		 * this is true for ATA or ATAPI. Until we've set a
2528		 * suitable controller mode we should not touch the
2529		 * bus as we may be talking too fast.
2530		 */
2531		dev->pio_mode = XFER_PIO_0;
2532
2533		/* If the controller has a pio mode setup function
2534		 * then use it to set the chipset to rights. Don't
2535		 * touch the DMA setup as that will be dealt with when
2536		 * configuring devices.
2537		 */
2538		if (ap->ops->set_piomode)
2539			ap->ops->set_piomode(ap, dev);
2540	}
2541
2542	/* reset and determine device classes */
2543	ap->ops->phy_reset(ap);
2544
2545	ata_for_each_dev(dev, &ap->link, ALL) {
2546		if (dev->class != ATA_DEV_UNKNOWN)
2547			classes[dev->devno] = dev->class;
2548		else
2549			classes[dev->devno] = ATA_DEV_NONE;
2550
2551		dev->class = ATA_DEV_UNKNOWN;
2552	}
2553
2554	/* read IDENTIFY page and configure devices. We have to do the identify
2555	   specific sequence bass-ackwards so that PDIAG- is released by
2556	   the slave device */
2557
2558	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2559		if (tries[dev->devno])
2560			dev->class = classes[dev->devno];
2561
2562		if (!ata_dev_enabled(dev))
2563			continue;
2564
2565		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2566				     dev->id);
2567		if (rc)
2568			goto fail;
2569	}
2570
2571	/* Now ask for the cable type as PDIAG- should have been released */
2572	if (ap->ops->cable_detect)
2573		ap->cbl = ap->ops->cable_detect(ap);
2574
2575	/* We may have SATA bridge glue hiding here irrespective of
2576	 * the reported cable types and sensed types.  When SATA
2577	 * drives indicate we have a bridge, we don't know which end
2578	 * of the link the bridge is, which is a problem.
2579	 */
2580	ata_for_each_dev(dev, &ap->link, ENABLED)
2581		if (ata_id_is_sata(dev->id))
2582			ap->cbl = ATA_CBL_SATA;
2583
2584	/* After the identify sequence we can now set up the devices. We do
2585	   this in the normal order so that the user doesn't get confused */
2586
2587	ata_for_each_dev(dev, &ap->link, ENABLED) {
2588		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2589		rc = ata_dev_configure(dev);
2590		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2591		if (rc)
2592			goto fail;
2593	}
2594
2595	/* configure transfer mode */
2596	rc = ata_set_mode(&ap->link, &dev);
2597	if (rc)
2598		goto fail;
2599
2600	ata_for_each_dev(dev, &ap->link, ENABLED)
2601		return 0;
2602
2603	return -ENODEV;
2604
2605 fail:
2606	tries[dev->devno]--;
2607
2608	switch (rc) {
2609	case -EINVAL:
2610		/* eeek, something went very wrong, give up */
2611		tries[dev->devno] = 0;
2612		break;
2613
2614	case -ENODEV:
2615		/* give it just one more chance */
2616		tries[dev->devno] = min(tries[dev->devno], 1);
2617	case -EIO:
2618		if (tries[dev->devno] == 1) {
2619			/* This is the last chance, better to slow
2620			 * down than lose it.
2621			 */
2622			sata_down_spd_limit(&ap->link, 0);
2623			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2624		}
2625	}
2626
2627	if (!tries[dev->devno])
2628		ata_dev_disable(dev);
2629
2630	goto retry;
2631}
2632
2633/**
2634 *	sata_print_link_status - Print SATA link status
2635 *	@link: SATA link to printk link status about
2636 *
2637 *	This function prints link speed and status of a SATA link.
2638 *
2639 *	LOCKING:
2640 *	None.
2641 */
2642static void sata_print_link_status(struct ata_link *link)
2643{
2644	u32 sstatus, scontrol, tmp;
2645
2646	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2647		return;
2648	sata_scr_read(link, SCR_CONTROL, &scontrol);
2649
2650	if (ata_phys_link_online(link)) {
2651		tmp = (sstatus >> 4) & 0xf;
2652		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2653			      sata_spd_string(tmp), sstatus, scontrol);
2654	} else {
2655		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2656			      sstatus, scontrol);
2657	}
2658}
2659
2660/**
2661 *	ata_dev_pair		-	return other device on cable
2662 *	@adev: device
2663 *
2664 *	Obtain the other device on the same cable, or NULL if none
2665 *	is present.
2666 */
2667
2668struct ata_device *ata_dev_pair(struct ata_device *adev)
2669{
2670	struct ata_link *link = adev->link;
2671	struct ata_device *pair = &link->device[1 - adev->devno];
2672	if (!ata_dev_enabled(pair))
2673		return NULL;
2674	return pair;
2675}
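/*
 * Illustrative note: devno is 0 for the master and 1 for the slave, so
 * "1 - adev->devno" simply selects the other device slot on the same
 * legacy two-device link.
 */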
2676
2677/**
2678 *	sata_down_spd_limit - adjust SATA spd limit downward
2679 *	@link: Link to adjust SATA spd limit for
2680 *	@spd_limit: Additional limit
2681 *
2682 *	Adjust SATA spd limit of @link downward.  Note that this
2683 *	function only adjusts the limit.  The change must be applied
2684 *	using sata_set_spd().
2685 *
2686 *	If @spd_limit is non-zero, the speed is limited to equal to or
2687 *	lower than @spd_limit if such speed is supported.  If
2688 *	@spd_limit is slower than any supported speed, only the lowest
2689 *	supported speed is allowed.
2690 *
2691 *	LOCKING:
2692 *	Inherited from caller.
2693 *
2694 *	RETURNS:
2695 *	0 on success, negative errno on failure
2696 */
2697int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2698{
2699	u32 sstatus, spd, mask;
2700	int rc, bit;
2701
2702	if (!sata_scr_valid(link))
2703		return -EOPNOTSUPP;
2704
2705	/* If SCR can be read, use it to determine the current SPD.
2706	 * If not, use cached value in link->sata_spd.
2707	 */
2708	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2709	if (rc == 0 && ata_sstatus_online(sstatus))
2710		spd = (sstatus >> 4) & 0xf;
2711	else
2712		spd = link->sata_spd;
2713
2714	mask = link->sata_spd_limit;
2715	if (mask <= 1)
2716		return -EINVAL;
2717
2718	/* unconditionally mask off the highest bit */
2719	bit = fls(mask) - 1;
2720	mask &= ~(1 << bit);
2721
2722	/* Mask off all speeds higher than or equal to the current
2723	 * one.  Force 1.5Gbps if current SPD is not available.
2724	 */
2725	if (spd > 1)
2726		mask &= (1 << (spd - 1)) - 1;
2727	else
2728		mask &= 1;
2729
2730	/* were we already at the bottom? */
2731	if (!mask)
2732		return -EINVAL;
2733
2734	if (spd_limit) {
2735		if (mask & ((1 << spd_limit) - 1))
2736			mask &= (1 << spd_limit) - 1;
2737		else {
2738			bit = ffs(mask) - 1;
2739			mask = 1 << bit;
2740		}
2741	}
2742
2743	link->sata_spd_limit = mask;
2744
2745	ata_link_warn(link, "limiting SATA link speed to %s\n",
2746		      sata_spd_string(fls(mask)));
2747
2748	return 0;
2749}
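/*
 * Worked example (illustrative): in sata_spd_limit, bit 0 is 1.5 Gbps,
 * bit 1 is 3.0 Gbps and bit 2 is 6.0 Gbps.  Starting from mask = 0x7 with
 * the link currently at spd = 2 (3.0 Gbps): the highest bit is cleared
 * (mask = 0x3), then mask &= (1 << (2 - 1)) - 1 = 0x1, so the link is
 * limited to 1.5 Gbps and the warning prints
 * "limiting SATA link speed to 1.5 Gbps".
 */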
2750
2751static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2752{
2753	struct ata_link *host_link = &link->ap->link;
2754	u32 limit, target, spd;
2755
2756	limit = link->sata_spd_limit;
2757
2758	/* Don't configure downstream link faster than upstream link.
2759	 * It doesn't speed up anything and some PMPs choke on such
2760	 * configuration.
2761	 */
2762	if (!ata_is_host_link(link) && host_link->sata_spd)
2763		limit &= (1 << host_link->sata_spd) - 1;
2764
2765	if (limit == UINT_MAX)
2766		target = 0;
2767	else
2768		target = fls(limit);
2769
2770	spd = (*scontrol >> 4) & 0xf;
2771	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2772
2773	return spd != target;
2774}
2775
2776/**
2777 *	sata_set_spd_needed - is SATA spd configuration needed
2778 *	@link: Link in question
2779 *
2780 *	Test whether the spd limit in SControl matches
2781 *	@link->sata_spd_limit.  This function is used to determine
2782 *	whether hardreset is necessary to apply SATA spd
2783 *	configuration.
2784 *
2785 *	LOCKING:
2786 *	Inherited from caller.
2787 *
2788 *	RETURNS:
2789 *	1 if SATA spd configuration is needed, 0 otherwise.
2790 */
2791static int sata_set_spd_needed(struct ata_link *link)
2792{
2793	u32 scontrol;
2794
2795	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2796		return 1;
2797
2798	return __sata_set_spd_needed(link, &scontrol);
2799}
2800
2801/**
2802 *	sata_set_spd - set SATA spd according to spd limit
2803 *	@link: Link to set SATA spd for
2804 *
2805 *	Set SATA spd of @link according to sata_spd_limit.
2806 *
2807 *	LOCKING:
2808 *	Inherited from caller.
2809 *
2810 *	RETURNS:
2811 *	0 if spd doesn't need to be changed, 1 if spd has been
2812 *	changed.  Negative errno if SCR registers are inaccessible.
2813 */
2814int sata_set_spd(struct ata_link *link)
2815{
2816	u32 scontrol;
2817	int rc;
2818
2819	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2820		return rc;
2821
2822	if (!__sata_set_spd_needed(link, &scontrol))
2823		return 0;
2824
2825	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2826		return rc;
2827
2828	return 1;
2829}
2830
2831/*
2832 * This mode timing computation functionality is ported over from
2833 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2834 */
2835/*
2836 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2837 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2838 * for UDMA6, which is currently supported only by Maxtor drives.
2839 *
2840 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2841 */
2842
2843static const struct ata_timing ata_timing[] = {
2844/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
2845	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
2846	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
2847	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
2848	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
2849	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
2850	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
2851	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
2852
2853	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
2854	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
2855	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
2856
2857	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
2858	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
2859	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
2860	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
2861	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
2862
2863/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
2864	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
2865	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
2866	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
2867	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
2868	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
2869	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
2870	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
2871
2872	{ 0xFF }
2873};
2874
2875#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2876#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2877
2878static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2879{
2880	q->setup	= EZ(t->setup      * 1000,  T);
2881	q->act8b	= EZ(t->act8b      * 1000,  T);
2882	q->rec8b	= EZ(t->rec8b      * 1000,  T);
2883	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
2884	q->active	= EZ(t->active     * 1000,  T);
2885	q->recover	= EZ(t->recover    * 1000,  T);
2886	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
2887	q->cycle	= EZ(t->cycle      * 1000,  T);
2888	q->udma		= EZ(t->udma       * 1000, UT);
2889}
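/*
 * Worked example (illustrative): ENOUGH() is a round-up division and EZ()
 * maps an unused field (0) to 0 clocks.  Callers typically pass T as the
 * bus clock period scaled to match the "* 1000" above, e.g. roughly 30000
 * for a 33 MHz clock, so a 290 ns act8b becomes
 * ENOUGH(290 * 1000, 30000) = (289999 / 30000) + 1 = 10 clocks.
 */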
2890
2891void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2892		      struct ata_timing *m, unsigned int what)
2893{
2894	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2895	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2896	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2897	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2898	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2899	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2900	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2901	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2902	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2903}
2904
2905const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2906{
2907	const struct ata_timing *t = ata_timing;
2908
2909	while (xfer_mode > t->mode)
2910		t++;
2911
2912	if (xfer_mode == t->mode)
2913		return t;
2914	return NULL;
2915}
2916
2917int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2918		       struct ata_timing *t, int T, int UT)
2919{
2920	const u16 *id = adev->id;
2921	const struct ata_timing *s;
2922	struct ata_timing p;
2923
2924	/*
2925	 * Find the mode.
2926	 */
2927
2928	if (!(s = ata_timing_find_mode(speed)))
2929		return -EINVAL;
2930
2931	memcpy(t, s, sizeof(*s));
2932
2933	/*
2934	 * If the drive is an EIDE drive, it can tell us it needs extended
2935	 * PIO/MW_DMA cycle timing.
2936	 */
2937
2938	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2939		memset(&p, 0, sizeof(p));
2940
2941		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2942			if (speed <= XFER_PIO_2)
2943				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
2944			else if ((speed <= XFER_PIO_4) ||
2945				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
2946				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
2947		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
2948			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
2949
2950		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2951	}
2952
2953	/*
2954	 * Convert the timing to bus clock counts.
2955	 */
2956
2957	ata_timing_quantize(t, t, T, UT);
2958
2959	/*
2960	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2961	 * S.M.A.R.T. and some other commands. We have to ensure that the
2962	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2963	 */
2964
2965	if (speed > XFER_PIO_6) {
2966		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2967		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2968	}
2969
2970	/*
2971	 * Lengthen active & recovery time so that cycle time is correct.
2972	 */
2973
2974	if (t->act8b + t->rec8b < t->cyc8b) {
2975		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2976		t->rec8b = t->cyc8b - t->act8b;
2977	}
2978
2979	if (t->active + t->recover < t->cycle) {
2980		t->active += (t->cycle - (t->active + t->recover)) / 2;
2981		t->recover = t->cycle - t->active;
2982	}
2983
2984	/* In a few cases quantisation may produce enough errors to
2985	   leave t->cycle too low for the sum of active and recovery;
2986	   if so, we must correct this */
2987	if (t->active + t->recover > t->cycle)
2988		t->cycle = t->active + t->recover;
2989
2990	return 0;
2991}
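/*
 * Worked example (illustrative, using the raw XFER_PIO_4 numbers before
 * quantization): act8b = 70, rec8b = 25, cyc8b = 120.  Since 70 + 25 < 120,
 * act8b grows by (120 - 95) / 2 = 12 to 82 and rec8b becomes 120 - 82 = 38,
 * so active plus recovery again fills the whole 120 ns cycle.
 */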
2992
2993/**
2994 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2995 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2996 *	@cycle: cycle duration in ns
2997 *
2998 *	Return matching xfer mode for @cycle.  The returned mode is of
2999 *	the transfer type specified by @xfer_shift.  If @cycle is too
3000 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3001 *	than the fastest known mode, the fastest mode is returned.
3002 *
3003 *	LOCKING:
3004 *	None.
3005 *
3006 *	RETURNS:
3007 *	Matching xfer_mode, 0xff if no match found.
3008 */
3009u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3010{
3011	u8 base_mode = 0xff, last_mode = 0xff;
3012	const struct ata_xfer_ent *ent;
3013	const struct ata_timing *t;
3014
3015	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3016		if (ent->shift == xfer_shift)
3017			base_mode = ent->base;
3018
3019	for (t = ata_timing_find_mode(base_mode);
3020	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3021		unsigned short this_cycle;
3022
3023		switch (xfer_shift) {
3024		case ATA_SHIFT_PIO:
3025		case ATA_SHIFT_MWDMA:
3026			this_cycle = t->cycle;
3027			break;
3028		case ATA_SHIFT_UDMA:
3029			this_cycle = t->udma;
3030			break;
3031		default:
3032			return 0xff;
3033		}
3034
3035		if (cycle > this_cycle)
3036			break;
3037
3038		last_mode = t->mode;
3039	}
3040
3041	return last_mode;
3042}
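/*
 * Worked example (illustrative): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25)
 * walks the UDMA rows of the timing table above.  25 ns fits UDMA0 (120)
 * through UDMA4 (30) but not UDMA5 (20), so the loop breaks there and
 * XFER_UDMA_4 is returned.
 */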
3043
3044/**
3045 *	ata_down_xfermask_limit - adjust dev xfer masks downward
3046 *	@dev: Device to adjust xfer masks
3047 *	@sel: ATA_DNXFER_* selector
3048 *
3049 *	Adjust xfer masks of @dev downward.  Note that this function
3050 *	does not apply the change.  Invoking ata_set_mode() afterwards
3051 *	will apply the limit.
3052 *
3053 *	LOCKING:
3054 *	Inherited from caller.
3055 *
3056 *	RETURNS:
3057 *	0 on success, negative errno on failure
3058 */
3059int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3060{
3061	char buf[32];
3062	unsigned long orig_mask, xfer_mask;
3063	unsigned long pio_mask, mwdma_mask, udma_mask;
3064	int quiet, highbit;
3065
3066	quiet = !!(sel & ATA_DNXFER_QUIET);
3067	sel &= ~ATA_DNXFER_QUIET;
3068
3069	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3070						  dev->mwdma_mask,
3071						  dev->udma_mask);
3072	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3073
3074	switch (sel) {
3075	case ATA_DNXFER_PIO:
3076		highbit = fls(pio_mask) - 1;
3077		pio_mask &= ~(1 << highbit);
3078		break;
3079
3080	case ATA_DNXFER_DMA:
3081		if (udma_mask) {
3082			highbit = fls(udma_mask) - 1;
3083			udma_mask &= ~(1 << highbit);
3084			if (!udma_mask)
3085				return -ENOENT;
3086		} else if (mwdma_mask) {
3087			highbit = fls(mwdma_mask) - 1;
3088			mwdma_mask &= ~(1 << highbit);
3089			if (!mwdma_mask)
3090				return -ENOENT;
3091		}
3092		break;
3093
3094	case ATA_DNXFER_40C:
3095		udma_mask &= ATA_UDMA_MASK_40C;
3096		break;
3097
3098	case ATA_DNXFER_FORCE_PIO0:
3099		pio_mask &= 1;
3100	case ATA_DNXFER_FORCE_PIO:
3101		mwdma_mask = 0;
3102		udma_mask = 0;
3103		break;
3104
3105	default:
3106		BUG();
3107	}
3108
3109	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3110
3111	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3112		return -ENOENT;
3113
3114	if (!quiet) {
3115		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3116			snprintf(buf, sizeof(buf), "%s:%s",
3117				 ata_mode_string(xfer_mask),
3118				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3119		else
3120			snprintf(buf, sizeof(buf), "%s",
3121				 ata_mode_string(xfer_mask));
3122
3123		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3124	}
3125
3126	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3127			    &dev->udma_mask);
3128
3129	return 0;
3130}
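/*
 * Worked example (illustrative): with sel = ATA_DNXFER_PIO and a device
 * supporting PIO0-4 (pio_mask = 0x1f), the highest bit is cleared leaving
 * 0x0f, i.e. the next ata_set_mode() will top out at PIO3.  The DMA masks
 * are left untouched by this selector.
 */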
3131
3132static int ata_dev_set_mode(struct ata_device *dev)
3133{
3134	struct ata_port *ap = dev->link->ap;
3135	struct ata_eh_context *ehc = &dev->link->eh_context;
3136	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3137	const char *dev_err_whine = "";
3138	int ign_dev_err = 0;
3139	unsigned int err_mask = 0;
3140	int rc;
3141
3142	dev->flags &= ~ATA_DFLAG_PIO;
3143	if (dev->xfer_shift == ATA_SHIFT_PIO)
3144		dev->flags |= ATA_DFLAG_PIO;
3145
3146	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3147		dev_err_whine = " (SET_XFERMODE skipped)";
3148	else {
3149		if (nosetxfer)
3150			ata_dev_warn(dev,
3151				     "NOSETXFER but PATA detected - can't "
3152				     "skip SETXFER, might malfunction\n");
3153		err_mask = ata_dev_set_xfermode(dev);
3154	}
3155
3156	if (err_mask & ~AC_ERR_DEV)
3157		goto fail;
3158
3159	/* revalidate */
3160	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3161	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3162	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3163	if (rc)
3164		return rc;
3165
3166	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3167		/* Old CFA may refuse this command, which is just fine */
3168		if (ata_id_is_cfa(dev->id))
3169			ign_dev_err = 1;
3170		/* Catch several broken garbage emulations plus some pre
3171		   ATA devices */
3172		if (ata_id_major_version(dev->id) == 0 &&
3173					dev->pio_mode <= XFER_PIO_2)
3174			ign_dev_err = 1;
3175		/* Some very old devices and some bad newer ones fail
3176		   any kind of SET_XFERMODE request but support PIO0-2
3177		   timings and no IORDY */
3178		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3179			ign_dev_err = 1;
3180	}
3181	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3182	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3183	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3184	    dev->dma_mode == XFER_MW_DMA_0 &&
3185	    (dev->id[63] >> 8) & 1)
3186		ign_dev_err = 1;
3187
3188	/* if the device is actually configured correctly, ignore dev err */
3189	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3190		ign_dev_err = 1;
3191
3192	if (err_mask & AC_ERR_DEV) {
3193		if (!ign_dev_err)
3194			goto fail;
3195		else
3196			dev_err_whine = " (device error ignored)";
3197	}
3198
3199	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3200		dev->xfer_shift, (int)dev->xfer_mode);
3201
3202	ata_dev_info(dev, "configured for %s%s\n",
3203		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3204		     dev_err_whine);
3205
3206	return 0;
3207
3208 fail:
3209	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3210	return -EIO;
3211}
3212
3213/**
3214 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3215 *	@link: link on which timings will be programmed
3216 *	@r_failed_dev: out parameter for failed device
3217 *
3218 *	Standard implementation of the function used to tune and set
3219 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3220 *	ata_dev_set_mode() fails, pointer to the failing device is
3221 *	returned in @r_failed_dev.
3222 *
3223 *	LOCKING:
3224 *	PCI/etc. bus probe sem.
3225 *
3226 *	RETURNS:
3227 *	0 on success, negative errno otherwise
3228 */
3229
3230int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3231{
3232	struct ata_port *ap = link->ap;
3233	struct ata_device *dev;
3234	int rc = 0, used_dma = 0, found = 0;
3235
3236	/* step 1: calculate xfer_mask */
3237	ata_for_each_dev(dev, link, ENABLED) {
3238		unsigned long pio_mask, dma_mask;
3239		unsigned int mode_mask;
3240
3241		mode_mask = ATA_DMA_MASK_ATA;
3242		if (dev->class == ATA_DEV_ATAPI)
3243			mode_mask = ATA_DMA_MASK_ATAPI;
3244		else if (ata_id_is_cfa(dev->id))
3245			mode_mask = ATA_DMA_MASK_CFA;
3246
3247		ata_dev_xfermask(dev);
3248		ata_force_xfermask(dev);
3249
3250		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3251		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3252
3253		if (libata_dma_mask & mode_mask)
3254			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3255		else
3256			dma_mask = 0;
3257
3258		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3259		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3260
3261		found = 1;
3262		if (ata_dma_enabled(dev))
3263			used_dma = 1;
3264	}
3265	if (!found)
3266		goto out;
3267
3268	/* step 2: always set host PIO timings */
3269	ata_for_each_dev(dev, link, ENABLED) {
3270		if (dev->pio_mode == 0xff) {
3271			ata_dev_warn(dev, "no PIO support\n");
3272			rc = -EINVAL;
3273			goto out;
3274		}
3275
3276		dev->xfer_mode = dev->pio_mode;
3277		dev->xfer_shift = ATA_SHIFT_PIO;
3278		if (ap->ops->set_piomode)
3279			ap->ops->set_piomode(ap, dev);
3280	}
3281
3282	/* step 3: set host DMA timings */
3283	ata_for_each_dev(dev, link, ENABLED) {
3284		if (!ata_dma_enabled(dev))
3285			continue;
3286
3287		dev->xfer_mode = dev->dma_mode;
3288		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3289		if (ap->ops->set_dmamode)
3290			ap->ops->set_dmamode(ap, dev);
3291	}
3292
3293	/* step 4: update devices' xfer mode */
3294	ata_for_each_dev(dev, link, ENABLED) {
3295		rc = ata_dev_set_mode(dev);
3296		if (rc)
3297			goto out;
3298	}
3299
3300	/* Record simplex status. If we selected DMA then the other
3301	 * host channels are not permitted to do so.
3302	 */
3303	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3304		ap->host->simplex_claimed = ap;
3305
3306 out:
3307	if (rc)
3308		*r_failed_dev = dev;
3309	return rc;
3310}
3311
3312/**
3313 *	ata_wait_ready - wait for link to become ready
3314 *	@link: link to be waited on
3315 *	@deadline: deadline jiffies for the operation
3316 *	@check_ready: callback to check link readiness
3317 *
3318 *	Wait for @link to become ready.  @check_ready should return
3319 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3320 *	link doesn't seem to be occupied, other errno for other error
3321 *	conditions.
3322 *
3323 *	Transient -ENODEV conditions are allowed for
3324 *	ATA_TMOUT_FF_WAIT.
3325 *
3326 *	LOCKING:
3327 *	EH context.
3328 *
3329 *	RETURNS:
3330 *	0 if @link is ready before @deadline; otherwise, -errno.
3331 */
3332int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3333		   int (*check_ready)(struct ata_link *link))
3334{
3335	unsigned long start = jiffies;
3336	unsigned long nodev_deadline;
3337	int warned = 0;
3338
3339	/* choose which 0xff timeout to use, read comment in libata.h */
3340	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3341		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3342	else
3343		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3344
3345	/* Slave readiness can't be tested separately from master.  On
3346	 * M/S emulation configuration, this function should be called
3347	 * only on the master and it will handle both master and slave.
3348	 */
3349	WARN_ON(link == link->ap->slave_link);
3350
3351	if (time_after(nodev_deadline, deadline))
3352		nodev_deadline = deadline;
3353
3354	while (1) {
3355		unsigned long now = jiffies;
3356		int ready, tmp;
3357
3358		ready = tmp = check_ready(link);
3359		if (ready > 0)
3360			return 0;
3361
3362		/*
3363		 * -ENODEV could be transient.  Ignore -ENODEV if link
3364		 * is online.  Also, some SATA devices take a long
3365		 * time to clear 0xff after reset.  Wait for
3366		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3367		 * offline.
3368		 *
3369		 * Note that some PATA controllers (pata_ali) explode
3370		 * if status register is read more than once when
3371		 * there's no device attached.
3372		 */
3373		if (ready == -ENODEV) {
3374			if (ata_link_online(link))
3375				ready = 0;
3376			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3377				 !ata_link_offline(link) &&
3378				 time_before(now, nodev_deadline))
3379				ready = 0;
3380		}
3381
3382		if (ready)
3383			return ready;
3384		if (time_after(now, deadline))
3385			return -EBUSY;
3386
3387		if (!warned && time_after(now, start + 5 * HZ) &&
3388		    (deadline - now > 3 * HZ)) {
3389			ata_link_warn(link,
3390				"link is slow to respond, please be patient "
3391				"(ready=%d)\n", tmp);
3392			warned = 1;
3393		}
3394
3395		ata_msleep(link->ap, 50);
3396	}
3397}
3398
3399/**
3400 *	ata_wait_after_reset - wait for link to become ready after reset
3401 *	@link: link to be waited on
3402 *	@deadline: deadline jiffies for the operation
3403 *	@check_ready: callback to check link readiness
3404 *
3405 *	Wait for @link to become ready after reset.
3406 *
3407 *	LOCKING:
3408 *	EH context.
3409 *
3410 *	RETURNS:
3411 *	0 if @link is ready before @deadline; otherwise, -errno.
3412 */
3413int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3414				int (*check_ready)(struct ata_link *link))
3415{
3416	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3417
3418	return ata_wait_ready(link, deadline, check_ready);
3419}
3420
3421/**
3422 *	sata_link_debounce - debounce SATA phy status
3423 *	@link: ATA link to debounce SATA phy status for
3424 *	@params: timing parameters { interval, duration, timeout } in msec
3425 *	@deadline: deadline jiffies for the operation
3426 *
3427 *	Make sure SStatus of @link reaches stable state, determined by
3428 *	holding the same value where DET is not 1 for @duration polled
3429 *	every @interval, before @timeout.  Timeout constrains the
3430 *	beginning of the stable state.  Because DET gets stuck at 1 on
3431 *	some controllers after hot unplugging, this function waits
3432 *	until timeout and then returns 0 if DET is stable at 1.
3433 *
3434 *	@timeout is further limited by @deadline.  The sooner of the
3435 *	two is used.
3436 *
3437 *	LOCKING:
3438 *	Kernel thread context (may sleep)
3439 *
3440 *	RETURNS:
3441 *	0 on success, -errno on failure.
3442 */
3443int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3444		       unsigned long deadline)
3445{
3446	unsigned long interval = params[0];
3447	unsigned long duration = params[1];
3448	unsigned long last_jiffies, t;
3449	u32 last, cur;
3450	int rc;
3451
3452	t = ata_deadline(jiffies, params[2]);
3453	if (time_before(t, deadline))
3454		deadline = t;
3455
3456	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3457		return rc;
3458	cur &= 0xf;
3459
3460	last = cur;
3461	last_jiffies = jiffies;
3462
3463	while (1) {
3464		ata_msleep(link->ap, interval);
3465		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3466			return rc;
3467		cur &= 0xf;
3468
3469		/* DET stable? */
3470		if (cur == last) {
3471			if (cur == 1 && time_before(jiffies, deadline))
3472				continue;
3473			if (time_after(jiffies,
3474				       ata_deadline(last_jiffies, duration)))
3475				return 0;
3476			continue;
3477		}
3478
3479		/* unstable, start over */
3480		last = cur;
3481		last_jiffies = jiffies;
3482
3483		/* Check deadline.  If debouncing failed, return
3484		 * -EPIPE to tell upper layer to lower link speed.
3485		 */
3486		if (time_after(jiffies, deadline))
3487			return -EPIPE;
3488	}
3489}
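/*
 * Illustrative note (hypothetical numbers): with params = { 10, 100, 1000 }
 * the routine samples SStatus.DET every 10 ms and returns 0 once the value
 * has held steady for 100 ms (a DET stuck at 1 only succeeds after the
 * timeout, as described above); if DET is still bouncing when the 1000 ms
 * timeout or @deadline expires, -EPIPE tells the caller to try a lower
 * link speed.
 */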
3490
3491/**
3492 *	sata_link_resume - resume SATA link
3493 *	@link: ATA link to resume SATA
3494 *	@params: timing parameters { interval, duration, timeout } in msec
3495 *	@deadline: deadline jiffies for the operation
3496 *
3497 *	Resume SATA phy @link and debounce it.
3498 *
3499 *	LOCKING:
3500 *	Kernel thread context (may sleep)
3501 *
3502 *	RETURNS:
3503 *	0 on success, -errno on failure.
3504 */
3505int sata_link_resume(struct ata_link *link, const unsigned long *params,
3506		     unsigned long deadline)
3507{
3508	int tries = ATA_LINK_RESUME_TRIES;
3509	u32 scontrol, serror;
3510	int rc;
3511
3512	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3513		return rc;
3514
3515	/*
3516	 * Writes to SControl sometimes get ignored under certain
3517	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3518	 * cleared.
3519	 */
3520	do {
3521		scontrol = (scontrol & 0x0f0) | 0x300;
3522		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3523			return rc;
3524		/*
3525		 * Some PHYs react badly if SStatus is pounded
3526		 * immediately after resuming.  Delay 200ms before
3527		 * debouncing.
3528		 */
3529		ata_msleep(link->ap, 200);
3530
3531		/* is SControl restored correctly? */
3532		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3533			return rc;
3534	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3535
3536	if ((scontrol & 0xf0f) != 0x300) {
3537		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3538			     scontrol);
3539		return 0;
3540	}
3541
3542	if (tries < ATA_LINK_RESUME_TRIES)
3543		ata_link_warn(link, "link resume succeeded after %d retries\n",
3544			      ATA_LINK_RESUME_TRIES - tries);
3545
3546	if ((rc = sata_link_debounce(link, params, deadline)))
3547		return rc;
3548
3549	/* clear SError, some PHYs require this even for SRST to work */
3550	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3551		rc = sata_scr_write(link, SCR_ERROR, serror);
3552
3553	return rc != -EINVAL ? rc : 0;
3554}
3555
3556/**
3557 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3558 *	@link: ATA link to manipulate SControl for
3559 *	@policy: LPM policy to configure
3560 *	@spm_wakeup: initiate LPM transition to active state
3561 *
3562 *	Manipulate the IPM field of the SControl register of @link
3563 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3564 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3565 *	the link.  This function also clears PHYRDY_CHG before
3566 *	returning.
3567 *
3568 *	LOCKING:
3569 *	EH context.
3570 *
3571 *	RETURNS:
3572 *	0 on success, -errno otherwise.
3573 */
3574int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3575		      bool spm_wakeup)
3576{
3577	struct ata_eh_context *ehc = &link->eh_context;
3578	bool woken_up = false;
3579	u32 scontrol;
3580	int rc;
3581
3582	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3583	if (rc)
3584		return rc;
3585
3586	switch (policy) {
3587	case ATA_LPM_MAX_POWER:
3588		/* disable all LPM transitions */
3589		scontrol |= (0x3 << 8);
3590		/* initiate transition to active state */
3591		if (spm_wakeup) {
3592			scontrol |= (0x4 << 12);
3593			woken_up = true;
3594		}
3595		break;
3596	case ATA_LPM_MED_POWER:
3597		/* allow LPM to PARTIAL */
3598		scontrol &= ~(0x1 << 8);
3599		scontrol |= (0x2 << 8);
3600		break;
3601	case ATA_LPM_MIN_POWER:
3602		if (ata_link_nr_enabled(link) > 0)
3603			/* no restrictions on LPM transitions */
3604			scontrol &= ~(0x3 << 8);
3605		else {
3606			/* empty port, power off */
3607			scontrol &= ~0xf;
3608			scontrol |= (0x1 << 2);
3609		}
3610		break;
3611	default:
3612		WARN_ON(1);
3613	}
3614
3615	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3616	if (rc)
3617		return rc;
3618
3619	/* give the link time to transit out of LPM state */
3620	if (woken_up)
3621		msleep(10);
3622
3623	/* clear PHYRDY_CHG from SError */
3624	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3625	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3626}
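/*
 * Illustrative note (field layout per the SATA specification): SControl is
 * DET[3:0], SPD[7:4], IPM[11:8], SPM[15:12].  In the IPM field bit 0
 * forbids Partial and bit 1 forbids Slumber, so 0x3 << 8 disables LPM
 * entirely while 0x2 << 8 leaves only Partial allowed; 0x4 << 12 requests
 * a transition back to the active state, and DET = 0x4 takes the phy
 * offline for the empty-port case above.
 */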
3627
3628/**
3629 *	ata_std_prereset - prepare for reset
3630 *	@link: ATA link to be reset
3631 *	@deadline: deadline jiffies for the operation
3632 *
3633 *	@link is about to be reset.  Initialize it.  Failure from
3634 *	prereset makes libata abort whole reset sequence and give up
3635 *	that port, so prereset should be best-effort.  It does its
3636 *	best to prepare for reset sequence but if things go wrong, it
3637 *	should just whine, not fail.
3638 *
3639 *	LOCKING:
3640 *	Kernel thread context (may sleep)
3641 *
3642 *	RETURNS:
3643 *	0 on success, -errno otherwise.
3644 */
3645int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3646{
3647	struct ata_port *ap = link->ap;
3648	struct ata_eh_context *ehc = &link->eh_context;
3649	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3650	int rc;
3651
3652	/* if we're about to do hardreset, nothing more to do */
3653	if (ehc->i.action & ATA_EH_HARDRESET)
3654		return 0;
3655
3656	/* if SATA, resume link */
3657	if (ap->flags & ATA_FLAG_SATA) {
3658		rc = sata_link_resume(link, timing, deadline);
3659		/* whine about phy resume failure but proceed */
3660		if (rc && rc != -EOPNOTSUPP)
3661			ata_link_warn(link,
3662				      "failed to resume link for reset (errno=%d)\n",
3663				      rc);
3664	}
3665
3666	/* no point in trying softreset on offline link */
3667	if (ata_phys_link_offline(link))
3668		ehc->i.action &= ~ATA_EH_SOFTRESET;
3669
3670	return 0;
3671}
3672
3673/**
3674 *	sata_link_hardreset - reset link via SATA phy reset
3675 *	@link: link to reset
3676 *	@timing: timing parameters { interval, duration, timeout } in msec
3677 *	@deadline: deadline jiffies for the operation
3678 *	@online: optional out parameter indicating link onlineness
3679 *	@check_ready: optional callback to check link readiness
3680 *
3681 *	SATA phy-reset @link using DET bits of SControl register.
3682 *	After hardreset, link readiness is waited upon using
3683 *	ata_wait_ready() if @check_ready is specified.  LLDs are
3684 *	allowed to not specify @check_ready and wait by themselves after this
3685 *	function returns.  Device classification is LLD's
3686 *	responsibility.
3687 *
3688 *	*@online is set to one iff reset succeeded and @link is online
3689 *	after reset.
3690 *
3691 *	LOCKING:
3692 *	Kernel thread context (may sleep)
3693 *
3694 *	RETURNS:
3695 *	0 on success, -errno otherwise.
3696 */
3697int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3698			unsigned long deadline,
3699			bool *online, int (*check_ready)(struct ata_link *))
3700{
3701	u32 scontrol;
3702	int rc;
3703
3704	DPRINTK("ENTER\n");
3705
3706	if (online)
3707		*online = false;
3708
3709	if (sata_set_spd_needed(link)) {
3710		/* SATA spec says nothing about how to reconfigure
3711		 * spd.  To be on the safe side, turn off phy during
3712		 * reconfiguration.  This works for at least ICH7 AHCI
3713		 * and Sil3124.
3714		 */
3715		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3716			goto out;
3717
3718		scontrol = (scontrol & 0x0f0) | 0x304;
3719
3720		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3721			goto out;
3722
3723		sata_set_spd(link);
3724	}
3725
3726	/* issue phy wake/reset */
3727	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3728		goto out;
3729
3730	scontrol = (scontrol & 0x0f0) | 0x301;
3731
3732	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3733		goto out;
3734
3735	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3736	 * 10.4.2 says at least 1 ms.
3737	 */
3738	ata_msleep(link->ap, 1);
3739
3740	/* bring link back */
3741	rc = sata_link_resume(link, timing, deadline);
3742	if (rc)
3743		goto out;
3744	/* if link is offline nothing more to do */
3745	if (ata_phys_link_offline(link))
3746		goto out;
3747
3748	/* Link is online.  From this point, -ENODEV too is an error. */
3749	if (online)
3750		*online = true;
3751
3752	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3753		/* If PMP is supported, we have to do follow-up SRST.
3754		 * Some PMPs don't send D2H Reg FIS after hardreset if
3755		 * the first port is empty.  Wait only for
3756		 * ATA_TMOUT_PMP_SRST_WAIT.
3757		 */
3758		if (check_ready) {
3759			unsigned long pmp_deadline;
3760
3761			pmp_deadline = ata_deadline(jiffies,
3762						    ATA_TMOUT_PMP_SRST_WAIT);
3763			if (time_after(pmp_deadline, deadline))
3764				pmp_deadline = deadline;
3765			ata_wait_ready(link, pmp_deadline, check_ready);
3766		}
3767		rc = -EAGAIN;
3768		goto out;
3769	}
3770
3771	rc = 0;
3772	if (check_ready)
3773		rc = ata_wait_ready(link, deadline, check_ready);
3774 out:
3775	if (rc && rc != -EAGAIN) {
3776		/* online is set iff link is online && reset succeeded */
3777		if (online)
3778			*online = false;
3779		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3780	}
3781	DPRINTK("EXIT, rc=%d\n", rc);
3782	return rc;
3783}
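/*
 * Example (hypothetical LLD sketch, not part of libata): a driver's
 * ->hardreset callback typically wraps sata_link_hardreset() and passes
 * its own readiness check so that classification can follow:
 *
 *	static int foo_hardreset(struct ata_link *link, unsigned int *class,
 *				 unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 foo_check_ready);
 *		if (online)
 *			*class = foo_dev_classify(link->ap);
 *		return rc;
 *	}
 *
 * foo_check_ready() and foo_dev_classify() are assumed, driver-specific
 * helpers.
 */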
3784
3785/**
3786 *	sata_std_hardreset - COMRESET w/o waiting or classification
3787 *	@link: link to reset
3788 *	@class: resulting class of attached device
3789 *	@deadline: deadline jiffies for the operation
3790 *
3791 *	Standard SATA COMRESET w/o waiting or classification.
3792 *
3793 *	LOCKING:
3794 *	Kernel thread context (may sleep)
3795 *
3796 *	RETURNS:
3797 *	0 if link offline, -EAGAIN if link online, -errno on errors.
3798 */
3799int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3800		       unsigned long deadline)
3801{
3802	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3803	bool online;
3804	int rc;
3805
3806	/* do hardreset */
3807	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3808	return online ? -EAGAIN : rc;
3809}
3810
3811/**
3812 *	ata_std_postreset - standard postreset callback
3813 *	@link: the target ata_link
3814 *	@classes: classes of attached devices
3815 *
3816 *	This function is invoked after a successful reset.  Note that
3817 *	the device might have been reset more than once using
3818 *	different reset methods before postreset is invoked.
3819 *
3820 *	LOCKING:
3821 *	Kernel thread context (may sleep)
3822 */
3823void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3824{
3825	u32 serror;
3826
3827	DPRINTK("ENTER\n");
3828
3829	/* reset complete, clear SError */
3830	if (!sata_scr_read(link, SCR_ERROR, &serror))
3831		sata_scr_write(link, SCR_ERROR, serror);
3832
3833	/* print link status */
3834	sata_print_link_status(link);
3835
3836	DPRINTK("EXIT\n");
3837}
3838
3839/**
3840 *	ata_dev_same_device - Determine whether new ID matches configured device
3841 *	@dev: device to compare against
3842 *	@new_class: class of the new device
3843 *	@new_id: IDENTIFY page of the new device
3844 *
3845 *	Compare @new_class and @new_id against @dev and determine
3846 *	whether @dev is the device indicated by @new_class and
3847 *	@new_id.
3848 *
3849 *	LOCKING:
3850 *	None.
3851 *
3852 *	RETURNS:
3853 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3854 */
3855static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3856			       const u16 *new_id)
3857{
3858	const u16 *old_id = dev->id;
3859	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3860	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3861
3862	if (dev->class != new_class) {
3863		ata_dev_info(dev, "class mismatch %d != %d\n",
3864			     dev->class, new_class);
3865		return 0;
3866	}
3867
3868	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3869	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3870	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3871	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3872
3873	if (strcmp(model[0], model[1])) {
3874		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3875			     model[0], model[1]);
3876		return 0;
3877	}
3878
3879	if (strcmp(serial[0], serial[1])) {
3880		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3881			     serial[0], serial[1]);
3882		return 0;
3883	}
3884
3885	return 1;
3886}
3887
3888/**
3889 *	ata_dev_reread_id - Re-read IDENTIFY data
3890 *	@dev: target ATA device
3891 *	@readid_flags: read ID flags
3892 *
3893 *	Re-read IDENTIFY page and make sure @dev is still attached to
3894 *	the port.
3895 *
3896 *	LOCKING:
3897 *	Kernel thread context (may sleep)
3898 *
3899 *	RETURNS:
3900 *	0 on success, negative errno otherwise
3901 */
3902int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3903{
3904	unsigned int class = dev->class;
3905	u16 *id = (void *)dev->link->ap->sector_buf;
3906	int rc;
3907
3908	/* read ID data */
3909	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3910	if (rc)
3911		return rc;
3912
3913	/* is the device still there? */
3914	if (!ata_dev_same_device(dev, class, id))
3915		return -ENODEV;
3916
3917	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3918	return 0;
3919}
3920
3921/**
3922 *	ata_dev_revalidate - Revalidate ATA device
3923 *	@dev: device to revalidate
3924 *	@new_class: new class code
3925 *	@readid_flags: read ID flags
3926 *
3927 *	Re-read IDENTIFY page, make sure @dev is still attached to the
3928 *	port and reconfigure it according to the new IDENTIFY page.
3929 *
3930 *	LOCKING:
3931 *	Kernel thread context (may sleep)
3932 *
3933 *	RETURNS:
3934 *	0 on success, negative errno otherwise
3935 */
3936int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3937		       unsigned int readid_flags)
3938{
3939	u64 n_sectors = dev->n_sectors;
3940	u64 n_native_sectors = dev->n_native_sectors;
3941	int rc;
3942
3943	if (!ata_dev_enabled(dev))
3944		return -ENODEV;
3945
3946	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3947	if (ata_class_enabled(new_class) &&
3948	    new_class != ATA_DEV_ATA &&
3949	    new_class != ATA_DEV_ATAPI &&
3950	    new_class != ATA_DEV_SEMB) {
3951		ata_dev_info(dev, "class mismatch %u != %u\n",
3952			     dev->class, new_class);
3953		rc = -ENODEV;
3954		goto fail;
3955	}
3956
3957	/* re-read ID */
3958	rc = ata_dev_reread_id(dev, readid_flags);
3959	if (rc)
3960		goto fail;
3961
3962	/* configure device according to the new ID */
3963	rc = ata_dev_configure(dev);
3964	if (rc)
3965		goto fail;
3966
3967	/* verify n_sectors hasn't changed */
3968	if (dev->class != ATA_DEV_ATA || !n_sectors ||
3969	    dev->n_sectors == n_sectors)
3970		return 0;
3971
3972	/* n_sectors has changed */
3973	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3974		     (unsigned long long)n_sectors,
3975		     (unsigned long long)dev->n_sectors);
3976
3977	/*
3978	 * Something could have caused HPA to be unlocked
3979	 * involuntarily.  If n_native_sectors hasn't changed and the
3980	 * new size matches it, keep the device.
3981	 */
3982	if (dev->n_native_sectors == n_native_sectors &&
3983	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3984		ata_dev_warn(dev,
3985			     "new n_sectors matches native, probably "
3986			     "late HPA unlock, n_sectors updated\n");
3987		/* use the larger n_sectors */
3988		return 0;
3989	}
3990
3991	/*
3992	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
3993	 * unlocking HPA in those cases.
3994	 *
3995	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3996	 */
3997	if (dev->n_native_sectors == n_native_sectors &&
3998	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3999	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4000		ata_dev_warn(dev,
4001			     "old n_sectors matches native, probably "
4002			     "late HPA lock, will try to unlock HPA\n");
4003		/* try unlocking HPA */
4004		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4005		rc = -EIO;
4006	} else
4007		rc = -ENODEV;
4008
4009	/* restore original n_[native_]sectors and fail */
4010	dev->n_native_sectors = n_native_sectors;
4011	dev->n_sectors = n_sectors;
4012 fail:
4013	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4014	return rc;
4015}
4016
4017struct ata_blacklist_entry {
4018	const char *model_num;
4019	const char *model_rev;
4020	unsigned long horkage;
4021};
4022
4023static const struct ata_blacklist_entry ata_device_blacklist [] = {
4024	/* Devices with DMA related problems under Linux */
4025	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4026	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4027	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4028	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4029	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4030	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4031	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4032	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4033	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4034	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4035	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4036	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4037	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4038	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4039	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4040	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4041	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4042	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4043	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4044	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4045	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4046	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4047	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4048	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4049	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4050	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4051	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4052	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4053	/* Odd clown on sil3726/4726 PMPs */
4054	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4055
4056	/* Weird ATAPI devices */
4057	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4058	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4059
4060	/* Devices we expect to fail diagnostics */
4061
4062	/* Devices where NCQ should be avoided */
4063	/* NCQ is slow */
4064	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4065	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4066	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4067	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4068	/* NCQ is broken */
4069	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4070	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4071	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4072	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4073	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4074
4075	/* Seagate NCQ + FLUSH CACHE firmware bug */
4076	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4077						ATA_HORKAGE_FIRMWARE_WARN },
4078
4079	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4080						ATA_HORKAGE_FIRMWARE_WARN },
4081
4082	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4083						ATA_HORKAGE_FIRMWARE_WARN },
4084
4085	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4086						ATA_HORKAGE_FIRMWARE_WARN },
4087
4088	/* Blacklist entries taken from Silicon Image 3124/3132
4089	   Windows driver .inf file - also several Linux problem reports */
4090	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4091	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4092	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4093
4094	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4095	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4096
4097	/* devices which puke on READ_NATIVE_MAX */
4098	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4099	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4100	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4101	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4102
4103	/* this one allows HPA unlocking but fails IOs on the area */
4104	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4105
4106	/* Devices which report 1 sector over size HPA */
4107	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4108	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4109	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4110
4111	/* Devices which get the IVB wrong */
4112	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4113	/* Maybe we should just blacklist TSSTcorp... */
4114	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4115
4116	/* Devices that do not need bridging limits applied */
4117	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4118
4119	/* Devices which aren't very happy with higher link speeds */
4120	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4121
4122	/*
4123	 * Devices which choke on SETXFER.  Applies only if both the
4124	 * device and controller are SATA.
4125	 */
4126	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4127	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4128	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4129
4130	/* End Marker */
4131	{ }
4132};
4133
4134/**
4135 *	glob_match - match a text string against a glob-style pattern
4136 *	@text: the string to be examined
4137 *	@pattern: the glob-style pattern to be matched against
4138 *
4139 *	Either/both of text and pattern can be empty strings.
4140 *
4141 *	Match text against a glob-style pattern, with wildcards and simple sets:
4142 *
4143 *		?	matches any single character.
4144 *		*	matches any run of characters.
4145 *		[xyz]	matches a single character from the set: x, y, or z.
4146 *		[a-d]	matches a single character from the range: a, b, c, or d.
4147 *		[a-d0-9] matches a single character from either range.
4148 *
4149 *	The special characters ?, [, -, or * can be matched using a set, e.g. [*]
4150 *	Behaviour with malformed patterns is undefined, though generally reasonable.
4151 *
4152 *	Sample patterns:  "SD1?",  "SD1[0-5]",  "*R0",  "SD*1?[012]*xx"
4153 *
4154 *	This function uses one level of recursion per '*' in pattern.
4155 *	Since it calls _nothing_ else, and has _no_ explicit local variables,
4156 *	this will not cause stack problems for any reasonable use here.
4157 *
4158 *	RETURNS:
4159 *	0 on match, 1 otherwise.
4160 */
4161static int glob_match (const char *text, const char *pattern)
4162{
4163	do {
4164		/* Match single character or a '?' wildcard */
4165		if (*text == *pattern || *pattern == '?') {
4166			if (!*pattern++)
4167				return 0;  /* End of both strings: match */
4168		} else {
4169			/* Match single char against a '[' bracketed ']' pattern set */
4170			if (!*text || *pattern != '[')
4171				break;  /* Not a pattern set */
4172			while (*++pattern && *pattern != ']' && *text != *pattern) {
4173				if (*pattern == '-' && *(pattern - 1) != '[')
4174					if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4175						++pattern;
4176						break;
4177					}
4178			}
4179			if (!*pattern || *pattern == ']')
4180				return 1;  /* No match */
4181			while (*pattern && *pattern++ != ']');
4182		}
4183	} while (*++text && *pattern);
4184
4185	/* Match any run of chars against a '*' wildcard */
4186	if (*pattern == '*') {
4187		if (!*++pattern)
4188			return 0;  /* Match: avoid recursion at end of pattern */
4189		/* Loop to handle additional pattern chars after the wildcard */
4190		while (*text) {
4191			if (glob_match(text, pattern) == 0)
4192				return 0;  /* Remainder matched */
4193			++text;  /* Absorb (match) this char and try again */
4194		}
4195	}
4196	if (!*text && !*pattern)
4197		return 0;  /* End of both strings: match */
4198	return 1;  /* No match */
4199}
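/*
 * Examples against the blacklist table above (0 means match):
 *
 *	glob_match("ST3320613AS",    "ST3320[68]13AS") == 0
 *	glob_match("Maxtor 7V300F0", "Maxtor *")       == 0
 *	glob_match("ST31000333AS",   "ST3320[68]13AS") == 1
 */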
4200
4201static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4202{
4203	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4204	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4205	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4206
4207	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4208	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4209
4210	while (ad->model_num) {
4211		if (!glob_match(model_num, ad->model_num)) {
4212			if (ad->model_rev == NULL)
4213				return ad->horkage;
4214			if (!glob_match(model_rev, ad->model_rev))
4215				return ad->horkage;
4216		}
4217		ad++;
4218	}
4219	return 0;
4220}
4221
4222static int ata_dma_blacklisted(const struct ata_device *dev)
4223{
4224	/* We don't support polling DMA.
4225	 * Blacklist DMA for ATAPI devices with CDB-intr (use PIO instead)
4226	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4227	 */
4228	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4229	    (dev->flags & ATA_DFLAG_CDB_INTR))
4230		return 1;
4231	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4232}
4233
4234/**
4235 *	ata_is_40wire		-	check drive side detection
4236 *	@dev: device
4237 *
4238 *	Perform drive side detection decoding, allowing for device vendors
4239 *	who can't follow the documentation.
4240 */
4241
4242static int ata_is_40wire(struct ata_device *dev)
4243{
4244	if (dev->horkage & ATA_HORKAGE_IVB)
4245		return ata_drive_40wire_relaxed(dev->id);
4246	return ata_drive_40wire(dev->id);
4247}
4248
4249/**
4250 *	cable_is_40wire		-	40/80/SATA decider
4251 *	@ap: port to consider
4252 *
4253 *	This function encapsulates the policy for speed management
4254 *	in one place. At the moment we don't cache the result but
4255 *	there is a good case for setting ap->cbl to the result when
4256 *	we are called with unknown cables (and figuring out if it
4257 *	impacts hotplug at all).
4258 *
4259 *	Return 1 if the cable appears to be 40 wire.
4260 */
4261
4262static int cable_is_40wire(struct ata_port *ap)
4263{
4264	struct ata_link *link;
4265	struct ata_device *dev;
4266
4267	/* If the controller thinks we are 40 wire, we are. */
4268	if (ap->cbl == ATA_CBL_PATA40)
4269		return 1;
4270
4271	/* If the controller thinks we are 80 wire, we are. */
4272	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4273		return 0;
4274
4275	/* If the system is known to be 40 wire short cable (eg
4276	 * laptop), then we allow 80 wire modes even if the drive
4277	 * isn't sure.
4278	 */
4279	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4280		return 0;
4281
4282	/* If the controller doesn't know, we scan.
4283	 *
4284	 * Note: We look for all 40 wire detects at this point.  Any
4285	 *       80 wire detect is taken to be 80 wire cable because
4286	 * - in many setups only the one drive (slave if present) will
4287	 *   give a valid detect
4288	 * - if you have a non detect capable drive you don't want it
4289	 *   to colour the choice
4290	 */
4291	ata_for_each_link(link, ap, EDGE) {
4292		ata_for_each_dev(dev, link, ENABLED) {
4293			if (!ata_is_40wire(dev))
4294				return 0;
4295		}
4296	}
4297	return 1;
4298}
4299
4300/**
4301 *	ata_dev_xfermask - Compute supported xfermask of the given device
4302 *	@dev: Device to compute xfermask for
4303 *
4304 *	Compute supported xfermask of @dev and store it in
4305 *	dev->*_mask.  This function is responsible for applying all
4306 *	known limits including host controller limits, device
4307 *	blacklist, etc...
4308 *
4309 *	LOCKING:
4310 *	None.
4311 */
4312static void ata_dev_xfermask(struct ata_device *dev)
4313{
4314	struct ata_link *link = dev->link;
4315	struct ata_port *ap = link->ap;
4316	struct ata_host *host = ap->host;
4317	unsigned long xfer_mask;
4318
4319	/* controller modes available */
4320	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4321				      ap->mwdma_mask, ap->udma_mask);
4322
4323	/* drive modes available */
4324	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4325				       dev->mwdma_mask, dev->udma_mask);
4326	xfer_mask &= ata_id_xfermask(dev->id);
4327
4328	/*
4329	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4330	 *	cable
4331	 */
4332	if (ata_dev_pair(dev)) {
4333		/* No PIO5 or PIO6 */
4334		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4335		/* No MWDMA3 or MWDMA4 */
4336		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4337	}
4338
4339	if (ata_dma_blacklisted(dev)) {
4340		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4341		ata_dev_warn(dev,
4342			     "device is on DMA blacklist, disabling DMA\n");
4343	}
4344
4345	if ((host->flags & ATA_HOST_SIMPLEX) &&
4346	    host->simplex_claimed && host->simplex_claimed != ap) {
4347		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4348		ata_dev_warn(dev,
4349			     "simplex DMA is claimed by other device, disabling DMA\n");
4350	}
4351
4352	if (ap->flags & ATA_FLAG_NO_IORDY)
4353		xfer_mask &= ata_pio_mask_no_iordy(dev);
4354
4355	if (ap->ops->mode_filter)
4356		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4357
4358	/* Apply cable rule here.  Don't apply it early because when
4359	 * we handle hot plug the cable type can itself change.
4360	 * Check this last so that we know if the transfer rate was
4361	 * solely limited by the cable.
4362	 * Unknown or 80 wire cables reported host side are checked
4363	 * drive side as well. Cases where we know a 40wire cable
4364	 * is used safely for 80 are not checked here.
4365	 */
4366	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4367		/* UDMA/44 or higher would be available */
4368		if (cable_is_40wire(ap)) {
4369			ata_dev_warn(dev,
4370				     "limited to UDMA/33 due to 40-wire cable\n");
4371			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4372		}
4373
4374	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4375			    &dev->mwdma_mask, &dev->udma_mask);
4376}
4377
4378/**
4379 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4380 *	@dev: Device to which command will be sent
4381 *
4382 *	Issue SET FEATURES - XFER MODE command to device @dev
4383 *	on port @ap.
4384 *
4385 *	LOCKING:
4386 *	PCI/etc. bus probe sem.
4387 *
4388 *	RETURNS:
4389 *	0 on success, AC_ERR_* mask otherwise.
4390 */
4391
4392static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4393{
4394	struct ata_taskfile tf;
4395	unsigned int err_mask;
4396
4397	/* set up set-features taskfile */
4398	DPRINTK("set features - xfer mode\n");
4399
4400	/* Some controllers and ATAPI devices show flaky interrupt
4401	 * behavior after setting xfer mode.  Use polling instead.
4402	 */
4403	ata_tf_init(dev, &tf);
4404	tf.command = ATA_CMD_SET_FEATURES;
4405	tf.feature = SETFEATURES_XFER;
4406	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4407	tf.protocol = ATA_PROT_NODATA;
4408	/* If we are using IORDY we must send the mode setting command */
4409	if (ata_pio_need_iordy(dev))
4410		tf.nsect = dev->xfer_mode;
4411	/* If the device has IORDY and the controller does not - turn it off */
4412 	else if (ata_id_has_iordy(dev->id))
4413		tf.nsect = 0x01;
4414	else /* In the ancient relic department - skip all of this */
4415		return 0;
4416
4417	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4418
4419	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4420	return err_mask;
4421}
4422
4423/**
4424 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4425 *	@dev: Device to which command will be sent
4426 *	@enable: Whether to enable or disable the feature
4427 *	@feature: The feature to set, passed in the sector count field
4428 *
4429 *	Issue SET FEATURES - SATA FEATURES command to device @dev
4430 *	on port @ap with the sector count set to @feature.
4431 *
4432 *	LOCKING:
4433 *	PCI/etc. bus probe sem.
4434 *
4435 *	RETURNS:
4436 *	0 on success, AC_ERR_* mask otherwise.
4437 */
4438unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4439{
4440	struct ata_taskfile tf;
4441	unsigned int err_mask;
4442
4443	/* set up set-features taskfile */
4444	DPRINTK("set features - SATA features\n");
4445
4446	ata_tf_init(dev, &tf);
4447	tf.command = ATA_CMD_SET_FEATURES;
4448	tf.feature = enable;
4449	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4450	tf.protocol = ATA_PROT_NODATA;
4451	tf.nsect = feature;
4452
4453	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4454
4455	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4456	return err_mask;
4457}
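/*
 * Example (sketch, error handling abbreviated): toggling a SATA feature
 * such as device-initiated power management looks like:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *	if (err_mask && err_mask != AC_ERR_DEV)
 *		return -EIO;
 */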
4458
4459/**
4460 *	ata_dev_init_params - Issue INIT DEV PARAMS command
4461 *	@dev: Device to which command will be sent
4462 *	@heads: Number of heads (taskfile parameter)
4463 *	@sectors: Number of sectors (taskfile parameter)
4464 *
4465 *	LOCKING:
4466 *	Kernel thread context (may sleep)
4467 *
4468 *	RETURNS:
4469 *	0 on success, AC_ERR_* mask otherwise.
4470 */
4471static unsigned int ata_dev_init_params(struct ata_device *dev,
4472					u16 heads, u16 sectors)
4473{
4474	struct ata_taskfile tf;
4475	unsigned int err_mask;
4476
4477	/* Number of sectors per track 1-255. Number of heads 1-16 */
4478	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4479		return AC_ERR_INVALID;
4480
4481	/* set up init dev params taskfile */
4482	DPRINTK("init dev params \n");
4483
4484	ata_tf_init(dev, &tf);
4485	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4486	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4487	tf.protocol = ATA_PROT_NODATA;
4488	tf.nsect = sectors;
4489	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4490
4491	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4492	/* A clean abort indicates an original or just-out-of-spec drive,
4493	   so continue: the setup is issued based on the geometry the
4494	   drive reports as working. */
4495	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4496		err_mask = 0;
4497
4498	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4499	return err_mask;
4500}
4501
4502/**
4503 *	ata_sg_clean - Unmap DMA memory associated with command
4504 *	@qc: Command containing DMA memory to be released
4505 *
4506 *	Unmap all mapped DMA memory associated with this command.
4507 *
4508 *	LOCKING:
4509 *	spin_lock_irqsave(host lock)
4510 */
4511void ata_sg_clean(struct ata_queued_cmd *qc)
4512{
4513	struct ata_port *ap = qc->ap;
4514	struct scatterlist *sg = qc->sg;
4515	int dir = qc->dma_dir;
4516
4517	WARN_ON_ONCE(sg == NULL);
4518
4519	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4520
4521	if (qc->n_elem)
4522		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4523
4524	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4525	qc->sg = NULL;
4526}
4527
4528/**
4529 *	atapi_check_dma - Check whether ATAPI DMA can be supported
4530 *	@qc: Metadata associated with taskfile to check
4531 *
4532 *	Allow low-level driver to filter ATA PACKET commands, returning
4533 *	a status indicating whether or not it is OK to use DMA for the
4534 *	supplied PACKET command.
4535 *
4536 *	LOCKING:
4537 *	spin_lock_irqsave(host lock)
4538 *
4539 *	RETURNS: 0 when ATAPI DMA can be used
4540 *               nonzero otherwise
4541 */
4542int atapi_check_dma(struct ata_queued_cmd *qc)
4543{
4544	struct ata_port *ap = qc->ap;
4545
4546	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4547	 * few ATAPI devices choke on such DMA requests.
4548	 */
4549	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4550	    unlikely(qc->nbytes & 15))
4551		return 1;
4552
4553	if (ap->ops->check_atapi_dma)
4554		return ap->ops->check_atapi_dma(qc);
4555
4556	return 0;
4557}
4558
4559/**
4560 *	ata_std_qc_defer - Check whether a qc needs to be deferred
4561 *	@qc: ATA command in question
4562 *
4563 *	Non-NCQ commands cannot run with any other command, NCQ or
4564 *	not.  As the upper layer only knows the queue depth, we are
4565 *	responsible for maintaining exclusion.  This function checks
4566 *	whether a new command @qc can be issued.
4567 *
4568 *	LOCKING:
4569 *	spin_lock_irqsave(host lock)
4570 *
4571 *	RETURNS:
4572 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4573 */
4574int ata_std_qc_defer(struct ata_queued_cmd *qc)
4575{
4576	struct ata_link *link = qc->dev->link;
4577
4578	if (qc->tf.protocol == ATA_PROT_NCQ) {
4579		if (!ata_tag_valid(link->active_tag))
4580			return 0;
4581	} else {
4582		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4583			return 0;
4584	}
4585
4586	return ATA_DEFER_LINK;
4587}
4588
4589void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4590
4591/**
4592 *	ata_sg_init - Associate command with scatter-gather table.
4593 *	@qc: Command to be associated
4594 *	@sg: Scatter-gather table.
4595 *	@n_elem: Number of elements in s/g table.
4596 *
4597 *	Initialize the data-related elements of queued_cmd @qc
4598 *	to point to a scatter-gather table @sg, containing @n_elem
4599 *	elements.
4600 *
4601 *	LOCKING:
4602 *	spin_lock_irqsave(host lock)
4603 */
4604void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4605		 unsigned int n_elem)
4606{
4607	qc->sg = sg;
4608	qc->n_elem = n_elem;
4609	qc->cursg = qc->sg;
4610}
4611
4612/**
4613 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4614 *	@qc: Command with scatter-gather table to be mapped.
4615 *
4616 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4617 *
4618 *	LOCKING:
4619 *	spin_lock_irqsave(host lock)
4620 *
4621 *	RETURNS:
4622 *	Zero on success, negative on error.
4623 *
4624 */
4625static int ata_sg_setup(struct ata_queued_cmd *qc)
4626{
4627	struct ata_port *ap = qc->ap;
4628	unsigned int n_elem;
4629
4630	VPRINTK("ENTER, ata%u\n", ap->print_id);
4631
4632	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4633	if (n_elem < 1)
4634		return -1;
4635
4636	DPRINTK("%d sg elements mapped\n", n_elem);
4637	qc->orig_n_elem = qc->n_elem;
4638	qc->n_elem = n_elem;
4639	qc->flags |= ATA_QCFLAG_DMAMAP;
4640
4641	return 0;
4642}
4643
4644/**
4645 *	swap_buf_le16 - swap halves of 16-bit words in place
4646 *	@buf:  Buffer to swap
4647 *	@buf_words:  Number of 16-bit words in buffer.
4648 *
4649 *	Swap halves of 16-bit words if needed to convert from
4650 *	little-endian byte order to native cpu byte order, or
4651 *	vice-versa.
4652 *
4653 *	LOCKING:
4654 *	Inherited from caller.
4655 */
4656void swap_buf_le16(u16 *buf, unsigned int buf_words)
4657{
4658#ifdef __BIG_ENDIAN
4659	unsigned int i;
4660
4661	for (i = 0; i < buf_words; i++)
4662		buf[i] = le16_to_cpu(buf[i]);
4663#endif /* __BIG_ENDIAN */
4664}
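/*
 * Example: IDENTIFY data arrives as little-endian 16-bit words, so code
 * reading it via PIO (e.g. ata_dev_read_id()) converts the buffer in
 * place before parsing:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */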
4665
4666/**
4667 *	ata_qc_new - Request an available ATA command, for queueing
4668 *	@ap: target port
4669 *
4670 *	LOCKING:
4671 *	None.
4672 */
4673
4674static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4675{
4676	struct ata_queued_cmd *qc = NULL;
4677	unsigned int i;
4678
4679	/* no command while frozen */
4680	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4681		return NULL;
4682
4683	/* the last tag is reserved for internal command. */
4684	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4685		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4686			qc = __ata_qc_from_tag(ap, i);
4687			break;
4688		}
4689
4690	if (qc)
4691		qc->tag = i;
4692
4693	return qc;
4694}
4695
4696/**
4697 *	ata_qc_new_init - Request an available ATA command, and initialize it
4698 *	@dev: Device from whom we request an available command structure
4699 *
4700 *	LOCKING:
4701 *	None.
4702 */
4703
4704struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4705{
4706	struct ata_port *ap = dev->link->ap;
4707	struct ata_queued_cmd *qc;
4708
4709	qc = ata_qc_new(ap);
4710	if (qc) {
4711		qc->scsicmd = NULL;
4712		qc->ap = ap;
4713		qc->dev = dev;
4714
4715		ata_qc_reinit(qc);
4716	}
4717
4718	return qc;
4719}
4720
4721/**
4722 *	ata_qc_free - free unused ata_queued_cmd
4723 *	@qc: Command to complete
4724 *
4725 *	Designed to free unused ata_queued_cmd object
4726 *	in case something prevents using it.
4727 *
4728 *	LOCKING:
4729 *	spin_lock_irqsave(host lock)
4730 */
4731void ata_qc_free(struct ata_queued_cmd *qc)
4732{
4733	struct ata_port *ap;
4734	unsigned int tag;
4735
4736	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4737	ap = qc->ap;
4738
4739	qc->flags = 0;
4740	tag = qc->tag;
4741	if (likely(ata_tag_valid(tag))) {
4742		qc->tag = ATA_TAG_POISON;
4743		clear_bit(tag, &ap->qc_allocated);
4744	}
4745}
4746
4747void __ata_qc_complete(struct ata_queued_cmd *qc)
4748{
4749	struct ata_port *ap;
4750	struct ata_link *link;
4751
4752	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4753	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4754	ap = qc->ap;
4755	link = qc->dev->link;
4756
4757	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4758		ata_sg_clean(qc);
4759
4760	/* command should be marked inactive atomically with qc completion */
4761	if (qc->tf.protocol == ATA_PROT_NCQ) {
4762		link->sactive &= ~(1 << qc->tag);
4763		if (!link->sactive)
4764			ap->nr_active_links--;
4765	} else {
4766		link->active_tag = ATA_TAG_POISON;
4767		ap->nr_active_links--;
4768	}
4769
4770	/* clear exclusive status */
4771	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4772		     ap->excl_link == link))
4773		ap->excl_link = NULL;
4774
4775	/* atapi: mark qc as inactive to prevent the interrupt handler
4776	 * from completing the command twice later, before the error handler
4777	 * is called. (when rc != 0 and atapi request sense is needed)
4778	 */
4779	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4780	ap->qc_active &= ~(1 << qc->tag);
4781
4782	/* call completion callback */
4783	qc->complete_fn(qc);
4784}
4785
4786static void fill_result_tf(struct ata_queued_cmd *qc)
4787{
4788	struct ata_port *ap = qc->ap;
4789
4790	qc->result_tf.flags = qc->tf.flags;
4791	ap->ops->qc_fill_rtf(qc);
4792}
4793
4794static void ata_verify_xfer(struct ata_queued_cmd *qc)
4795{
4796	struct ata_device *dev = qc->dev;
4797
4798	if (ata_is_nodata(qc->tf.protocol))
4799		return;
4800
4801	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4802		return;
4803
4804	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4805}
4806
4807/**
4808 *	ata_qc_complete - Complete an active ATA command
4809 *	@qc: Command to complete
4810 *
4811 *	Indicate to the mid and upper layers that an ATA command has
4812 *	completed, with either an ok or not-ok status.
4813 *
4814 *	Refrain from calling this function multiple times when
4815 *	successfully completing multiple NCQ commands.
4816 *	ata_qc_complete_multiple() should be used instead, which will
4817 *	properly update IRQ expect state.
4818 *
4819 *	LOCKING:
4820 *	spin_lock_irqsave(host lock)
4821 */
4822void ata_qc_complete(struct ata_queued_cmd *qc)
4823{
4824	struct ata_port *ap = qc->ap;
4825
4826	/* XXX: New EH and old EH use different mechanisms to
4827	 * synchronize EH with regular execution path.
4828	 *
4829	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4830	 * Normal execution path is responsible for not accessing a
4831	 * failed qc.  libata core enforces the rule by returning NULL
4832	 * from ata_qc_from_tag() for failed qcs.
4833	 *
4834	 * Old EH depends on ata_qc_complete() nullifying completion
4835	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4836	 * not synchronize with interrupt handler.  Only PIO task is
4837	 * taken care of.
4838	 */
4839	if (ap->ops->error_handler) {
4840		struct ata_device *dev = qc->dev;
4841		struct ata_eh_info *ehi = &dev->link->eh_info;
4842
4843		if (unlikely(qc->err_mask))
4844			qc->flags |= ATA_QCFLAG_FAILED;
4845
4846		/*
4847		 * Finish internal commands without any further processing
4848		 * and always with the result TF filled.
4849		 */
4850		if (unlikely(ata_tag_internal(qc->tag))) {
4851			fill_result_tf(qc);
4852			__ata_qc_complete(qc);
4853			return;
4854		}
4855
4856		/*
4857		 * Non-internal qc has failed.  Fill the result TF and
4858		 * summon EH.
4859		 */
4860		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4861			fill_result_tf(qc);
4862			ata_qc_schedule_eh(qc);
4863			return;
4864		}
4865
4866		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4867
4868		/* read result TF if requested */
4869		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4870			fill_result_tf(qc);
4871
4872		/* Some commands need post-processing after successful
4873		 * completion.
4874		 */
4875		switch (qc->tf.command) {
4876		case ATA_CMD_SET_FEATURES:
4877			if (qc->tf.feature != SETFEATURES_WC_ON &&
4878			    qc->tf.feature != SETFEATURES_WC_OFF)
4879				break;
4880			/* fall through */
4881		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4882		case ATA_CMD_SET_MULTI: /* multi_count changed */
4883			/* revalidate device */
4884			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4885			ata_port_schedule_eh(ap);
4886			break;
4887
4888		case ATA_CMD_SLEEP:
4889			dev->flags |= ATA_DFLAG_SLEEPING;
4890			break;
4891		}
4892
4893		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4894			ata_verify_xfer(qc);
4895
4896		__ata_qc_complete(qc);
4897	} else {
4898		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4899			return;
4900
4901		/* read result TF if failed or requested */
4902		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4903			fill_result_tf(qc);
4904
4905		__ata_qc_complete(qc);
4906	}
4907}
4908
4909/**
4910 *	ata_qc_complete_multiple - Complete multiple qcs successfully
4911 *	@ap: port in question
4912 *	@qc_active: new qc_active mask
4913 *
4914 *	Complete in-flight commands.  This function is meant to be
4915 *	called from low-level driver's interrupt routine to complete
4916 *	requests normally.  ap->qc_active and @qc_active are compared
4917 *	and commands are completed accordingly.
4918 *
4919 *	Always use this function when completing multiple NCQ commands
4920 *	from IRQ handlers instead of calling ata_qc_complete()
4921 *	multiple times to keep IRQ expect status properly in sync.
4922 *
4923 *	LOCKING:
4924 *	spin_lock_irqsave(host lock)
4925 *
4926 *	RETURNS:
4927 *	Number of completed commands on success, -errno otherwise.
4928 */
4929int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4930{
4931	int nr_done = 0;
4932	u32 done_mask;
4933
4934	done_mask = ap->qc_active ^ qc_active;
4935
4936	if (unlikely(done_mask & qc_active)) {
4937		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4938			     ap->qc_active, qc_active);
4939		return -EINVAL;
4940	}
4941
4942	while (done_mask) {
4943		struct ata_queued_cmd *qc;
4944		unsigned int tag = __ffs(done_mask);
4945
4946		qc = ata_qc_from_tag(ap, tag);
4947		if (qc) {
4948			ata_qc_complete(qc);
4949			nr_done++;
4950		}
4951		done_mask &= ~(1 << tag);
4952	}
4953
4954	return nr_done;
4955}
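/*
 * Example (hypothetical LLD sketch): an interrupt handler reads the
 * controller's "commands still outstanding" register and hands the
 * result straight to ata_qc_complete_multiple():
 *
 *	u32 qc_active = readl(port_mmio + FOO_PORT_CMD_ACTIVE);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 *
 * FOO_PORT_CMD_ACTIVE is an assumed, controller-specific register.
 */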
4956
4957/**
4958 *	ata_qc_issue - issue taskfile to device
4959 *	@qc: command to issue to device
4960 *
4961 *	Prepare an ATA command for submission to the device.
4962 *	This includes mapping the data into a DMA-able
4963 *	area, filling in the S/G table, and finally
4964 *	writing the taskfile to hardware, starting the command.
4965 *
4966 *	LOCKING:
4967 *	spin_lock_irqsave(host lock)
4968 */
4969void ata_qc_issue(struct ata_queued_cmd *qc)
4970{
4971	struct ata_port *ap = qc->ap;
4972	struct ata_link *link = qc->dev->link;
4973	u8 prot = qc->tf.protocol;
4974
4975	/* Make sure only one non-NCQ command is outstanding.  The
4976	 * check is skipped for old EH because it reuses active qc to
4977	 * request ATAPI sense.
4978	 */
4979	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4980
4981	if (ata_is_ncq(prot)) {
4982		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
4983
4984		if (!link->sactive)
4985			ap->nr_active_links++;
4986		link->sactive |= 1 << qc->tag;
4987	} else {
4988		WARN_ON_ONCE(link->sactive);
4989
4990		ap->nr_active_links++;
4991		link->active_tag = qc->tag;
4992	}
4993
4994	qc->flags |= ATA_QCFLAG_ACTIVE;
4995	ap->qc_active |= 1 << qc->tag;
4996
4997	/*
4998	 * We guarantee to LLDs that they will have at least one
4999	 * non-zero sg if the command is a data command.
5000	 */
5001	if (WARN_ON_ONCE(ata_is_data(prot) &&
5002			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5003		goto sys_err;
5004
5005	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5006				 (ap->flags & ATA_FLAG_PIO_DMA)))
5007		if (ata_sg_setup(qc))
5008			goto sys_err;
5009
5010	/* if device is sleeping, schedule reset and abort the link */
5011	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5012		link->eh_info.action |= ATA_EH_RESET;
5013		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5014		ata_link_abort(link);
5015		return;
5016	}
5017
5018	ap->ops->qc_prep(qc);
5019
5020	qc->err_mask |= ap->ops->qc_issue(qc);
5021	if (unlikely(qc->err_mask))
5022		goto err;
5023	return;
5024
5025sys_err:
5026	qc->err_mask |= AC_ERR_SYSTEM;
5027err:
5028	ata_qc_complete(qc);
5029}
5030
5031/**
5032 *	sata_scr_valid - test whether SCRs are accessible
5033 *	@link: ATA link to test SCR accessibility for
5034 *
5035 *	Test whether SCRs are accessible for @link.
5036 *
5037 *	LOCKING:
5038 *	None.
5039 *
5040 *	RETURNS:
5041 *	1 if SCRs are accessible, 0 otherwise.
5042 */
5043int sata_scr_valid(struct ata_link *link)
5044{
5045	struct ata_port *ap = link->ap;
5046
5047	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5048}
5049
5050/**
5051 *	sata_scr_read - read SCR register of the specified port
5052 *	@link: ATA link to read SCR for
5053 *	@reg: SCR to read
5054 *	@val: Place to store read value
5055 *
5056 *	Read SCR register @reg of @link into *@val.  This function is
5057 *	guaranteed to succeed if @link is ap->link, the cable type of
5058 *	the port is SATA and the port implements ->scr_read.
5059 *
5060 *	LOCKING:
5061 *	None if @link is ap->link.  Kernel thread context otherwise.
5062 *
5063 *	RETURNS:
5064 *	0 on success, negative errno on failure.
5065 */
5066int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5067{
5068	if (ata_is_host_link(link)) {
5069		if (sata_scr_valid(link))
5070			return link->ap->ops->scr_read(link, reg, val);
5071		return -EOPNOTSUPP;
5072	}
5073
5074	return sata_pmp_scr_read(link, reg, val);
5075}
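/*
 * Example (sketch): the current negotiated speed can be derived from
 * SStatus bits 7:4, much like sata_link_init_spd() below does for the
 * saved SControl value:
 *
 *	u32 sstatus;
 *	u8 spd = 0;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		spd = (sstatus >> 4) & 0xf;
 */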
5076
5077/**
5078 *	sata_scr_write - write SCR register of the specified port
5079 *	@link: ATA link to write SCR for
5080 *	@reg: SCR to write
5081 *	@val: value to write
5082 *
5083 *	Write @val to SCR register @reg of @link.  This function is
5084 *	guaranteed to succeed if @link is ap->link, the cable type of
5085 *	the port is SATA and the port implements ->scr_write.
5086 *
5087 *	LOCKING:
5088 *	None if @link is ap->link.  Kernel thread context otherwise.
5089 *
5090 *	RETURNS:
5091 *	0 on success, negative errno on failure.
5092 */
5093int sata_scr_write(struct ata_link *link, int reg, u32 val)
5094{
5095	if (ata_is_host_link(link)) {
5096		if (sata_scr_valid(link))
5097			return link->ap->ops->scr_write(link, reg, val);
5098		return -EOPNOTSUPP;
5099	}
5100
5101	return sata_pmp_scr_write(link, reg, val);
5102}
5103
5104/**
5105 *	sata_scr_write_flush - write SCR register of the specified port and flush
5106 *	@link: ATA link to write SCR for
5107 *	@reg: SCR to write
5108 *	@val: value to write
5109 *
5110 *	This function is identical to sata_scr_write() except that this
5111 *	function performs flush after writing to the register.
5112 *
5113 *	LOCKING:
5114 *	None if @link is ap->link.  Kernel thread context otherwise.
5115 *
5116 *	RETURNS:
5117 *	0 on success, negative errno on failure.
5118 */
5119int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5120{
5121	if (ata_is_host_link(link)) {
5122		int rc;
5123
5124		if (sata_scr_valid(link)) {
5125			rc = link->ap->ops->scr_write(link, reg, val);
5126			if (rc == 0)
5127				rc = link->ap->ops->scr_read(link, reg, &val);
5128			return rc;
5129		}
5130		return -EOPNOTSUPP;
5131	}
5132
5133	return sata_pmp_scr_write(link, reg, val);
5134}
5135
5136/**
5137 *	ata_phys_link_online - test whether the given link is online
5138 *	@link: ATA link to test
5139 *
5140 *	Test whether @link is online.  Note that this function returns
5141 *	0 if online status of @link cannot be obtained, so
5142 *	ata_link_online(link) != !ata_link_offline(link).
5143 *
5144 *	LOCKING:
5145 *	None.
5146 *
5147 *	RETURNS:
5148 *	True if the port online status is available and online.
5149 */
5150bool ata_phys_link_online(struct ata_link *link)
5151{
5152	u32 sstatus;
5153
5154	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5155	    ata_sstatus_online(sstatus))
5156		return true;
5157	return false;
5158}
5159
5160/**
5161 *	ata_phys_link_offline - test whether the given link is offline
5162 *	@link: ATA link to test
5163 *
5164 *	Test whether @link is offline.  Note that this function
5165 *	returns 0 if offline status of @link cannot be obtained, so
5166 *	ata_link_online(link) != !ata_link_offline(link).
5167 *
5168 *	LOCKING:
5169 *	None.
5170 *
5171 *	RETURNS:
5172 *	True if the port offline status is available and offline.
5173 */
5174bool ata_phys_link_offline(struct ata_link *link)
5175{
5176	u32 sstatus;
5177
5178	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5179	    !ata_sstatus_online(sstatus))
5180		return true;
5181	return false;
5182}
5183
5184/**
5185 *	ata_link_online - test whether the given link is online
5186 *	@link: ATA link to test
5187 *
5188 *	Test whether @link is online.  This is identical to
5189 *	ata_phys_link_online() when there's no slave link.  When
5190 *	there's a slave link, this function should only be called on
5191 *	the master link and will return true if any of M/S links is
5192 *	online.
5193 *
5194 *	LOCKING:
5195 *	None.
5196 *
5197 *	RETURNS:
5198 *	True if the port online status is available and online.
5199 */
5200bool ata_link_online(struct ata_link *link)
5201{
5202	struct ata_link *slave = link->ap->slave_link;
5203
5204	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5205
5206	return ata_phys_link_online(link) ||
5207		(slave && ata_phys_link_online(slave));
5208}
5209
5210/**
5211 *	ata_link_offline - test whether the given link is offline
5212 *	@link: ATA link to test
5213 *
5214 *	Test whether @link is offline.  This is identical to
5215 *	ata_phys_link_offline() when there's no slave link.  When
5216 *	there's a slave link, this function should only be called on
5217 *	the master link and will return true if both M/S links are
5218 *	offline.
5219 *
5220 *	LOCKING:
5221 *	None.
5222 *
5223 *	RETURNS:
5224 *	True if the port offline status is available and offline.
5225 */
5226bool ata_link_offline(struct ata_link *link)
5227{
5228	struct ata_link *slave = link->ap->slave_link;
5229
5230	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5231
5232	return ata_phys_link_offline(link) &&
5233		(!slave || ata_phys_link_offline(slave));
5234}
5235
5236#ifdef CONFIG_PM
5237static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5238			       unsigned int action, unsigned int ehi_flags,
5239			       int wait)
5240{
5241	unsigned long flags;
5242	int i, rc;
5243
5244	for (i = 0; i < host->n_ports; i++) {
5245		struct ata_port *ap = host->ports[i];
5246		struct ata_link *link;
5247
5248		/* Previous resume operation might still be in
5249		 * progress.  Wait for PM_PENDING to clear.
5250		 */
5251		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5252			ata_port_wait_eh(ap);
5253			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5254		}
5255
5256		/* request PM ops to EH */
5257		spin_lock_irqsave(ap->lock, flags);
5258
5259		ap->pm_mesg = mesg;
5260		if (wait) {
5261			rc = 0;
5262			ap->pm_result = &rc;
5263		}
5264
5265		ap->pflags |= ATA_PFLAG_PM_PENDING;
5266		ata_for_each_link(link, ap, HOST_FIRST) {
5267			link->eh_info.action |= action;
5268			link->eh_info.flags |= ehi_flags;
5269		}
5270
5271		ata_port_schedule_eh(ap);
5272
5273		spin_unlock_irqrestore(ap->lock, flags);
5274
5275		/* wait and check result */
5276		if (wait) {
5277			ata_port_wait_eh(ap);
5278			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5279			if (rc)
5280				return rc;
5281		}
5282	}
5283
5284	return 0;
5285}
5286
5287/**
5288 *	ata_host_suspend - suspend host
5289 *	@host: host to suspend
5290 *	@mesg: PM message
5291 *
5292 *	Suspend @host.  Actual operation is performed by EH.  This
5293 *	function requests EH to perform PM operations and waits for EH
5294 *	to finish.
5295 *
5296 *	LOCKING:
5297 *	Kernel thread context (may sleep).
5298 *
5299 *	RETURNS:
5300 *	0 on success, -errno on failure.
5301 */
5302int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5303{
5304	unsigned int ehi_flags = ATA_EHI_QUIET;
5305	int rc;
5306
5307	/*
5308	 * On some hardware, device fails to respond after spun down
5309	 * for suspend.  As the device won't be used before being
5310	 * resumed, we don't need to touch the device.  Ask EH to skip
5311	 * the usual stuff and proceed directly to suspend.
5312	 *
5313	 * http://thread.gmane.org/gmane.linux.ide/46764
5314	 */
5315	if (mesg.event == PM_EVENT_SUSPEND)
5316		ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5317
5318	rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5319	if (rc == 0)
5320		host->dev->power.power_state = mesg;
5321	return rc;
5322}
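/*
 * Example (hypothetical LLD sketch): a PCI driver's ->suspend hook
 * usually forwards to this helper before touching PCI state, roughly:
 *
 *	static int foo_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *		int rc;
 *
 *		rc = ata_host_suspend(host, mesg);
 *		if (rc)
 *			return rc;
 *
 *		ata_pci_device_do_suspend(pdev, mesg);
 *		return 0;
 *	}
 */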
5323
5324/**
5325 *	ata_host_resume - resume host
5326 *	@host: host to resume
5327 *
5328 *	Resume @host.  Actual operation is performed by EH.  This
5329 *	function requests EH to perform PM operations and returns.
5330 *	Note that all resume operations are performed in parallel.
5331 *
5332 *	LOCKING:
5333 *	Kernel thread context (may sleep).
5334 */
5335void ata_host_resume(struct ata_host *host)
5336{
5337	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5338			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5339	host->dev->power.power_state = PMSG_ON;
5340}
5341#endif
5342
5343/**
5344 *	ata_dev_init - Initialize an ata_device structure
5345 *	@dev: Device structure to initialize
5346 *
5347 *	Initialize @dev in preparation for probing.
5348 *
5349 *	LOCKING:
5350 *	Inherited from caller.
5351 */
5352void ata_dev_init(struct ata_device *dev)
5353{
5354	struct ata_link *link = ata_dev_phys_link(dev);
5355	struct ata_port *ap = link->ap;
5356	unsigned long flags;
5357
5358	/* SATA spd limit is bound to the attached device, reset together */
5359	link->sata_spd_limit = link->hw_sata_spd_limit;
5360	link->sata_spd = 0;
5361
5362	/* High bits of dev->flags are used to record warm plug
5363	 * requests which occur asynchronously.  Synchronize using
5364	 * host lock.
5365	 */
5366	spin_lock_irqsave(ap->lock, flags);
5367	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5368	dev->horkage = 0;
5369	spin_unlock_irqrestore(ap->lock, flags);
5370
5371	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5372	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5373	dev->pio_mask = UINT_MAX;
5374	dev->mwdma_mask = UINT_MAX;
5375	dev->udma_mask = UINT_MAX;
5376}
5377
5378/**
5379 *	ata_link_init - Initialize an ata_link structure
5380 *	@ap: ATA port link is attached to
5381 *	@link: Link structure to initialize
5382 *	@pmp: Port multiplier port number
5383 *
5384 *	Initialize @link.
5385 *
5386 *	LOCKING:
5387 *	Kernel thread context (may sleep)
5388 */
5389void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5390{
5391	int i;
5392
5393	/* clear everything except for devices */
5394	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5395	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5396
5397	link->ap = ap;
5398	link->pmp = pmp;
5399	link->active_tag = ATA_TAG_POISON;
5400	link->hw_sata_spd_limit = UINT_MAX;
5401
5402	/* can't use iterator, ap isn't initialized yet */
5403	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5404		struct ata_device *dev = &link->device[i];
5405
5406		dev->link = link;
5407		dev->devno = dev - link->device;
5408#ifdef CONFIG_ATA_ACPI
5409		dev->gtf_filter = ata_acpi_gtf_filter;
5410#endif
5411		ata_dev_init(dev);
5412	}
5413}
5414
5415/**
5416 *	sata_link_init_spd - Initialize link->sata_spd_limit
5417 *	@link: Link to configure sata_spd_limit for
5418 *
5419 *	Initialize @link->[hw_]sata_spd_limit to the currently
5420 *	configured value.
5421 *
5422 *	LOCKING:
5423 *	Kernel thread context (may sleep).
5424 *
5425 *	RETURNS:
5426 *	0 on success, -errno on failure.
5427 */
5428int sata_link_init_spd(struct ata_link *link)
5429{
5430	u8 spd;
5431	int rc;
5432
5433	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5434	if (rc)
5435		return rc;
5436
5437	spd = (link->saved_scontrol >> 4) & 0xf;
5438	if (spd)
5439		link->hw_sata_spd_limit &= (1 << spd) - 1;
5440
5441	ata_force_link_limits(link);
5442
5443	link->sata_spd_limit = link->hw_sata_spd_limit;
5444
5445	return 0;
5446}
5447
5448/**
5449 *	ata_port_alloc - allocate and initialize basic ATA port resources
5450 *	@host: ATA host this allocated port belongs to
5451 *
5452 *	Allocate and initialize basic ATA port resources.
5453 *
5454 *	RETURNS:
5455 *	Allocate ATA port on success, NULL on failure.
5456 *
5457 *	LOCKING:
5458 *	Inherited from calling layer (may sleep).
5459 */
5460struct ata_port *ata_port_alloc(struct ata_host *host)
5461{
5462	struct ata_port *ap;
5463
5464	DPRINTK("ENTER\n");
5465
5466	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5467	if (!ap)
5468		return NULL;
5469
5470	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5471	ap->lock = &host->lock;
5472	ap->print_id = -1;
5473	ap->host = host;
5474	ap->dev = host->dev;
5475
5476#if defined(ATA_VERBOSE_DEBUG)
5477	/* turn on all debugging levels */
5478	ap->msg_enable = 0x00FF;
5479#elif defined(ATA_DEBUG)
5480	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5481#else
5482	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5483#endif
5484
5485	mutex_init(&ap->scsi_scan_mutex);
5486	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5487	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5488	INIT_LIST_HEAD(&ap->eh_done_q);
5489	init_waitqueue_head(&ap->eh_wait_q);
5490	init_completion(&ap->park_req_pending);
5491	init_timer_deferrable(&ap->fastdrain_timer);
5492	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5493	ap->fastdrain_timer.data = (unsigned long)ap;
5494
5495	ap->cbl = ATA_CBL_NONE;
5496
5497	ata_link_init(ap, &ap->link, 0);
5498
5499#ifdef ATA_IRQ_TRAP
5500	ap->stats.unhandled_irq = 1;
5501	ap->stats.idle_irq = 1;
5502#endif
5503	ata_sff_port_init(ap);
5504
5505	return ap;
5506}
5507
5508static void ata_host_release(struct device *gendev, void *res)
5509{
5510	struct ata_host *host = dev_get_drvdata(gendev);
5511	int i;
5512
5513	for (i = 0; i < host->n_ports; i++) {
5514		struct ata_port *ap = host->ports[i];
5515
5516		if (!ap)
5517			continue;
5518
5519		if (ap->scsi_host)
5520			scsi_host_put(ap->scsi_host);
5521
5522		kfree(ap->pmp_link);
5523		kfree(ap->slave_link);
5524		kfree(ap);
5525		host->ports[i] = NULL;
5526	}
5527
5528	dev_set_drvdata(gendev, NULL);
5529}
5530
5531/**
5532 *	ata_host_alloc - allocate and init basic ATA host resources
5533 *	@dev: generic device this host is associated with
5534 *	@max_ports: maximum number of ATA ports associated with this host
5535 *
5536 *	Allocate and initialize basic ATA host resources.  LLD calls
5537 *	this function to allocate a host, initializes it fully and
5538 *	attaches it using ata_host_register().
5539 *
5540 *	@max_ports ports are allocated and host->n_ports is
5541 *	initialized to @max_ports.  The caller is allowed to decrease
5542 *	host->n_ports before calling ata_host_register().  The unused
5543 *	ports will be automatically freed on registration.
5544 *
5545 *	RETURNS:
5546 *	Allocate ATA host on success, NULL on failure.
5547 *
5548 *	LOCKING:
5549 *	Inherited from calling layer (may sleep).
5550 */
5551struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5552{
5553	struct ata_host *host;
5554	size_t sz;
5555	int i;
5556
5557	DPRINTK("ENTER\n");
5558
5559	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5560		return NULL;
5561
5562	/* alloc a container for our list of ATA ports (buses) */
5563	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5565	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5566	if (!host)
5567		goto err_out;
5568
5569	devres_add(dev, host);
5570	dev_set_drvdata(dev, host);
5571
5572	spin_lock_init(&host->lock);
5573	mutex_init(&host->eh_mutex);
5574	host->dev = dev;
5575	host->n_ports = max_ports;
5576
5577	/* allocate ports bound to this host */
5578	for (i = 0; i < max_ports; i++) {
5579		struct ata_port *ap;
5580
5581		ap = ata_port_alloc(host);
5582		if (!ap)
5583			goto err_out;
5584
5585		ap->port_no = i;
5586		host->ports[i] = ap;
5587	}
5588
5589	devres_remove_group(dev, NULL);
5590	return host;
5591
5592 err_out:
5593	devres_release_group(dev, NULL);
 
 
5594	return NULL;
5595}
 
5596
5597/**
5598 *	ata_host_alloc_pinfo - alloc host and init with port_info array
5599 *	@dev: generic device this host is associated with
5600 *	@ppi: array of ATA port_info to initialize host with
5601 *	@n_ports: number of ATA ports attached to this host
5602 *
5603 *	Allocate ATA host and initialize with info from @ppi.  If NULL
5604 *	terminated, @ppi may contain fewer entries than @n_ports.  The
5605 *	last entry will be used for the remaining ports.
5606 *
5607 *	RETURNS:
5608 *	Allocated ATA host on success, NULL on failure.
5609 *
5610 *	LOCKING:
5611 *	Inherited from calling layer (may sleep).
5612 */
5613struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5614				      const struct ata_port_info * const * ppi,
5615				      int n_ports)
5616{
5617	const struct ata_port_info *pi;
5618	struct ata_host *host;
5619	int i, j;
5620
5621	host = ata_host_alloc(dev, n_ports);
5622	if (!host)
5623		return NULL;
5624
5625	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5626		struct ata_port *ap = host->ports[i];
5627
5628		if (ppi[j])
5629			pi = ppi[j++];
5630
5631		ap->pio_mask = pi->pio_mask;
5632		ap->mwdma_mask = pi->mwdma_mask;
5633		ap->udma_mask = pi->udma_mask;
5634		ap->flags |= pi->flags;
5635		ap->link.flags |= pi->link_flags;
5636		ap->ops = pi->port_ops;
5637
5638		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5639			host->ops = pi->port_ops;
5640	}
5641
5642	return host;
5643}
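/*
 * Usage note (illustrative sketch, not part of the original source): a
 * typical PCI LLD pairs ata_host_alloc_pinfo() with ata_host_activate()
 * in its probe routine.  "my_port_info", "my_interrupt" and "my_sht"
 * below are hypothetical driver-private names.
 *
 *	static const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	// ... map BARs, fill host->iomap and per-port I/O addresses ...
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */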
5644
5645/**
5646 *	ata_slave_link_init - initialize slave link
5647 *	@ap: port to initialize slave link for
5648 *
5649 *	Create and initialize slave link for @ap.  This enables slave
5650 *	link handling on the port.
5651 *
5652 *	In libata, a port contains links and a link contains devices.
5653 *	There is a single host link, but if a PMP is attached to it,
5654 *	there can be multiple fan-out links.  On SATA, there's usually
5655 *	a single device connected to a link but PATA and SATA
5656 *	controllers emulating TF based interface can have two - master
5657 *	and slave.
5658 *
5659 *	However, there are a few controllers which don't fit into this
5660 *	abstraction too well - SATA controllers which emulate TF
5661 *	interface with both master and slave devices but also have
5662 *	separate SCR register sets for each device.  These controllers
5663 *	need separate links for physical link handling
5664 *	(e.g. onlineness, link speed) but should be treated like a
5665 *	traditional M/S controller for everything else (e.g. command
5666 *	issue, softreset).
5667 *
5668 *	slave_link is libata's way of handling this class of
5669 *	controllers without impacting core layer too much.  For
5670 *	anything other than physical link handling, the default host
5671 *	link is used for both master and slave.  For physical link
5672 *	handling, separate @ap->slave_link is used.  All dirty details
5673 *	are implemented inside libata core layer.  From LLD's POV, the
5674 *	only difference is that prereset, hardreset and postreset are
5675 *	called once more for the slave link, so the reset sequence
5676 *	looks like the following.
5677 *
5678 *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5679 *	softreset(M) -> postreset(M) -> postreset(S)
5680 *
5681 *	Note that softreset is called only for the master.  Softreset
5682 *	resets both M/S by definition, so SRST on master should handle
5683 *	both (the standard method will work just fine).
5684 *
5685 *	LOCKING:
5686 *	Should be called before host is registered.
5687 *
5688 *	RETURNS:
5689 *	0 on success, -errno on failure.
5690 */
5691int ata_slave_link_init(struct ata_port *ap)
5692{
5693	struct ata_link *link;
5694
5695	WARN_ON(ap->slave_link);
5696	WARN_ON(ap->flags & ATA_FLAG_PMP);
5697
5698	link = kzalloc(sizeof(*link), GFP_KERNEL);
5699	if (!link)
5700		return -ENOMEM;
5701
5702	ata_link_init(ap, link, 1);
5703	ap->slave_link = link;
5704	return 0;
5705}
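/*
 * Illustrative sketch (not in the original source): an LLD driving a
 * controller with per-device SCR register sets would create the slave
 * link for each port after allocation and before registration, e.g.:
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 */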
5706
5707static void ata_host_stop(struct device *gendev, void *res)
5708{
5709	struct ata_host *host = dev_get_drvdata(gendev);
5710	int i;
5711
5712	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5713
5714	for (i = 0; i < host->n_ports; i++) {
5715		struct ata_port *ap = host->ports[i];
5716
5717		if (ap->ops->port_stop)
5718			ap->ops->port_stop(ap);
5719	}
5720
5721	if (host->ops->host_stop)
5722		host->ops->host_stop(host);
5723}
5724
5725/**
5726 *	ata_finalize_port_ops - finalize ata_port_operations
5727 *	@ops: ata_port_operations to finalize
5728 *
5729 *	An ata_port_operations can inherit from another ops and that
5730 *	ops can again inherit from another.  This can go on as many
5731 *	times as necessary as long as there is no loop in the
5732 *	inheritance chain.
5733 *
5734 *	Ops tables are finalized when the host is started.  NULL or
5735 *	unspecified entries are inherited from the closest ancestor
5736 *	which has the method and the entry is populated with it.
5737 *	After finalization, the ops table directly points to all the
5738 *	methods and ->inherits is no longer necessary and cleared.
5739 *
5740 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5741 *
5742 *	LOCKING:
5743 *	None.
5744 */
5745static void ata_finalize_port_ops(struct ata_port_operations *ops)
5746{
5747	static DEFINE_SPINLOCK(lock);
5748	const struct ata_port_operations *cur;
5749	void **begin = (void **)ops;
5750	void **end = (void **)&ops->inherits;
5751	void **pp;
5752
5753	if (!ops || !ops->inherits)
5754		return;
5755
5756	spin_lock(&lock);
5757
5758	for (cur = ops->inherits; cur; cur = cur->inherits) {
5759		void **inherit = (void **)cur;
5760
5761		for (pp = begin; pp < end; pp++, inherit++)
5762			if (!*pp)
5763				*pp = *inherit;
5764	}
5765
5766	for (pp = begin; pp < end; pp++)
5767		if (IS_ERR(*pp))
5768			*pp = NULL;
5769
5770	ops->inherits = NULL;
5771
5772	spin_unlock(&lock);
5773}
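/*
 * Illustrative sketch ("my_port_ops" and "my_set_piomode" are
 * hypothetical): an LLD's ops table only fills in the methods it
 * overrides; everything else is copied from the ancestor chain when the
 * host is started.  ATA_OP_NULL forces a slot to NULL instead of
 * inheriting it.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= my_set_piomode,
 *	};
 */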
5774
5775/**
5776 *	ata_host_start - start and freeze ports of an ATA host
5777 *	@host: ATA host to start ports for
5778 *
5779 *	Start and then freeze ports of @host.  Started status is
5780 *	recorded in host->flags, so this function can be called
5781 *	multiple times.  Ports are guaranteed to get started only
5782 *	once.  If host->ops isn't initialized yet, it's set to the
5783 *	first non-dummy port ops.
5784 *
5785 *	LOCKING:
5786 *	Inherited from calling layer (may sleep).
5787 *
5788 *	RETURNS:
5789 *	0 if all ports are started successfully, -errno otherwise.
5790 */
5791int ata_host_start(struct ata_host *host)
5792{
5793	int have_stop = 0;
5794	void *start_dr = NULL;
5795	int i, rc;
5796
5797	if (host->flags & ATA_HOST_STARTED)
5798		return 0;
5799
5800	ata_finalize_port_ops(host->ops);
5801
5802	for (i = 0; i < host->n_ports; i++) {
5803		struct ata_port *ap = host->ports[i];
5804
5805		ata_finalize_port_ops(ap->ops);
5806
5807		if (!host->ops && !ata_port_is_dummy(ap))
5808			host->ops = ap->ops;
5809
5810		if (ap->ops->port_stop)
5811			have_stop = 1;
5812	}
5813
5814	if (host->ops->host_stop)
5815		have_stop = 1;
5816
5817	if (have_stop) {
5818		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5819		if (!start_dr)
5820			return -ENOMEM;
5821	}
5822
5823	for (i = 0; i < host->n_ports; i++) {
5824		struct ata_port *ap = host->ports[i];
5825
5826		if (ap->ops->port_start) {
5827			rc = ap->ops->port_start(ap);
5828			if (rc) {
5829				if (rc != -ENODEV)
5830					dev_err(host->dev,
5831						"failed to start port %d (errno=%d)\n",
5832						i, rc);
5833				goto err_out;
5834			}
5835		}
5836		ata_eh_freeze_port(ap);
5837	}
5838
5839	if (start_dr)
5840		devres_add(host->dev, start_dr);
5841	host->flags |= ATA_HOST_STARTED;
5842	return 0;
5843
5844 err_out:
5845	while (--i >= 0) {
5846		struct ata_port *ap = host->ports[i];
5847
5848		if (ap->ops->port_stop)
5849			ap->ops->port_stop(ap);
5850	}
5851	devres_free(start_dr);
5852	return rc;
5853}
 
5854
5855/**
5856 *	ata_sas_host_init - Initialize a host struct
5857 *	@host:	host to initialize
5858 *	@dev:	device host is attached to
5859 *	@flags:	host flags
5860 *	@ops:	port_ops
5861 *
5862 *	LOCKING:
5863 *	PCI/etc. bus probe sem.
5864 *
5865 */
5866/* KILLME - the only user left is ipr */
5867void ata_host_init(struct ata_host *host, struct device *dev,
5868		   unsigned long flags, struct ata_port_operations *ops)
5869{
5870	spin_lock_init(&host->lock);
5871	mutex_init(&host->eh_mutex);
 
5872	host->dev = dev;
5873	host->flags = flags;
5874	host->ops = ops;
 
5875}
 
5876
5877int ata_port_probe(struct ata_port *ap)
5878{
5879	int rc = 0;
5880
5881	/* probe */
5882	if (ap->ops->error_handler) {
5883		struct ata_eh_info *ehi = &ap->link.eh_info;
5884		unsigned long flags;
5885
5886		/* kick EH for boot probing */
5887		spin_lock_irqsave(ap->lock, flags);
5888
5889		ehi->probe_mask |= ATA_ALL_DEVICES;
5890		ehi->action |= ATA_EH_RESET;
5891		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5892
5893		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5894		ap->pflags |= ATA_PFLAG_LOADING;
5895		ata_port_schedule_eh(ap);
5896
5897		spin_unlock_irqrestore(ap->lock, flags);
 
 
5898
5899		/* wait for EH to finish */
5900		ata_port_wait_eh(ap);
5901	} else {
5902		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5903		rc = ata_bus_probe(ap);
5904		DPRINTK("ata%u: bus probe end\n", ap->print_id);
5905	}
5906	return rc;
5907}
5908
5909
5910static void async_port_probe(void *data, async_cookie_t cookie)
5911{
5912	struct ata_port *ap = data;
5913
5914	/*
5915	 * If we're not allowed to scan this host in parallel,
5916	 * we need to wait until all previous scans have completed
5917	 * before going further.
5918	 * Jeff Garzik says this is only within a controller, so we
5919	 * don't need to wait for port 0, only for later ports.
5920	 */
5921	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5922		async_synchronize_cookie(cookie);
5923
5924	(void)ata_port_probe(ap);
 
5925
5926	/* in order to keep device order, we need to synchronize at this point */
5927	async_synchronize_cookie(cookie);
5928
5929	ata_scsi_scan_host(ap, 1);
5930}
5931
5932/**
5933 *	ata_host_register - register initialized ATA host
5934 *	@host: ATA host to register
5935 *	@sht: template for SCSI host
5936 *
5937 *	Register initialized ATA host.  @host is allocated using
5938 *	ata_host_alloc() and fully initialized by LLD.  This function
5939 *	starts ports, registers @host with ATA and SCSI layers and
5940 *	probes registered devices.
5941 *
5942 *	LOCKING:
5943 *	Inherited from calling layer (may sleep).
5944 *
5945 *	RETURNS:
5946 *	0 on success, -errno otherwise.
5947 */
5948int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5949{
5950	int i, rc;
5951
 
 
5952	/* host must have been started */
5953	if (!(host->flags & ATA_HOST_STARTED)) {
5954		dev_err(host->dev, "BUG: trying to register unstarted host\n");
5955		WARN_ON(1);
5956		return -EINVAL;
5957	}
5958
5959	/* Blow away unused ports.  This happens when LLD can't
5960	 * determine the exact number of ports to allocate at
5961	 * allocation time.
5962	 */
5963	for (i = host->n_ports; host->ports[i]; i++)
5964		kfree(host->ports[i]);
5965
5966	/* give ports names and add SCSI hosts */
5967	for (i = 0; i < host->n_ports; i++)
5968		host->ports[i]->print_id = ata_print_id++;
5969
 
5970
5971	/* Create associated sysfs transport objects  */
5972	for (i = 0; i < host->n_ports; i++) {
5973		rc = ata_tport_add(host->dev,host->ports[i]);
5974		if (rc) {
5975			goto err_tadd;
5976		}
5977	}
5978
5979	rc = ata_scsi_add_hosts(host, sht);
5980	if (rc)
5981		goto err_tadd;
5982
5983	/* associate with ACPI nodes */
5984	ata_acpi_associate(host);
5985
5986	/* set cable, sata_spd_limit and report */
5987	for (i = 0; i < host->n_ports; i++) {
5988		struct ata_port *ap = host->ports[i];
5989		unsigned long xfer_mask;
5990
5991		/* set SATA cable type if still unset */
5992		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5993			ap->cbl = ATA_CBL_SATA;
5994
5995		/* init sata_spd_limit to the current value */
5996		sata_link_init_spd(&ap->link);
5997		if (ap->slave_link)
5998			sata_link_init_spd(ap->slave_link);
5999
6000		/* print per-port info to dmesg */
6001		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6002					      ap->udma_mask);
6003
6004		if (!ata_port_is_dummy(ap)) {
6005			ata_port_info(ap, "%cATA max %s %s\n",
6006				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6007				      ata_mode_string(xfer_mask),
6008				      ap->link.eh_info.desc);
6009			ata_ehi_clear_desc(&ap->link.eh_info);
6010		} else
6011			ata_port_info(ap, "DUMMY\n");
6012	}
6013
6014	/* perform each probe asynchronously */
6015	for (i = 0; i < host->n_ports; i++) {
6016		struct ata_port *ap = host->ports[i];
6017		async_schedule(async_port_probe, ap);
6018	}
6019
6020	return 0;
6021
6022 err_tadd:
6023	while (--i >= 0) {
6024		ata_tport_delete(host->ports[i]);
6025	}
6026	return rc;
6027
6028}
 
6029
6030/**
6031 *	ata_host_activate - start host, request IRQ and register it
6032 *	@host: target ATA host
6033 *	@irq: IRQ to request
6034 *	@irq_handler: irq_handler used when requesting IRQ
6035 *	@irq_flags: irq_flags used when requesting IRQ
6036 *	@sht: scsi_host_template to use when registering the host
6037 *
6038 *	After allocating an ATA host and initializing it, most libata
6039 *	LLDs perform three steps to activate the host - start host,
6040 *	request IRQ and register it.  This helper takes necessasry
6041 *	request IRQ and register it.  This helper takes the necessary
6042 *
6043 *	An invalid IRQ skips the IRQ registration and expects the host to
6044 *	have set polling mode on the port. In this case, @irq_handler
6045 *	should be NULL.
6046 *
6047 *	LOCKING:
6048 *	Inherited from calling layer (may sleep).
6049 *
6050 *	RETURNS:
6051 *	0 on success, -errno otherwise.
6052 */
6053int ata_host_activate(struct ata_host *host, int irq,
6054		      irq_handler_t irq_handler, unsigned long irq_flags,
6055		      struct scsi_host_template *sht)
6056{
6057	int i, rc;
 
6058
6059	rc = ata_host_start(host);
6060	if (rc)
6061		return rc;
6062
6063	/* Special case for polling mode */
6064	if (!irq) {
6065		WARN_ON(irq_handler);
6066		return ata_host_register(host, sht);
6067	}
6068
6069	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6070			      dev_driver_string(host->dev), host);
6071	if (rc)
6072		return rc;
6073
6074	for (i = 0; i < host->n_ports; i++)
6075		ata_port_desc(host->ports[i], "irq %d", irq);
6076
6077	rc = ata_host_register(host, sht);
6078	/* if failed, just free the IRQ and leave ports alone */
6079	if (rc)
6080		devm_free_irq(host->dev, irq, host);
6081
6082	return rc;
6083}
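/*
 * Illustrative sketch: the common interrupt-driven path passes the real
 * IRQ and handler (see the usage note after ata_host_alloc_pinfo()
 * above).  A controller operated purely by polling instead passes
 * irq == 0 and a NULL handler, provided the LLD has already put its
 * ports into polling mode ("my_sht" is hypothetical):
 *
 *	rc = ata_host_activate(host, 0, NULL, 0, &my_sht);
 */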
 
6084
6085/**
6086 *	ata_port_detach - Detach ATA port in preparation for device removal
6087 *	@ap: ATA port to be detached
6088 *
6089 *	Detach all ATA devices and the associated SCSI devices of @ap;
6090 *	then, remove the associated SCSI host.  @ap is guaranteed to
6091 *	be quiescent on return from this function.
6092 *
6093 *	LOCKING:
6094 *	Kernel thread context (may sleep).
6095 */
6096static void ata_port_detach(struct ata_port *ap)
6097{
6098	unsigned long flags;
6099
6100	if (!ap->ops->error_handler)
6101		goto skip_eh;
6102
6103	/* tell EH we're leaving & flush EH */
6104	spin_lock_irqsave(ap->lock, flags);
6105	ap->pflags |= ATA_PFLAG_UNLOADING;
6106	ata_port_schedule_eh(ap);
 
6107	spin_unlock_irqrestore(ap->lock, flags);
 
6108
6109	/* wait till EH commits suicide */
6110	ata_port_wait_eh(ap);
6111
6112	/* it better be dead now */
6113	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6114
6115	cancel_delayed_work_sync(&ap->hotplug_task);
 
6116
6117 skip_eh:
6118	if (ap->pmp_link) {
6119		int i;
6120		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6121			ata_tlink_delete(&ap->pmp_link[i]);
6122	}
6123	ata_tport_delete(ap);
6124
6125	/* remove the associated SCSI host */
6126	scsi_remove_host(ap->scsi_host);
 
6127}
6128
6129/**
6130 *	ata_host_detach - Detach all ports of an ATA host
6131 *	@host: Host to detach
6132 *
6133 *	Detach all ports of @host.
6134 *
6135 *	LOCKING:
6136 *	Kernel thread context (may sleep).
6137 */
6138void ata_host_detach(struct ata_host *host)
6139{
6140	int i;
6141
6142	for (i = 0; i < host->n_ports; i++)
6143		ata_port_detach(host->ports[i]);
6144
6145	/* the host is dead now, dissociate ACPI */
6146	ata_acpi_dissociate(host);
6147}
 
6148
6149#ifdef CONFIG_PCI
6150
6151/**
6152 *	ata_pci_remove_one - PCI layer callback for device removal
6153 *	@pdev: PCI device that was removed
6154 *
6155 *	The PCI layer indicates to libata via this hook that a hot-unplug or
6156 *	module unload event has occurred.  Detach all ports.  Resource
6157 *	release is handled via devres.
6158 *
6159 *	LOCKING:
6160 *	Inherited from PCI layer (may sleep).
6161 */
6162void ata_pci_remove_one(struct pci_dev *pdev)
6163{
6164	struct device *dev = &pdev->dev;
6165	struct ata_host *host = dev_get_drvdata(dev);
6166
6167	ata_host_detach(host);
6168}
6169
6170/* move to PCI subsystem */
6171int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6172{
6173	unsigned long tmp = 0;
6174
6175	switch (bits->width) {
6176	case 1: {
6177		u8 tmp8 = 0;
6178		pci_read_config_byte(pdev, bits->reg, &tmp8);
6179		tmp = tmp8;
6180		break;
6181	}
6182	case 2: {
6183		u16 tmp16 = 0;
6184		pci_read_config_word(pdev, bits->reg, &tmp16);
6185		tmp = tmp16;
6186		break;
6187	}
6188	case 4: {
6189		u32 tmp32 = 0;
6190		pci_read_config_dword(pdev, bits->reg, &tmp32);
6191		tmp = tmp32;
6192		break;
6193	}
6194
6195	default:
6196		return -EINVAL;
6197	}
6198
6199	tmp &= bits->mask;
6200
6201	return (tmp == bits->val) ? 1 : 0;
6202}
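/*
 * Illustrative sketch (the register offset, mask and value below are
 * hypothetical): PATA LLDs typically use pci_test_config_bits() to
 * check whether a channel is enabled in PCI configuration space before
 * touching it.
 *
 *	static const struct pci_bits my_enable_bits = {
 *		0x41, 1, 0x80, 0x80	// reg, width in bytes, mask, val
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;		// channel disabled
 */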
 
6203
6204#ifdef CONFIG_PM
6205void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6206{
6207	pci_save_state(pdev);
6208	pci_disable_device(pdev);
6209
6210	if (mesg.event & PM_EVENT_SLEEP)
6211		pci_set_power_state(pdev, PCI_D3hot);
6212}
 
6213
6214int ata_pci_device_do_resume(struct pci_dev *pdev)
6215{
6216	int rc;
6217
6218	pci_set_power_state(pdev, PCI_D0);
6219	pci_restore_state(pdev);
6220
6221	rc = pcim_enable_device(pdev);
6222	if (rc) {
6223		dev_err(&pdev->dev,
6224			"failed to enable device after resume (%d)\n", rc);
6225		return rc;
6226	}
6227
6228	pci_set_master(pdev);
6229	return 0;
6230}
 
6231
6232int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6233{
6234	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6235	int rc = 0;
6236
6237	rc = ata_host_suspend(host, mesg);
6238	if (rc)
6239		return rc;
6240
6241	ata_pci_device_do_suspend(pdev, mesg);
6242
6243	return 0;
6244}
 
6245
6246int ata_pci_device_resume(struct pci_dev *pdev)
6247{
6248	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6249	int rc;
6250
6251	rc = ata_pci_device_do_resume(pdev);
6252	if (rc == 0)
6253		ata_host_resume(host);
6254	return rc;
6255}
 
6256#endif /* CONFIG_PM */
6257
6258#endif /* CONFIG_PCI */
6259
6260static int __init ata_parse_force_one(char **cur,
6261				      struct ata_force_ent *force_ent,
6262				      const char **reason)
6263{
6264	/* FIXME: Currently, there's no way to tag init const data and
6265	 * using __initdata causes build failure on some versions of
6266	 * gcc.  Once __initdataconst is implemented, add const to the
6267	 * following structure.
6268	 */
6269	static struct ata_force_param force_tbl[] __initdata = {
6270		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6271		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6272		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6273		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6274		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6275		{ "sata",	.cbl		= ATA_CBL_SATA },
6276		{ "1.5Gbps",	.spd_limit	= 1 },
6277		{ "3.0Gbps",	.spd_limit	= 2 },
6278		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6279		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6280		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6281		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6282		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6283		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6284		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6285		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6286		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6287		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6288		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6289		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6290		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6291		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6292		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6293		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6294		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6295		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6296		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6297		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6298		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6299		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6300		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6301		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6302		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6303		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6304		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6305		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6306		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6307		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6308		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6309		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6310		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6311		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6312		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6313		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6314		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6315		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6316		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6317		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6318	};
6319	char *start = *cur, *p = *cur;
6320	char *id, *val, *endp;
6321	const struct ata_force_param *match_fp = NULL;
6322	int nr_matches = 0, i;
6323
6324	/* find where this param ends and update *cur */
6325	while (*p != '\0' && *p != ',')
6326		p++;
6327
6328	if (*p == '\0')
6329		*cur = p;
6330	else
6331		*cur = p + 1;
6332
6333	*p = '\0';
6334
6335	/* parse */
6336	p = strchr(start, ':');
6337	if (!p) {
6338		val = strstrip(start);
6339		goto parse_val;
6340	}
6341	*p = '\0';
6342
6343	id = strstrip(start);
6344	val = strstrip(p + 1);
6345
6346	/* parse id */
6347	p = strchr(id, '.');
6348	if (p) {
6349		*p++ = '\0';
6350		force_ent->device = simple_strtoul(p, &endp, 10);
6351		if (p == endp || *endp != '\0') {
6352			*reason = "invalid device";
6353			return -EINVAL;
6354		}
6355	}
6356
6357	force_ent->port = simple_strtoul(id, &endp, 10);
6358	if (p == endp || *endp != '\0') {
6359		*reason = "invalid port/link";
6360		return -EINVAL;
6361	}
6362
6363 parse_val:
6364	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6365	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6366		const struct ata_force_param *fp = &force_tbl[i];
6367
6368		if (strncasecmp(val, fp->name, strlen(val)))
6369			continue;
6370
6371		nr_matches++;
6372		match_fp = fp;
6373
6374		if (strcasecmp(val, fp->name) == 0) {
6375			nr_matches = 1;
6376			break;
6377		}
6378	}
6379
6380	if (!nr_matches) {
6381		*reason = "unknown value";
6382		return -EINVAL;
6383	}
6384	if (nr_matches > 1) {
6385		*reason = "ambiguous value";
6386		return -EINVAL;
6387	}
6388
6389	force_ent->param = *match_fp;
6390
6391	return 0;
6392}
6393
6394static void __init ata_parse_force_param(void)
6395{
6396	int idx = 0, size = 1;
6397	int last_port = -1, last_device = -1;
6398	char *p, *cur, *next;
6399
6400	/* calculate maximum number of params and allocate force_tbl */
6401	for (p = ata_force_param_buf; *p; p++)
6402		if (*p == ',')
6403			size++;
6404
6405	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6406	if (!ata_force_tbl) {
6407		printk(KERN_WARNING "ata: failed to extend force table, "
6408		       "libata.force ignored\n");
6409		return;
6410	}
6411
6412	/* parse and populate the table */
6413	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6414		const char *reason = "";
6415		struct ata_force_ent te = { .port = -1, .device = -1 };
6416
6417		next = cur;
6418		if (ata_parse_force_one(&next, &te, &reason)) {
6419			printk(KERN_WARNING "ata: failed to parse force "
6420			       "parameter \"%s\" (%s)\n",
6421			       cur, reason);
6422			continue;
6423		}
6424
6425		if (te.port == -1) {
6426			te.port = last_port;
6427			te.device = last_device;
6428		}
6429
6430		ata_force_tbl[idx++] = te;
6431
6432		last_port = te.port;
6433		last_device = te.device;
6434	}
6435
6436	ata_force_tbl_size = idx;
6437}
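/*
 * Illustrative examples of the libata.force syntax handled above.  Each
 * comma-separated entry is [port[.device]:]value, with the values taken
 * from force_tbl:
 *
 *	libata.force=3.00:noncq		disable NCQ on port 3, device 0
 *	libata.force=1:1.5Gbps,2:40c	limit port 1 to 1.5Gbps and force
 *					a 40-wire cable on port 2
 */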
6438
6439static int __init ata_init(void)
6440{
6441	int rc;
6442
6443	ata_parse_force_param();
6444
6445	rc = ata_sff_init();
6446	if (rc) {
6447		kfree(ata_force_tbl);
6448		return rc;
6449	}
6450
6451	libata_transport_init();
6452	ata_scsi_transport_template = ata_attach_transport();
6453	if (!ata_scsi_transport_template) {
6454		ata_sff_exit();
6455		rc = -ENOMEM;
6456		goto err_out;
6457	}
6458
6459	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6460	return 0;
6461
6462err_out:
6463	return rc;
6464}
6465
6466static void __exit ata_exit(void)
6467{
6468	ata_release_transport(ata_scsi_transport_template);
6469	libata_transport_exit();
6470	ata_sff_exit();
6471	kfree(ata_force_tbl);
6472}
6473
6474subsys_initcall(ata_init);
6475module_exit(ata_exit);
6476
6477static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6478
6479int ata_ratelimit(void)
6480{
6481	return __ratelimit(&ratelimit);
6482}
 
6483
6484/**
6485 *	ata_msleep - ATA EH owner aware msleep
6486 *	@ap: ATA port to attribute the sleep to
6487 *	@msecs: duration to sleep in milliseconds
6488 *
6489 *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6490 *	ownership is released before going to sleep and reacquired
6491 *	after the sleep is complete.  IOW, other ports sharing the
6492 *	@ap->host will be allowed to own the EH while this task is
6493 *	sleeping.
6494 *
6495 *	LOCKING:
6496 *	Might sleep.
6497 */
6498void ata_msleep(struct ata_port *ap, unsigned int msecs)
6499{
6500	bool owns_eh = ap && ap->host->eh_owner == current;
6501
6502	if (owns_eh)
6503		ata_eh_release(ap);
6504
6505	msleep(msecs);
6506
6507	if (owns_eh)
6508		ata_eh_acquire(ap);
6509}
 
6510
6511/**
6512 *	ata_wait_register - wait until register value changes
6513 *	@ap: ATA port to wait register for, can be NULL
6514 *	@reg: IO-mapped register
6515 *	@mask: Mask to apply to read register value
6516 *	@val: Wait condition
6517 *	@interval: polling interval in milliseconds
6518 *	@timeout: timeout in milliseconds
6519 *
6520 *	Waiting for some bits of register to change is a common
6521 *	operation for ATA controllers.  This function reads 32bit LE
6522 *	IO-mapped register @reg and tests for the following condition.
6523 *
6524 *	(*@reg & mask) != val
6525 *
6526 *	If the condition is met, it returns; otherwise, the process is
6527 *	repeated every @interval milliseconds until @timeout expires.
6528 *
6529 *	LOCKING:
6530 *	Kernel thread context (may sleep)
6531 *
6532 *	RETURNS:
6533 *	The final register value.
6534 */
6535u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6536		      unsigned long interval, unsigned long timeout)
6537{
6538	unsigned long deadline;
6539	u32 tmp;
6540
6541	tmp = ioread32(reg);
6542
6543	/* Calculate timeout _after_ the first read to make sure
6544	 * preceding writes reach the controller before starting to
6545	 * eat away the timeout.
6546	 */
6547	deadline = ata_deadline(jiffies, timeout);
6548
6549	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6550		ata_msleep(ap, interval);
6551		tmp = ioread32(reg);
6552	}
6553
6554	return tmp;
6555}
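/*
 * Illustrative sketch (MY_CTL_REG and MY_CTL_BUSY are hypothetical):
 * waiting up to one second, polling every 10ms, for a busy bit to
 * clear.  The loop above exits as soon as (reg & mask) != val, so
 * passing the busy bit as both mask and val waits for it to drop.
 *
 *	status = ata_wait_register(ap, mmio + MY_CTL_REG,
 *				   MY_CTL_BUSY, MY_CTL_BUSY, 10, 1000);
 *	if (status & MY_CTL_BUSY)
 *		return -EBUSY;		// still busy after the timeout
 */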
 
6556
6557/*
6558 * Dummy port_ops
6559 */
6560static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6561{
6562	return AC_ERR_SYSTEM;
6563}
6564
6565static void ata_dummy_error_handler(struct ata_port *ap)
6566{
6567	/* truly dummy */
6568}
6569
6570struct ata_port_operations ata_dummy_port_ops = {
6571	.qc_prep		= ata_noop_qc_prep,
6572	.qc_issue		= ata_dummy_qc_issue,
6573	.error_handler		= ata_dummy_error_handler,
 
 
6574};
 
6575
6576const struct ata_port_info ata_dummy_port_info = {
6577	.port_ops		= &ata_dummy_port_ops,
6578};
6579
6580/*
6581 * Utility print functions
6582 */
6583int ata_port_printk(const struct ata_port *ap, const char *level,
6584		    const char *fmt, ...)
6585{
6586	struct va_format vaf;
6587	va_list args;
6588	int r;
6589
6590	va_start(args, fmt);
6591
6592	vaf.fmt = fmt;
6593	vaf.va = &args;
6594
6595	r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6596
6597	va_end(args);
6598
6599	return r;
6600}
6601EXPORT_SYMBOL(ata_port_printk);
6602
6603int ata_link_printk(const struct ata_link *link, const char *level,
6604		    const char *fmt, ...)
6605{
6606	struct va_format vaf;
6607	va_list args;
6608	int r;
6609
6610	va_start(args, fmt);
6611
6612	vaf.fmt = fmt;
6613	vaf.va = &args;
6614
6615	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6616		r = printk("%sata%u.%02u: %pV",
6617			   level, link->ap->print_id, link->pmp, &vaf);
6618	else
6619		r = printk("%sata%u: %pV",
6620			   level, link->ap->print_id, &vaf);
6621
6622	va_end(args);
6623
6624	return r;
6625}
6626EXPORT_SYMBOL(ata_link_printk);
6627
6628int ata_dev_printk(const struct ata_device *dev, const char *level,
6629		    const char *fmt, ...)
6630{
6631	struct va_format vaf;
6632	va_list args;
6633	int r;
6634
6635	va_start(args, fmt);
6636
6637	vaf.fmt = fmt;
6638	vaf.va = &args;
6639
6640	r = printk("%sata%u.%02u: %pV",
6641		   level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6642		   &vaf);
6643
6644	va_end(args);
6645
6646	return r;
6647}
6648EXPORT_SYMBOL(ata_dev_printk);
6649
6650void ata_print_version(const struct device *dev, const char *version)
6651{
6652	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6653}
6654EXPORT_SYMBOL(ata_print_version);
6655
6656/*
6657 * libata is essentially a library of internal helper functions for
6658 * low-level ATA host controller drivers.  As such, the API/ABI is
6659 * likely to change as new drivers are added and updated.
6660 * Do not depend on ABI/API stability.
6661 */
6662EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6663EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6664EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6665EXPORT_SYMBOL_GPL(ata_base_port_ops);
6666EXPORT_SYMBOL_GPL(sata_port_ops);
6667EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6668EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6669EXPORT_SYMBOL_GPL(ata_link_next);
6670EXPORT_SYMBOL_GPL(ata_dev_next);
6671EXPORT_SYMBOL_GPL(ata_std_bios_param);
6672EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6673EXPORT_SYMBOL_GPL(ata_host_init);
6674EXPORT_SYMBOL_GPL(ata_host_alloc);
6675EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6676EXPORT_SYMBOL_GPL(ata_slave_link_init);
6677EXPORT_SYMBOL_GPL(ata_host_start);
6678EXPORT_SYMBOL_GPL(ata_host_register);
6679EXPORT_SYMBOL_GPL(ata_host_activate);
6680EXPORT_SYMBOL_GPL(ata_host_detach);
6681EXPORT_SYMBOL_GPL(ata_sg_init);
6682EXPORT_SYMBOL_GPL(ata_qc_complete);
6683EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6684EXPORT_SYMBOL_GPL(atapi_cmd_type);
6685EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6686EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6687EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6688EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6689EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6690EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6691EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6692EXPORT_SYMBOL_GPL(ata_mode_string);
6693EXPORT_SYMBOL_GPL(ata_id_xfermask);
6694EXPORT_SYMBOL_GPL(ata_do_set_mode);
6695EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6696EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6697EXPORT_SYMBOL_GPL(ata_dev_disable);
6698EXPORT_SYMBOL_GPL(sata_set_spd);
6699EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6700EXPORT_SYMBOL_GPL(sata_link_debounce);
6701EXPORT_SYMBOL_GPL(sata_link_resume);
6702EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6703EXPORT_SYMBOL_GPL(ata_std_prereset);
6704EXPORT_SYMBOL_GPL(sata_link_hardreset);
6705EXPORT_SYMBOL_GPL(sata_std_hardreset);
6706EXPORT_SYMBOL_GPL(ata_std_postreset);
6707EXPORT_SYMBOL_GPL(ata_dev_classify);
6708EXPORT_SYMBOL_GPL(ata_dev_pair);
6709EXPORT_SYMBOL_GPL(ata_ratelimit);
6710EXPORT_SYMBOL_GPL(ata_msleep);
6711EXPORT_SYMBOL_GPL(ata_wait_register);
6712EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6713EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6714EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6715EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6716EXPORT_SYMBOL_GPL(sata_scr_valid);
6717EXPORT_SYMBOL_GPL(sata_scr_read);
6718EXPORT_SYMBOL_GPL(sata_scr_write);
6719EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6720EXPORT_SYMBOL_GPL(ata_link_online);
6721EXPORT_SYMBOL_GPL(ata_link_offline);
6722#ifdef CONFIG_PM
6723EXPORT_SYMBOL_GPL(ata_host_suspend);
6724EXPORT_SYMBOL_GPL(ata_host_resume);
6725#endif /* CONFIG_PM */
6726EXPORT_SYMBOL_GPL(ata_id_string);
6727EXPORT_SYMBOL_GPL(ata_id_c_string);
6728EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6729EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6730
6731EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6732EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6733EXPORT_SYMBOL_GPL(ata_timing_compute);
6734EXPORT_SYMBOL_GPL(ata_timing_merge);
6735EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6736
6737#ifdef CONFIG_PCI
6738EXPORT_SYMBOL_GPL(pci_test_config_bits);
6739EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6740#ifdef CONFIG_PM
6741EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6742EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6743EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6744EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6745#endif /* CONFIG_PM */
6746#endif /* CONFIG_PCI */
6747
6748EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6749EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6750EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6751EXPORT_SYMBOL_GPL(ata_port_desc);
6752#ifdef CONFIG_PCI
6753EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6754#endif /* CONFIG_PCI */
6755EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6756EXPORT_SYMBOL_GPL(ata_link_abort);
6757EXPORT_SYMBOL_GPL(ata_port_abort);
6758EXPORT_SYMBOL_GPL(ata_port_freeze);
6759EXPORT_SYMBOL_GPL(sata_async_notification);
6760EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6761EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6762EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6763EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6764EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6765EXPORT_SYMBOL_GPL(ata_do_eh);
6766EXPORT_SYMBOL_GPL(ata_std_error_handler);
6767
6768EXPORT_SYMBOL_GPL(ata_cable_40wire);
6769EXPORT_SYMBOL_GPL(ata_cable_80wire);
6770EXPORT_SYMBOL_GPL(ata_cable_unknown);
6771EXPORT_SYMBOL_GPL(ata_cable_ignore);
6772EXPORT_SYMBOL_GPL(ata_cable_sata);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  libata-core.c - helper library for ATA
   4 *
   5 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   6 *  Copyright 2003-2004 Jeff Garzik
   7 *
   8 *  libata documentation is available via 'make {ps|pdf}docs',
   9 *  as Documentation/driver-api/libata.rst
  10 *
  11 *  Hardware documentation available from http://www.t13.org/ and
  12 *  http://www.sata-io.org/
  13 *
  14 *  Standards documents from:
  15 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
  16 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
  17 *	http://www.sata-io.org (SATA)
  18 *	http://www.compactflash.org (CF)
  19 *	http://www.qic.org (QIC157 - Tape and DSC)
  20 *	http://www.ce-ata.org (CE-ATA: not supported)
  21 *
  22 * libata is essentially a library of internal helper functions for
  23 * low-level ATA host controller drivers.  As such, the API/ABI is
  24 * likely to change as new drivers are added and updated.
  25 * Do not depend on ABI/API stability.
  26 */
  27
  28#include <linux/kernel.h>
  29#include <linux/module.h>
  30#include <linux/pci.h>
  31#include <linux/init.h>
  32#include <linux/list.h>
  33#include <linux/mm.h>
  34#include <linux/spinlock.h>
  35#include <linux/blkdev.h>
  36#include <linux/delay.h>
  37#include <linux/timer.h>
  38#include <linux/time.h>
  39#include <linux/interrupt.h>
  40#include <linux/completion.h>
  41#include <linux/suspend.h>
  42#include <linux/workqueue.h>
  43#include <linux/scatterlist.h>
  44#include <linux/io.h>
 
  45#include <linux/log2.h>
  46#include <linux/slab.h>
  47#include <linux/glob.h>
  48#include <scsi/scsi.h>
  49#include <scsi/scsi_cmnd.h>
  50#include <scsi/scsi_host.h>
  51#include <linux/libata.h>
  52#include <asm/byteorder.h>
  53#include <asm/unaligned.h>
  54#include <linux/cdrom.h>
  55#include <linux/ratelimit.h>
  56#include <linux/leds.h>
  57#include <linux/pm_runtime.h>
  58#include <linux/platform_device.h>
  59#include <asm/setup.h>
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/libata.h>
  63
  64#include "libata.h"
  65#include "libata-transport.h"
  66
  67const struct ata_port_operations ata_base_port_ops = {
  68	.prereset		= ata_std_prereset,
  69	.postreset		= ata_std_postreset,
  70	.error_handler		= ata_std_error_handler,
  71	.sched_eh		= ata_std_sched_eh,
  72	.end_eh			= ata_std_end_eh,
  73};
  74
  75const struct ata_port_operations sata_port_ops = {
  76	.inherits		= &ata_base_port_ops,
  77
  78	.qc_defer		= ata_std_qc_defer,
  79	.hardreset		= sata_std_hardreset,
  80};
  81EXPORT_SYMBOL_GPL(sata_port_ops);
  82
  83static unsigned int ata_dev_init_params(struct ata_device *dev,
  84					u16 heads, u16 sectors);
  85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
  86static void ata_dev_xfermask(struct ata_device *dev);
  87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
  88
  89atomic_t ata_print_id = ATOMIC_INIT(0);
  90
  91#ifdef CONFIG_ATA_FORCE
  92struct ata_force_param {
  93	const char	*name;
  94	u8		cbl;
  95	u8		spd_limit;
  96	unsigned int	xfer_mask;
  97	unsigned int	horkage_on;
  98	unsigned int	horkage_off;
  99	u16		lflags_on;
 100	u16		lflags_off;
 101};
 102
 103struct ata_force_ent {
 104	int			port;
 105	int			device;
 106	struct ata_force_param	param;
 107};
 108
 109static struct ata_force_ent *ata_force_tbl;
 110static int ata_force_tbl_size;
 111
 112static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
 113/* param_buf is thrown away after initialization, disallow read */
 114module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
 115MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
 116#endif
 117
 118static int atapi_enabled = 1;
 119module_param(atapi_enabled, int, 0444);
 120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
 121
 122static int atapi_dmadir = 0;
 123module_param(atapi_dmadir, int, 0444);
 124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
 125
 126int atapi_passthru16 = 1;
 127module_param(atapi_passthru16, int, 0444);
 128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
 129
 130int libata_fua = 0;
 131module_param_named(fua, libata_fua, int, 0444);
 132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
 133
 134static int ata_ignore_hpa;
 135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
 136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
 137
 138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
 139module_param_named(dma, libata_dma_mask, int, 0444);
 140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
 141
 142static int ata_probe_timeout;
 143module_param(ata_probe_timeout, int, 0444);
 144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
 145
 146int libata_noacpi = 0;
 147module_param_named(noacpi, libata_noacpi, int, 0444);
 148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
 149
 150int libata_allow_tpm = 0;
 151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
 152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
 153
 154static int atapi_an;
 155module_param(atapi_an, int, 0444);
  156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
 157
 158MODULE_AUTHOR("Jeff Garzik");
 159MODULE_DESCRIPTION("Library module for ATA devices");
 160MODULE_LICENSE("GPL");
 161MODULE_VERSION(DRV_VERSION);
 162
 163static inline bool ata_dev_print_info(struct ata_device *dev)
 164{
 165	struct ata_eh_context *ehc = &dev->link->eh_context;
 166
 167	return ehc->i.flags & ATA_EHI_PRINTINFO;
 168}
 169
 170static bool ata_sstatus_online(u32 sstatus)
 171{
 172	return (sstatus & 0xf) == 0x3;
 173}
 174
 175/**
 176 *	ata_link_next - link iteration helper
 177 *	@link: the previous link, NULL to start
 178 *	@ap: ATA port containing links to iterate
 179 *	@mode: iteration mode, one of ATA_LITER_*
 180 *
 181 *	LOCKING:
 182 *	Host lock or EH context.
 183 *
 184 *	RETURNS:
 185 *	Pointer to the next link.
 186 */
 187struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
 188			       enum ata_link_iter_mode mode)
 189{
 190	BUG_ON(mode != ATA_LITER_EDGE &&
 191	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
 192
 193	/* NULL link indicates start of iteration */
 194	if (!link)
 195		switch (mode) {
 196		case ATA_LITER_EDGE:
 197		case ATA_LITER_PMP_FIRST:
 198			if (sata_pmp_attached(ap))
 199				return ap->pmp_link;
 200			fallthrough;
 201		case ATA_LITER_HOST_FIRST:
 202			return &ap->link;
 203		}
 204
 205	/* we just iterated over the host link, what's next? */
 206	if (link == &ap->link)
 207		switch (mode) {
 208		case ATA_LITER_HOST_FIRST:
 209			if (sata_pmp_attached(ap))
 210				return ap->pmp_link;
 211			fallthrough;
 212		case ATA_LITER_PMP_FIRST:
 213			if (unlikely(ap->slave_link))
 214				return ap->slave_link;
 215			fallthrough;
 216		case ATA_LITER_EDGE:
 217			return NULL;
 218		}
 219
 220	/* slave_link excludes PMP */
 221	if (unlikely(link == ap->slave_link))
 222		return NULL;
 223
 224	/* we were over a PMP link */
 225	if (++link < ap->pmp_link + ap->nr_pmp_links)
 226		return link;
 227
 228	if (mode == ATA_LITER_PMP_FIRST)
 229		return &ap->link;
 230
 231	return NULL;
 232}
 233EXPORT_SYMBOL_GPL(ata_link_next);
 234
 235/**
 236 *	ata_dev_next - device iteration helper
 237 *	@dev: the previous device, NULL to start
 238 *	@link: ATA link containing devices to iterate
 239 *	@mode: iteration mode, one of ATA_DITER_*
 240 *
 241 *	LOCKING:
 242 *	Host lock or EH context.
 243 *
 244 *	RETURNS:
 245 *	Pointer to the next device.
 246 */
 247struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
 248				enum ata_dev_iter_mode mode)
 249{
 250	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
 251	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
 252
 253	/* NULL dev indicates start of iteration */
 254	if (!dev)
 255		switch (mode) {
 256		case ATA_DITER_ENABLED:
 257		case ATA_DITER_ALL:
 258			dev = link->device;
 259			goto check;
 260		case ATA_DITER_ENABLED_REVERSE:
 261		case ATA_DITER_ALL_REVERSE:
 262			dev = link->device + ata_link_max_devices(link) - 1;
 263			goto check;
 264		}
 265
 266 next:
 267	/* move to the next one */
 268	switch (mode) {
 269	case ATA_DITER_ENABLED:
 270	case ATA_DITER_ALL:
 271		if (++dev < link->device + ata_link_max_devices(link))
 272			goto check;
 273		return NULL;
 274	case ATA_DITER_ENABLED_REVERSE:
 275	case ATA_DITER_ALL_REVERSE:
 276		if (--dev >= link->device)
 277			goto check;
 278		return NULL;
 279	}
 280
 281 check:
 282	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
 283	    !ata_dev_enabled(dev))
 284		goto next;
 285	return dev;
 286}
 287EXPORT_SYMBOL_GPL(ata_dev_next);
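/*
 * Illustrative sketch: callers normally reach these iterators through
 * the ata_for_each_link()/ata_for_each_dev() helpers from
 * <linux/libata.h>:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_info(dev, "enabled device found\n");
 */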
 288
 289/**
 290 *	ata_dev_phys_link - find physical link for a device
 291 *	@dev: ATA device to look up physical link for
 292 *
 293 *	Look up physical link which @dev is attached to.  Note that
 294 *	this is different from @dev->link only when @dev is on slave
 295 *	link.  For all other cases, it's the same as @dev->link.
 296 *
 297 *	LOCKING:
 298 *	Don't care.
 299 *
 300 *	RETURNS:
 301 *	Pointer to the found physical link.
 302 */
 303struct ata_link *ata_dev_phys_link(struct ata_device *dev)
 304{
 305	struct ata_port *ap = dev->link->ap;
 306
 307	if (!ap->slave_link)
 308		return dev->link;
 309	if (!dev->devno)
 310		return &ap->link;
 311	return ap->slave_link;
 312}
 313
 314#ifdef CONFIG_ATA_FORCE
 315/**
 316 *	ata_force_cbl - force cable type according to libata.force
 317 *	@ap: ATA port of interest
 318 *
 319 *	Force cable type according to libata.force and whine about it.
 320 *	The last entry which has matching port number is used, so it
 321 *	can be specified as part of device force parameters.  For
 322 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 323 *	same effect.
 324 *
 325 *	LOCKING:
 326 *	EH context.
 327 */
 328void ata_force_cbl(struct ata_port *ap)
 329{
 330	int i;
 331
 332	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 333		const struct ata_force_ent *fe = &ata_force_tbl[i];
 334
 335		if (fe->port != -1 && fe->port != ap->print_id)
 336			continue;
 337
 338		if (fe->param.cbl == ATA_CBL_NONE)
 339			continue;
 340
 341		ap->cbl = fe->param.cbl;
 342		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
 343		return;
 344	}
 345}
 346
 347/**
 348 *	ata_force_link_limits - force link limits according to libata.force
 349 *	@link: ATA link of interest
 350 *
 351 *	Force link flags and SATA spd limit according to libata.force
 352 *	and whine about it.  When only the port part is specified
 353 *	(e.g. 1:), the limit applies to all links connected to both
 354 *	the host link and all fan-out ports connected via PMP.  If the
 355 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 356 *	first fan-out link not the host link.  Device number 15 always
 357 *	points to the host link whether PMP is attached or not.  If the
  358 *	controller has a slave link, device number 16 points to it.
 359 *
 360 *	LOCKING:
 361 *	EH context.
 362 */
 363static void ata_force_link_limits(struct ata_link *link)
 364{
 365	bool did_spd = false;
 366	int linkno = link->pmp;
 367	int i;
 368
 369	if (ata_is_host_link(link))
 370		linkno += 15;
 371
 372	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 373		const struct ata_force_ent *fe = &ata_force_tbl[i];
 374
 375		if (fe->port != -1 && fe->port != link->ap->print_id)
 376			continue;
 377
 378		if (fe->device != -1 && fe->device != linkno)
 379			continue;
 380
 381		/* only honor the first spd limit */
 382		if (!did_spd && fe->param.spd_limit) {
 383			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
 384			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
 385					fe->param.name);
 386			did_spd = true;
 387		}
 388
 389		/* let lflags stack */
 390		if (fe->param.lflags_on) {
 391			link->flags |= fe->param.lflags_on;
 392			ata_link_notice(link,
 393					"FORCE: link flag 0x%x forced -> 0x%x\n",
 394					fe->param.lflags_on, link->flags);
 395		}
 396		if (fe->param.lflags_off) {
 397			link->flags &= ~fe->param.lflags_off;
 398			ata_link_notice(link,
 399				"FORCE: link flag 0x%x cleared -> 0x%x\n",
 400				fe->param.lflags_off, link->flags);
 401		}
 402	}
 403}
 404
 405/**
 406 *	ata_force_xfermask - force xfermask according to libata.force
 407 *	@dev: ATA device of interest
 408 *
 409 *	Force xfer_mask according to libata.force and whine about it.
 410 *	For consistency with link selection, device number 15 selects
 411 *	the first device connected to the host link.
 412 *
 413 *	LOCKING:
 414 *	EH context.
 415 */
 416static void ata_force_xfermask(struct ata_device *dev)
 417{
 418	int devno = dev->link->pmp + dev->devno;
 419	int alt_devno = devno;
 420	int i;
 421
 422	/* allow n.15/16 for devices attached to host port */
 423	if (ata_is_host_link(dev->link))
 424		alt_devno += 15;
 425
 426	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
 427		const struct ata_force_ent *fe = &ata_force_tbl[i];
 428		unsigned int pio_mask, mwdma_mask, udma_mask;
 429
 430		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 431			continue;
 432
 433		if (fe->device != -1 && fe->device != devno &&
 434		    fe->device != alt_devno)
 435			continue;
 436
 437		if (!fe->param.xfer_mask)
 438			continue;
 439
 440		ata_unpack_xfermask(fe->param.xfer_mask,
 441				    &pio_mask, &mwdma_mask, &udma_mask);
 442		if (udma_mask)
 443			dev->udma_mask = udma_mask;
 444		else if (mwdma_mask) {
 445			dev->udma_mask = 0;
 446			dev->mwdma_mask = mwdma_mask;
 447		} else {
 448			dev->udma_mask = 0;
 449			dev->mwdma_mask = 0;
 450			dev->pio_mask = pio_mask;
 451		}
 452
 453		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
 454			       fe->param.name);
 455		return;
 456	}
 457}
 458
 459/**
 460 *	ata_force_horkage - force horkage according to libata.force
 461 *	@dev: ATA device of interest
 462 *
 463 *	Force horkage according to libata.force and whine about it.
 464 *	For consistency with link selection, device number 15 selects
 465 *	the first device connected to the host link.
 466 *
 467 *	LOCKING:
 468 *	EH context.
 469 */
 470static void ata_force_horkage(struct ata_device *dev)
 471{
 472	int devno = dev->link->pmp + dev->devno;
 473	int alt_devno = devno;
 474	int i;
 475
 476	/* allow n.15/16 for devices attached to host port */
 477	if (ata_is_host_link(dev->link))
 478		alt_devno += 15;
 479
 480	for (i = 0; i < ata_force_tbl_size; i++) {
 481		const struct ata_force_ent *fe = &ata_force_tbl[i];
 482
 483		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
 484			continue;
 485
 486		if (fe->device != -1 && fe->device != devno &&
 487		    fe->device != alt_devno)
 488			continue;
 489
 490		if (!(~dev->horkage & fe->param.horkage_on) &&
 491		    !(dev->horkage & fe->param.horkage_off))
 492			continue;
 493
 494		dev->horkage |= fe->param.horkage_on;
 495		dev->horkage &= ~fe->param.horkage_off;
 496
 497		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
 498			       fe->param.name);
 499	}
 500}
 501#else
 502static inline void ata_force_link_limits(struct ata_link *link) { }
 503static inline void ata_force_xfermask(struct ata_device *dev) { }
 504static inline void ata_force_horkage(struct ata_device *dev) { }
 505#endif
 506
 507/**
 508 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 509 *	@opcode: SCSI opcode
 510 *
 511 *	Determine ATAPI command type from @opcode.
 512 *
 513 *	LOCKING:
 514 *	None.
 515 *
 516 *	RETURNS:
 517 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 518 */
 519int atapi_cmd_type(u8 opcode)
 520{
 521	switch (opcode) {
 522	case GPCMD_READ_10:
 523	case GPCMD_READ_12:
 524		return ATAPI_READ;
 525
 526	case GPCMD_WRITE_10:
 527	case GPCMD_WRITE_12:
 528	case GPCMD_WRITE_AND_VERIFY_10:
 529		return ATAPI_WRITE;
 530
 531	case GPCMD_READ_CD:
 532	case GPCMD_READ_CD_MSF:
 533		return ATAPI_READ_CD;
 534
 535	case ATA_16:
 536	case ATA_12:
 537		if (atapi_passthru16)
 538			return ATAPI_PASS_THRU;
 539		fallthrough;
 540	default:
 541		return ATAPI_MISC;
 542	}
 543}
 544EXPORT_SYMBOL_GPL(atapi_cmd_type);
 545
 546static const u8 ata_rw_cmds[] = {
 547	/* pio multi */
 548	ATA_CMD_READ_MULTI,
 549	ATA_CMD_WRITE_MULTI,
 550	ATA_CMD_READ_MULTI_EXT,
 551	ATA_CMD_WRITE_MULTI_EXT,
 552	0,
 553	0,
 554	0,
 555	0,
 556	/* pio */
 557	ATA_CMD_PIO_READ,
 558	ATA_CMD_PIO_WRITE,
 559	ATA_CMD_PIO_READ_EXT,
 560	ATA_CMD_PIO_WRITE_EXT,
 561	0,
 562	0,
 563	0,
 564	0,
 565	/* dma */
 566	ATA_CMD_READ,
 567	ATA_CMD_WRITE,
 568	ATA_CMD_READ_EXT,
 569	ATA_CMD_WRITE_EXT,
 570	0,
 571	0,
 572	0,
 573	ATA_CMD_WRITE_FUA_EXT
 574};
 575
 576/**
 577 *	ata_set_rwcmd_protocol - set taskfile r/w command and protocol
 578 *	@dev: target device for the taskfile
 579 *	@tf: taskfile to examine and configure
 580 *
 581 *	Examine the device configuration and tf->flags to determine
 582 *	the proper read/write command and protocol to use for @tf.
 583 *
 584 *	LOCKING:
 585 *	caller.
 586 */
 587static bool ata_set_rwcmd_protocol(struct ata_device *dev,
 588				   struct ata_taskfile *tf)
 589{
 590	u8 cmd;
 591
 592	int index, fua, lba48, write;
 593
 594	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
 595	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
 596	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
 597
 598	if (dev->flags & ATA_DFLAG_PIO) {
 599		tf->protocol = ATA_PROT_PIO;
 600		index = dev->multi_count ? 0 : 8;
 601	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
 602		/* Unable to use DMA due to host limitation */
 603		tf->protocol = ATA_PROT_PIO;
 604		index = dev->multi_count ? 0 : 8;
 605	} else {
 606		tf->protocol = ATA_PROT_DMA;
 607		index = 16;
 608	}
 609
 610	cmd = ata_rw_cmds[index + fua + lba48 + write];
 611	if (!cmd)
 612		return false;
 613
 614	tf->command = cmd;
 615
 616	return true;
 617}
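/*
 * Worked example (illustrative): a DMA, LBA48, FUA write selects
 * index 16 + fua 4 + lba48 2 + write 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT.
 * The same flag combination under PIO lands on a zero slot (index 7 or
 * 15), so ata_set_rwcmd_protocol() returns false and the request fails.
 */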
 618
 619/**
 620 *	ata_tf_read_block - Read block address from ATA taskfile
 621 *	@tf: ATA taskfile of interest
 622 *	@dev: ATA device @tf belongs to
 623 *
 624 *	LOCKING:
 625 *	None.
 626 *
 627 *	Read block address from @tf.  This function can handle all
 628 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 629 *	flags select the address format to use.
 630 *
 631 *	RETURNS:
 632 *	Block address read from @tf.
 633 */
 634u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
 635{
 636	u64 block = 0;
 637
 638	if (tf->flags & ATA_TFLAG_LBA) {
 639		if (tf->flags & ATA_TFLAG_LBA48) {
 640			block |= (u64)tf->hob_lbah << 40;
 641			block |= (u64)tf->hob_lbam << 32;
 642			block |= (u64)tf->hob_lbal << 24;
 643		} else
 644			block |= (tf->device & 0xf) << 24;
 645
 646		block |= tf->lbah << 16;
 647		block |= tf->lbam << 8;
 648		block |= tf->lbal;
 649	} else {
 650		u32 cyl, head, sect;
 651
 652		cyl = tf->lbam | (tf->lbah << 8);
 653		head = tf->device & 0xf;
 654		sect = tf->lbal;
 655
 656		if (!sect) {
 657			ata_dev_warn(dev,
 658				     "device reported invalid CHS sector 0\n");
 659			return U64_MAX;
 660		}
 661
 662		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
 663	}
 664
 665	return block;
 666}
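/*
 * Worked CHS example (illustrative): with dev->heads == 16 and
 * dev->sectors == 63, a taskfile addressing cyl 2, head 3, sector 4
 * decodes to ((2 * 16 + 3) * 63) + 4 - 1 = 2208.
 */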
 667
 668/*
 669 * Set a taskfile command duration limit index.
 670 */
 671static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)
 672{
 673	struct ata_taskfile *tf = &qc->tf;
 674
 675	if (tf->protocol == ATA_PROT_NCQ)
 676		tf->auxiliary |= cdl;
 677	else
 678		tf->feature |= cdl;
 679
 680	/*
 681	 * Mark this command as having a CDL and request the result
 682	 * task file so that we can inspect the sense data available
 683	 * bit on completion.
 684	 */
 685	qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF;
 686}
 687
 688/**
 689 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 690 *	@qc: Metadata associated with the taskfile to build
 
 691 *	@block: Block address
 692 *	@n_block: Number of blocks
 693 *	@tf_flags: RW/FUA etc...
 694 *	@cdl: Command duration limit index
 695 *	@class: IO priority class
 696 *
 697 *	LOCKING:
 698 *	None.
 699 *
  700 *	Build ATA taskfile for the command @qc for the read/write request
  701 *	described by @block, @n_block, @tf_flags and @class.
 702 *
 703 *	RETURNS:
 704 *
 705 *	0 on success, -ERANGE if the request is too large for @dev,
 706 *	-EINVAL if the request is invalid.
 707 */
 708int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
 709		    unsigned int tf_flags, int cdl, int class)
 
 710{
 711	struct ata_taskfile *tf = &qc->tf;
 712	struct ata_device *dev = qc->dev;
 713
 714	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 715	tf->flags |= tf_flags;
 716
 717	if (ata_ncq_enabled(dev)) {
 718		/* yay, NCQ */
 719		if (!lba_48_ok(block, n_block))
 720			return -ERANGE;
 721
 722		tf->protocol = ATA_PROT_NCQ;
 723		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 724
 725		if (tf->flags & ATA_TFLAG_WRITE)
 726			tf->command = ATA_CMD_FPDMA_WRITE;
 727		else
 728			tf->command = ATA_CMD_FPDMA_READ;
 729
 730		tf->nsect = qc->hw_tag << 3;
 731		tf->hob_feature = (n_block >> 8) & 0xff;
 732		tf->feature = n_block & 0xff;
 733
 734		tf->hob_lbah = (block >> 40) & 0xff;
 735		tf->hob_lbam = (block >> 32) & 0xff;
 736		tf->hob_lbal = (block >> 24) & 0xff;
 737		tf->lbah = (block >> 16) & 0xff;
 738		tf->lbam = (block >> 8) & 0xff;
 739		tf->lbal = block & 0xff;
 740
 741		tf->device = ATA_LBA;
 742		if (tf->flags & ATA_TFLAG_FUA)
 743			tf->device |= 1 << 7;
 744
 745		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
 746		    class == IOPRIO_CLASS_RT)
 747			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
 748
 749		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
 750			ata_set_tf_cdl(qc, cdl);
 751
 752	} else if (dev->flags & ATA_DFLAG_LBA) {
 753		tf->flags |= ATA_TFLAG_LBA;
 754
 755		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
 756			ata_set_tf_cdl(qc, cdl);
 757
 758		/* Both FUA writes and a CDL index require 48-bit commands */
 759		if (!(tf->flags & ATA_TFLAG_FUA) &&
 760		    !(qc->flags & ATA_QCFLAG_HAS_CDL) &&
 761		    lba_28_ok(block, n_block)) {
 762			/* use LBA28 */
 763			tf->device |= (block >> 24) & 0xf;
 764		} else if (lba_48_ok(block, n_block)) {
 765			if (!(dev->flags & ATA_DFLAG_LBA48))
 766				return -ERANGE;
 767
 768			/* use LBA48 */
 769			tf->flags |= ATA_TFLAG_LBA48;
 770
 771			tf->hob_nsect = (n_block >> 8) & 0xff;
 772
 773			tf->hob_lbah = (block >> 40) & 0xff;
 774			tf->hob_lbam = (block >> 32) & 0xff;
 775			tf->hob_lbal = (block >> 24) & 0xff;
 776		} else {
 777			/* request too large even for LBA48 */
 778			return -ERANGE;
 779		}
 780
 781		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
 782			return -EINVAL;
 783
 784		tf->nsect = n_block & 0xff;
 785
 786		tf->lbah = (block >> 16) & 0xff;
 787		tf->lbam = (block >> 8) & 0xff;
 788		tf->lbal = block & 0xff;
 789
 790		tf->device |= ATA_LBA;
 791	} else {
 792		/* CHS */
 793		u32 sect, head, cyl, track;
 794
 795		/* The request -may- be too large for CHS addressing. */
 796		if (!lba_28_ok(block, n_block))
 797			return -ERANGE;
 798
 799		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
 800			return -EINVAL;
 801
 802		/* Convert LBA to CHS */
 803		track = (u32)block / dev->sectors;
 804		cyl   = track / dev->heads;
 805		head  = track % dev->heads;
 806		sect  = (u32)block % dev->sectors + 1;
 807
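		/*
		 * Worked example with assumed geometry (illustrative only):
		 * block == 2205, dev->sectors == 63 and dev->heads == 16
		 * give track == 35, cyl == 2, head == 3, sect == 1, i.e.
		 * the inverse of the decoding done in ata_tf_read_block().
		 */
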
  808		/* Check whether the converted CHS can fit:
  809		   Cylinder: 0-65535
  810		   Head: 0-15
  811		   Sector: 1-255 */
 812		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
 813			return -ERANGE;
 814
 815		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
 816		tf->lbal = sect;
 817		tf->lbam = cyl;
 818		tf->lbah = cyl >> 8;
 819		tf->device |= head;
 820	}
 821
 822	return 0;
 823}
 824
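/*
 * Illustrative call pattern (a sketch; the real callers live in
 * libata-scsi.c and derive these values from the SCSI CDB):
 *
 *	rc = ata_build_rw_tf(qc, block, n_block, tf_flags, cdl, class);
 *
 * A return of -ERANGE means the request exceeds what the device can
 * address, -EINVAL that no suitable read/write command exists for it.
 */
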
 825/**
 826 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 827 *	@pio_mask: pio_mask
 828 *	@mwdma_mask: mwdma_mask
 829 *	@udma_mask: udma_mask
 830 *
 831 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 832 *	unsigned int xfer_mask.
 833 *
 834 *	LOCKING:
 835 *	None.
 836 *
 837 *	RETURNS:
 838 *	Packed xfer_mask.
 839 */
 840unsigned int ata_pack_xfermask(unsigned int pio_mask,
 841			       unsigned int mwdma_mask,
 842			       unsigned int udma_mask)
 843{
 844	return	((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
 845		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
 846		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 847}
 848EXPORT_SYMBOL_GPL(ata_pack_xfermask);
 849
 850/**
 851 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 852 *	@xfer_mask: xfer_mask to unpack
 853 *	@pio_mask: resulting pio_mask
 854 *	@mwdma_mask: resulting mwdma_mask
 855 *	@udma_mask: resulting udma_mask
 856 *
 857 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 858 *	Any NULL destination masks will be ignored.
 859 */
 860void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
 861			 unsigned int *mwdma_mask, unsigned int *udma_mask)
 862{
 863	if (pio_mask)
 864		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
 865	if (mwdma_mask)
 866		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
 867	if (udma_mask)
 868		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 869}
 870
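/*
 * Illustrative round trip (a sketch with assumed mask values):
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * leaves pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and
 * udma == 0x3f (UDMA0-5) again, since each mode class occupies its own
 * bit range within the packed mask.
 */
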
 871static const struct ata_xfer_ent {
 872	int shift, bits;
 873	u8 base;
 874} ata_xfer_tbl[] = {
 875	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
 876	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
 877	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
 878	{ -1, },
 879};
 880
 881/**
 882 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 883 *	@xfer_mask: xfer_mask of interest
 884 *
 885 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 886 *	bit of @xfer_mask is considered.
 887 *
 888 *	LOCKING:
 889 *	None.
 890 *
 891 *	RETURNS:
 892 *	Matching XFER_* value, 0xff if no match found.
 893 */
 894u8 ata_xfer_mask2mode(unsigned int xfer_mask)
 895{
 896	int highbit = fls(xfer_mask) - 1;
 897	const struct ata_xfer_ent *ent;
 898
 899	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 900		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
 901			return ent->base + highbit - ent->shift;
 902	return 0xff;
 903}
 904EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
 905
 906/**
 907 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 908 *	@xfer_mode: XFER_* of interest
 909 *
 910 *	Return matching xfer_mask for @xfer_mode.
 911 *
 912 *	LOCKING:
 913 *	None.
 914 *
 915 *	RETURNS:
 916 *	Matching xfer_mask, 0 if no match found.
 917 */
 918unsigned int ata_xfer_mode2mask(u8 xfer_mode)
 919{
 920	const struct ata_xfer_ent *ent;
 921
 922	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 923		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 924			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
 925				& ~((1 << ent->shift) - 1);
 926	return 0;
 927}
 928EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
 929
 930/**
 931 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 932 *	@xfer_mode: XFER_* of interest
 933 *
 934 *	Return matching xfer_shift for @xfer_mode.
 935 *
 936 *	LOCKING:
 937 *	None.
 938 *
 939 *	RETURNS:
 940 *	Matching xfer_shift, -1 if no match found.
 941 */
 942int ata_xfer_mode2shift(u8 xfer_mode)
 943{
 944	const struct ata_xfer_ent *ent;
 945
 946	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 947		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 948			return ent->shift;
 949	return -1;
 950}
 951EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
 952
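/*
 * Illustrative relationships between the three lookup helpers above
 * (a sketch; the XFER_* and ATA_SHIFT_* values come from <linux/ata.h>):
 *
 *	ata_xfer_mode2shift(XFER_UDMA_5) == ATA_SHIFT_UDMA
 *	ata_xfer_mode2mask(XFER_UDMA_5) is the mask of the UDMA0..UDMA5 bits
 *	ata_xfer_mask2mode(ata_xfer_mode2mask(XFER_UDMA_5)) == XFER_UDMA_5
 */
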
 953/**
 954 *	ata_mode_string - convert xfer_mask to string
 955 *	@xfer_mask: mask of bits supported; only highest bit counts.
 956 *
 957 *	Determine string which represents the highest speed
  958 *	(highest bit in @xfer_mask).
 959 *
 960 *	LOCKING:
 961 *	None.
 962 *
 963 *	RETURNS:
 964 *	Constant C string representing highest speed listed in
  965 *	@xfer_mask, or the constant C string "<n/a>".
 966 */
 967const char *ata_mode_string(unsigned int xfer_mask)
 968{
 969	static const char * const xfer_mode_str[] = {
 970		"PIO0",
 971		"PIO1",
 972		"PIO2",
 973		"PIO3",
 974		"PIO4",
 975		"PIO5",
 976		"PIO6",
 977		"MWDMA0",
 978		"MWDMA1",
 979		"MWDMA2",
 980		"MWDMA3",
 981		"MWDMA4",
 982		"UDMA/16",
 983		"UDMA/25",
 984		"UDMA/33",
 985		"UDMA/44",
 986		"UDMA/66",
 987		"UDMA/100",
 988		"UDMA/133",
 989		"UDMA7",
 990	};
 991	int highbit;
 992
 993	highbit = fls(xfer_mask) - 1;
 994	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
 995		return xfer_mode_str[highbit];
 996	return "<n/a>";
 997}
 998EXPORT_SYMBOL_GPL(ata_mode_string);
 999
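/*
 * For example (illustrative), for a packed xfer_mask whose highest set bit
 * is the UDMA5 bit, ata_mode_string(xfer_mask) returns "UDMA/100".
 */
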
1000const char *sata_spd_string(unsigned int spd)
1001{
1002	static const char * const spd_str[] = {
1003		"1.5 Gbps",
1004		"3.0 Gbps",
1005		"6.0 Gbps",
1006	};
1007
1008	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1009		return "<unknown>";
1010	return spd_str[spd - 1];
1011}
1012
1013/**
1014 *	ata_dev_classify - determine device type based on ATA-spec signature
1015 *	@tf: ATA taskfile register set for device to be identified
1016 *
1017 *	Determine from taskfile register contents whether a device is
1018 *	ATA or ATAPI, as per "Signature and persistence" section
1019 *	of ATA/PI spec (volume 1, sect 5.14).
1020 *
1021 *	LOCKING:
1022 *	None.
1023 *
1024 *	RETURNS:
1025 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 1026 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1027 */
1028unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1029{
1030	/* Apple's open source Darwin code hints that some devices only
1031	 * put a proper signature into the LBA mid/high registers,
 1032	 * so we only check those.  It's sufficient for uniqueness.
1033	 *
1034	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1035	 * signatures for ATA and ATAPI devices attached on SerialATA,
 1036	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
 1037	 * spec has never mentioned using different signatures for
 1038	 * ATA/ATAPI devices.  Later, the Serial ATA II: Port Multiplier
 1039	 * specification began using 0x69/0x96 to identify port
 1040	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
 1041	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
 1042	 * 0x69/0x96, describing them instead as reserved for
 1043	 * SerialATA.
1044	 *
1045	 * We follow the current spec and consider that 0x69/0x96
1046	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1047	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1048	 * SEMB signature.  This is worked around in
1049	 * ata_dev_read_id().
1050	 */
1051	if (tf->lbam == 0 && tf->lbah == 0)
1052		return ATA_DEV_ATA;
1053
1054	if (tf->lbam == 0x14 && tf->lbah == 0xeb)
1055		return ATA_DEV_ATAPI;
1056
1057	if (tf->lbam == 0x69 && tf->lbah == 0x96)
1058		return ATA_DEV_PMP;
1059
1060	if (tf->lbam == 0x3c && tf->lbah == 0xc3)
1061		return ATA_DEV_SEMB;
1062
1063	if (tf->lbam == 0xcd && tf->lbah == 0xab)
1064		return ATA_DEV_ZAC;
1065
1066	return ATA_DEV_UNKNOWN;
1067}
1068EXPORT_SYMBOL_GPL(ata_dev_classify);
1069
1070/**
1071 *	ata_id_string - Convert IDENTIFY DEVICE page into string
1072 *	@id: IDENTIFY DEVICE results we will examine
1073 *	@s: string into which data is output
1074 *	@ofs: offset into identify device page
 1075 *	@len: length of string to return. Must be an even number.
1076 *
1077 *	The strings in the IDENTIFY DEVICE page are broken up into
1078 *	16-bit chunks.  Run through the string, and output each
1079 *	8-bit chunk linearly, regardless of platform.
1080 *
1081 *	LOCKING:
1082 *	caller.
1083 */
1084
1085void ata_id_string(const u16 *id, unsigned char *s,
1086		   unsigned int ofs, unsigned int len)
1087{
1088	unsigned int c;
1089
1090	BUG_ON(len & 1);
1091
1092	while (len > 0) {
1093		c = id[ofs] >> 8;
1094		*s = c;
1095		s++;
1096
1097		c = id[ofs] & 0xff;
1098		*s = c;
1099		s++;
1100
1101		ofs++;
1102		len -= 2;
1103	}
1104}
1105EXPORT_SYMBOL_GPL(ata_id_string);
1106
1107/**
1108 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1109 *	@id: IDENTIFY DEVICE results we will examine
1110 *	@s: string into which data is output
1111 *	@ofs: offset into identify device page
 1112 *	@len: length of string to return. Must be an odd number.
1113 *
1114 *	This function is identical to ata_id_string except that it
1115 *	trims trailing spaces and terminates the resulting string with
1116 *	null.  @len must be actual maximum length (even number) + 1.
1117 *
1118 *	LOCKING:
1119 *	caller.
1120 */
1121void ata_id_c_string(const u16 *id, unsigned char *s,
1122		     unsigned int ofs, unsigned int len)
1123{
1124	unsigned char *p;
1125
1126	ata_id_string(id, s, ofs, len - 1);
1127
1128	p = s + strnlen(s, len - 1);
1129	while (p > s && p[-1] == ' ')
1130		p--;
1131	*p = '\0';
1132}
1133EXPORT_SYMBOL_GPL(ata_id_c_string);
1134
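/*
 * Illustrative usage (a sketch; this mirrors how the model string is
 * extracted elsewhere in libata):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * model then holds the product string with trailing spaces trimmed and a
 * terminating NUL appended.
 */
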
1135static u64 ata_id_n_sectors(const u16 *id)
1136{
1137	if (ata_id_has_lba(id)) {
1138		if (ata_id_has_lba48(id))
1139			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1140
1141		return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1142	}
1143
1144	if (ata_id_current_chs_valid(id))
1145		return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
1146		       (u32)id[ATA_ID_CUR_SECTORS];
1147
1148	return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
1149	       (u32)id[ATA_ID_SECTORS];
1150}
1151
1152u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1153{
1154	u64 sectors = 0;
1155
1156	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1157	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1158	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1159	sectors |= (tf->lbah & 0xff) << 16;
1160	sectors |= (tf->lbam & 0xff) << 8;
1161	sectors |= (tf->lbal & 0xff);
1162
1163	return sectors;
1164}
1165
1166u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1167{
1168	u64 sectors = 0;
1169
1170	sectors |= (tf->device & 0x0f) << 24;
1171	sectors |= (tf->lbah & 0xff) << 16;
1172	sectors |= (tf->lbam & 0xff) << 8;
1173	sectors |= (tf->lbal & 0xff);
1174
1175	return sectors;
1176}
1177
1178/**
1179 *	ata_read_native_max_address - Read native max address
1180 *	@dev: target device
1181 *	@max_sectors: out parameter for the result native max address
1182 *
1183 *	Perform an LBA48 or LBA28 native size query upon the device in
1184 *	question.
1185 *
1186 *	RETURNS:
1187 *	0 on success, -EACCES if command is aborted by the drive.
1188 *	-EIO on other errors.
1189 */
1190static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1191{
1192	unsigned int err_mask;
1193	struct ata_taskfile tf;
1194	int lba48 = ata_id_has_lba48(dev->id);
1195
1196	ata_tf_init(dev, &tf);
1197
1198	/* always clear all address registers */
1199	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1200
1201	if (lba48) {
1202		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1203		tf.flags |= ATA_TFLAG_LBA48;
1204	} else
1205		tf.command = ATA_CMD_READ_NATIVE_MAX;
1206
1207	tf.protocol = ATA_PROT_NODATA;
1208	tf.device |= ATA_LBA;
1209
1210	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1211	if (err_mask) {
1212		ata_dev_warn(dev,
1213			     "failed to read native max address (err_mask=0x%x)\n",
1214			     err_mask);
1215		if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
1216			return -EACCES;
1217		return -EIO;
1218	}
1219
1220	if (lba48)
1221		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1222	else
1223		*max_sectors = ata_tf_to_lba(&tf) + 1;
1224	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1225		(*max_sectors)--;
1226	return 0;
1227}
1228
1229/**
1230 *	ata_set_max_sectors - Set max sectors
1231 *	@dev: target device
1232 *	@new_sectors: new max sectors value to set for the device
1233 *
1234 *	Set max sectors of @dev to @new_sectors.
1235 *
1236 *	RETURNS:
1237 *	0 on success, -EACCES if command is aborted or denied (due to
1238 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1239 *	errors.
1240 */
1241static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1242{
1243	unsigned int err_mask;
1244	struct ata_taskfile tf;
1245	int lba48 = ata_id_has_lba48(dev->id);
1246
1247	new_sectors--;
1248
1249	ata_tf_init(dev, &tf);
1250
1251	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1252
1253	if (lba48) {
1254		tf.command = ATA_CMD_SET_MAX_EXT;
1255		tf.flags |= ATA_TFLAG_LBA48;
1256
1257		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1258		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1259		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1260	} else {
1261		tf.command = ATA_CMD_SET_MAX;
1262
1263		tf.device |= (new_sectors >> 24) & 0xf;
1264	}
1265
1266	tf.protocol = ATA_PROT_NODATA;
1267	tf.device |= ATA_LBA;
1268
1269	tf.lbal = (new_sectors >> 0) & 0xff;
1270	tf.lbam = (new_sectors >> 8) & 0xff;
1271	tf.lbah = (new_sectors >> 16) & 0xff;
1272
1273	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1274	if (err_mask) {
1275		ata_dev_warn(dev,
1276			     "failed to set max address (err_mask=0x%x)\n",
1277			     err_mask);
1278		if (err_mask == AC_ERR_DEV &&
1279		    (tf.error & (ATA_ABORTED | ATA_IDNF)))
1280			return -EACCES;
1281		return -EIO;
1282	}
1283
1284	return 0;
1285}
1286
1287/**
1288 *	ata_hpa_resize		-	Resize a device with an HPA set
1289 *	@dev: Device to resize
1290 *
1291 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1292 *	it if required to the full size of the media. The caller must check
1293 *	the drive has the HPA feature set enabled.
1294 *
1295 *	RETURNS:
1296 *	0 on success, -errno on failure.
1297 */
1298static int ata_hpa_resize(struct ata_device *dev)
1299{
1300	bool print_info = ata_dev_print_info(dev);
1301	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1302	u64 sectors = ata_id_n_sectors(dev->id);
1303	u64 native_sectors;
1304	int rc;
1305
1306	/* do we need to do it? */
1307	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1308	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1309	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1310		return 0;
1311
1312	/* read native max address */
1313	rc = ata_read_native_max_address(dev, &native_sectors);
1314	if (rc) {
1315		/* If device aborted the command or HPA isn't going to
1316		 * be unlocked, skip HPA resizing.
1317		 */
1318		if (rc == -EACCES || !unlock_hpa) {
1319			ata_dev_warn(dev,
1320				     "HPA support seems broken, skipping HPA handling\n");
1321			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1322
1323			/* we can continue if device aborted the command */
1324			if (rc == -EACCES)
1325				rc = 0;
1326		}
1327
1328		return rc;
1329	}
1330	dev->n_native_sectors = native_sectors;
1331
1332	/* nothing to do? */
1333	if (native_sectors <= sectors || !unlock_hpa) {
1334		if (!print_info || native_sectors == sectors)
1335			return 0;
1336
1337		if (native_sectors > sectors)
1338			ata_dev_info(dev,
1339				"HPA detected: current %llu, native %llu\n",
1340				(unsigned long long)sectors,
1341				(unsigned long long)native_sectors);
1342		else if (native_sectors < sectors)
1343			ata_dev_warn(dev,
1344				"native sectors (%llu) is smaller than sectors (%llu)\n",
1345				(unsigned long long)native_sectors,
1346				(unsigned long long)sectors);
1347		return 0;
1348	}
1349
1350	/* let's unlock HPA */
1351	rc = ata_set_max_sectors(dev, native_sectors);
1352	if (rc == -EACCES) {
1353		/* if device aborted the command, skip HPA resizing */
1354		ata_dev_warn(dev,
1355			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1356			     (unsigned long long)sectors,
1357			     (unsigned long long)native_sectors);
1358		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1359		return 0;
1360	} else if (rc)
1361		return rc;
1362
1363	/* re-read IDENTIFY data */
1364	rc = ata_dev_reread_id(dev, 0);
1365	if (rc) {
1366		ata_dev_err(dev,
1367			    "failed to re-read IDENTIFY data after HPA resizing\n");
1368		return rc;
1369	}
1370
1371	if (print_info) {
1372		u64 new_sectors = ata_id_n_sectors(dev->id);
1373		ata_dev_info(dev,
1374			"HPA unlocked: %llu -> %llu, native %llu\n",
1375			(unsigned long long)sectors,
1376			(unsigned long long)new_sectors,
1377			(unsigned long long)native_sectors);
1378	}
1379
1380	return 0;
1381}
1382
1383/**
1384 *	ata_dump_id - IDENTIFY DEVICE info debugging output
1385 *	@dev: device from which the information is fetched
1386 *	@id: IDENTIFY DEVICE page to dump
1387 *
1388 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1389 *	page.
1390 *
1391 *	LOCKING:
1392 *	caller.
1393 */
1394
1395static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
1396{
1397	ata_dev_dbg(dev,
1398		"49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n"
1399		"80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n"
1400		"88==0x%04x  93==0x%04x\n",
1401		id[49], id[53], id[63], id[64], id[75], id[80],
1402		id[81], id[82], id[83], id[84], id[88], id[93]);
1403}
1404
1405/**
1406 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1407 *	@id: IDENTIFY data to compute xfer mask from
1408 *
1409 *	Compute the xfermask for this device. This is not as trivial
1410 *	as it seems if we must consider early devices correctly.
1411 *
1412 *	FIXME: pre IDE drive timing (do we care ?).
1413 *
1414 *	LOCKING:
1415 *	None.
1416 *
1417 *	RETURNS:
1418 *	Computed xfermask
1419 */
1420unsigned int ata_id_xfermask(const u16 *id)
1421{
1422	unsigned int pio_mask, mwdma_mask, udma_mask;
1423
1424	/* Usual case. Word 53 indicates word 64 is valid */
1425	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1426		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1427		pio_mask <<= 3;
1428		pio_mask |= 0x7;
1429	} else {
1430		/* If word 64 isn't valid then Word 51 high byte holds
1431		 * the PIO timing number for the maximum. Turn it into
1432		 * a mask.
1433		 */
1434		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1435		if (mode < 5)	/* Valid PIO range */
1436			pio_mask = (2 << mode) - 1;
1437		else
1438			pio_mask = 1;
1439
1440		/* But wait.. there's more. Design your standards by
1441		 * committee and you too can get a free iordy field to
1442		 * process. However it is the speeds not the modes that
1443		 * are supported... Note drivers using the timing API
1444		 * will get this right anyway
1445		 */
1446	}
1447
1448	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1449
1450	if (ata_id_is_cfa(id)) {
1451		/*
1452		 *	Process compact flash extended modes
1453		 */
1454		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1455		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1456
1457		if (pio)
1458			pio_mask |= (1 << 5);
1459		if (pio > 1)
1460			pio_mask |= (1 << 6);
1461		if (dma)
1462			mwdma_mask |= (1 << 3);
1463		if (dma > 1)
1464			mwdma_mask |= (1 << 4);
1465	}
1466
1467	udma_mask = 0;
1468	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1469		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1470
1471	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1472}
1473EXPORT_SYMBOL_GPL(ata_id_xfermask);
1474
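/*
 * Illustrative usage (a sketch): combined with the helpers above, the
 * IDENTIFY data can be turned into a human readable transfer mode, e.g.
 *
 *	unsigned int xfer_mask = ata_id_xfermask(dev->id);
 *
 *	ata_dev_info(dev, "max mode: %s\n", ata_mode_string(xfer_mask));
 */
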
1475static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1476{
1477	struct completion *waiting = qc->private_data;
1478
1479	complete(waiting);
1480}
1481
1482/**
1483 *	ata_exec_internal_sg - execute libata internal command
1484 *	@dev: Device to which the command is sent
1485 *	@tf: Taskfile registers for the command and the result
1486 *	@cdb: CDB for packet command
1487 *	@dma_dir: Data transfer direction of the command
1488 *	@sgl: sg list for the data buffer of the command
1489 *	@n_elem: Number of sg entries
1490 *	@timeout: Timeout in msecs (0 for default)
1491 *
1492 *	Executes libata internal command with timeout.  @tf contains
1493 *	command on entry and result on return.  Timeout and error
1494 *	conditions are reported via return value.  No recovery action
 1495 *	is taken after a command times out.  It is the caller's duty
 1496 *	to clean up after a timeout.
1497 *
1498 *	LOCKING:
1499 *	None.  Should be called with kernel context, might sleep.
1500 *
1501 *	RETURNS:
1502 *	Zero on success, AC_ERR_* mask on failure
1503 */
1504static unsigned ata_exec_internal_sg(struct ata_device *dev,
1505				     struct ata_taskfile *tf, const u8 *cdb,
1506				     int dma_dir, struct scatterlist *sgl,
1507				     unsigned int n_elem, unsigned int timeout)
1508{
1509	struct ata_link *link = dev->link;
1510	struct ata_port *ap = link->ap;
1511	u8 command = tf->command;
1512	int auto_timeout = 0;
1513	struct ata_queued_cmd *qc;
1514	unsigned int preempted_tag;
1515	u32 preempted_sactive;
1516	u64 preempted_qc_active;
1517	int preempted_nr_active_links;
1518	DECLARE_COMPLETION_ONSTACK(wait);
1519	unsigned long flags;
1520	unsigned int err_mask;
1521	int rc;
1522
1523	spin_lock_irqsave(ap->lock, flags);
1524
1525	/* no internal command while frozen */
1526	if (ata_port_is_frozen(ap)) {
1527		spin_unlock_irqrestore(ap->lock, flags);
1528		return AC_ERR_SYSTEM;
1529	}
1530
1531	/* initialize internal qc */
1532	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1533
1534	qc->tag = ATA_TAG_INTERNAL;
1535	qc->hw_tag = 0;
1536	qc->scsicmd = NULL;
1537	qc->ap = ap;
1538	qc->dev = dev;
1539	ata_qc_reinit(qc);
1540
1541	preempted_tag = link->active_tag;
1542	preempted_sactive = link->sactive;
1543	preempted_qc_active = ap->qc_active;
1544	preempted_nr_active_links = ap->nr_active_links;
1545	link->active_tag = ATA_TAG_POISON;
1546	link->sactive = 0;
1547	ap->qc_active = 0;
1548	ap->nr_active_links = 0;
1549
1550	/* prepare & issue qc */
1551	qc->tf = *tf;
1552	if (cdb)
1553		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1554
1555	/* some SATA bridges need us to indicate data xfer direction */
1556	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1557	    dma_dir == DMA_FROM_DEVICE)
1558		qc->tf.feature |= ATAPI_DMADIR;
1559
1560	qc->flags |= ATA_QCFLAG_RESULT_TF;
1561	qc->dma_dir = dma_dir;
1562	if (dma_dir != DMA_NONE) {
1563		unsigned int i, buflen = 0;
1564		struct scatterlist *sg;
1565
1566		for_each_sg(sgl, sg, n_elem, i)
1567			buflen += sg->length;
1568
1569		ata_sg_init(qc, sgl, n_elem);
1570		qc->nbytes = buflen;
1571	}
1572
1573	qc->private_data = &wait;
1574	qc->complete_fn = ata_qc_complete_internal;
1575
1576	ata_qc_issue(qc);
1577
1578	spin_unlock_irqrestore(ap->lock, flags);
1579
1580	if (!timeout) {
1581		if (ata_probe_timeout)
1582			timeout = ata_probe_timeout * 1000;
1583		else {
1584			timeout = ata_internal_cmd_timeout(dev, command);
1585			auto_timeout = 1;
1586		}
1587	}
1588
1589	ata_eh_release(ap);
1590
1591	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1592
1593	ata_eh_acquire(ap);
1594
1595	ata_sff_flush_pio_task(ap);
1596
1597	if (!rc) {
1598		spin_lock_irqsave(ap->lock, flags);
1599
1600		/* We're racing with irq here.  If we lose, the
1601		 * following test prevents us from completing the qc
1602		 * twice.  If we win, the port is frozen and will be
1603		 * cleaned up by ->post_internal_cmd().
1604		 */
1605		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1606			qc->err_mask |= AC_ERR_TIMEOUT;
1607
1608			ata_port_freeze(ap);
1609
1610			ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
1611				     timeout, command);
1612		}
1613
1614		spin_unlock_irqrestore(ap->lock, flags);
1615	}
1616
1617	/* do post_internal_cmd */
1618	if (ap->ops->post_internal_cmd)
1619		ap->ops->post_internal_cmd(qc);
1620
1621	/* perform minimal error analysis */
1622	if (qc->flags & ATA_QCFLAG_EH) {
1623		if (qc->result_tf.status & (ATA_ERR | ATA_DF))
1624			qc->err_mask |= AC_ERR_DEV;
1625
1626		if (!qc->err_mask)
1627			qc->err_mask |= AC_ERR_OTHER;
1628
1629		if (qc->err_mask & ~AC_ERR_OTHER)
1630			qc->err_mask &= ~AC_ERR_OTHER;
1631	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1632		qc->result_tf.status |= ATA_SENSE;
1633	}
1634
1635	/* finish up */
1636	spin_lock_irqsave(ap->lock, flags);
1637
1638	*tf = qc->result_tf;
1639	err_mask = qc->err_mask;
1640
1641	ata_qc_free(qc);
1642	link->active_tag = preempted_tag;
1643	link->sactive = preempted_sactive;
1644	ap->qc_active = preempted_qc_active;
1645	ap->nr_active_links = preempted_nr_active_links;
1646
1647	spin_unlock_irqrestore(ap->lock, flags);
1648
1649	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1650		ata_internal_cmd_timed_out(dev, command);
1651
1652	return err_mask;
1653}
1654
1655/**
1656 *	ata_exec_internal - execute libata internal command
1657 *	@dev: Device to which the command is sent
1658 *	@tf: Taskfile registers for the command and the result
1659 *	@cdb: CDB for packet command
1660 *	@dma_dir: Data transfer direction of the command
1661 *	@buf: Data buffer of the command
1662 *	@buflen: Length of data buffer
1663 *	@timeout: Timeout in msecs (0 for default)
1664 *
 1665 *	Wrapper around ata_exec_internal_sg() which takes a simple
 1666 *	buffer instead of an sg list.
1667 *
1668 *	LOCKING:
1669 *	None.  Should be called with kernel context, might sleep.
1670 *
1671 *	RETURNS:
1672 *	Zero on success, AC_ERR_* mask on failure
1673 */
1674unsigned ata_exec_internal(struct ata_device *dev,
1675			   struct ata_taskfile *tf, const u8 *cdb,
1676			   int dma_dir, void *buf, unsigned int buflen,
1677			   unsigned int timeout)
1678{
1679	struct scatterlist *psg = NULL, sg;
1680	unsigned int n_elem = 0;
1681
1682	if (dma_dir != DMA_NONE) {
1683		WARN_ON(!buf);
1684		sg_init_one(&sg, buf, buflen);
1685		psg = &sg;
1686		n_elem++;
1687	}
1688
1689	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1690				    timeout);
1691}
1692
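/*
 * Illustrative call pattern (a sketch mirroring the users further down in
 * this file, e.g. ata_dev_power_is_active()):
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 *	tf.protocol = ATA_PROT_NODATA;
 *	tf.command = ATA_CMD_CHK_POWER;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 *
 * A zero err_mask means success and tf then holds the result taskfile.
 */
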
1693/**
1694 *	ata_pio_need_iordy	-	check if iordy needed
1695 *	@adev: ATA device
1696 *
1697 *	Check if the current speed of the device requires IORDY. Used
1698 *	by various controllers for chip configuration.
1699 */
1700unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1701{
1702	/* Don't set IORDY if we're preparing for reset.  IORDY may
1703	 * lead to controller lock up on certain controllers if the
1704	 * port is not occupied.  See bko#11703 for details.
1705	 */
1706	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1707		return 0;
1708	/* Controller doesn't support IORDY.  Probably a pointless
1709	 * check as the caller should know this.
1710	 */
1711	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1712		return 0;
1713	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1714	if (ata_id_is_cfa(adev->id)
1715	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1716		return 0;
1717	/* PIO3 and higher it is mandatory */
1718	if (adev->pio_mode > XFER_PIO_2)
1719		return 1;
1720	/* We turn it on when possible */
1721	if (ata_id_has_iordy(adev->id))
1722		return 1;
1723	return 0;
1724}
1725EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
1726
1727/**
1728 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1729 *	@adev: ATA device
1730 *
 1731 *	Compute the mask of PIO modes that can be used without IORDY.  If the
 1732 *	drive gives no non-IORDY cycle time, assume only PIO0-1 are safe.
1733 */
1734static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1735{
1736	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1737	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1738		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1739		/* Is the speed faster than the drive allows non IORDY ? */
1740		if (pio) {
1741			/* This is cycle times not frequency - watch the logic! */
1742			if (pio > 240)	/* PIO2 is 240nS per cycle */
1743				return 3 << ATA_SHIFT_PIO;
1744			return 7 << ATA_SHIFT_PIO;
1745		}
1746	}
1747	return 3 << ATA_SHIFT_PIO;
1748}
1749
1750/**
1751 *	ata_do_dev_read_id		-	default ID read method
1752 *	@dev: device
1753 *	@tf: proposed taskfile
1754 *	@id: data buffer
1755 *
 1756 *	Issue the identify taskfile and hand back the buffer containing
 1757 *	identify data. For some RAID controllers and for pre-ATA devices
 1758 *	this function is wrapped or replaced by the driver.
1759 */
1760unsigned int ata_do_dev_read_id(struct ata_device *dev,
1761				struct ata_taskfile *tf, __le16 *id)
1762{
1763	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1764				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1765}
1766EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1767
1768/**
1769 *	ata_dev_read_id - Read ID data from the specified device
1770 *	@dev: target device
1771 *	@p_class: pointer to class of the target device (may be changed)
1772 *	@flags: ATA_READID_* flags
1773 *	@id: buffer to read IDENTIFY data into
1774 *
1775 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1776 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1777 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1778 *	for pre-ATA4 drives.
1779 *
1780 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1781 *	now we abort if we hit that case.
1782 *
1783 *	LOCKING:
1784 *	Kernel thread context (may sleep)
1785 *
1786 *	RETURNS:
1787 *	0 on success, -errno otherwise.
1788 */
1789int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1790		    unsigned int flags, u16 *id)
1791{
1792	struct ata_port *ap = dev->link->ap;
1793	unsigned int class = *p_class;
1794	struct ata_taskfile tf;
1795	unsigned int err_mask = 0;
1796	const char *reason;
1797	bool is_semb = class == ATA_DEV_SEMB;
1798	int may_fallback = 1, tried_spinup = 0;
1799	int rc;
1800
1801retry:
1802	ata_tf_init(dev, &tf);
1803
1804	switch (class) {
1805	case ATA_DEV_SEMB:
1806		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1807		fallthrough;
1808	case ATA_DEV_ATA:
1809	case ATA_DEV_ZAC:
1810		tf.command = ATA_CMD_ID_ATA;
1811		break;
1812	case ATA_DEV_ATAPI:
1813		tf.command = ATA_CMD_ID_ATAPI;
1814		break;
1815	default:
1816		rc = -ENODEV;
1817		reason = "unsupported class";
1818		goto err_out;
1819	}
1820
1821	tf.protocol = ATA_PROT_PIO;
1822
1823	/* Some devices choke if TF registers contain garbage.  Make
1824	 * sure those are properly initialized.
1825	 */
1826	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1827
1828	/* Device presence detection is unreliable on some
1829	 * controllers.  Always poll IDENTIFY if available.
1830	 */
1831	tf.flags |= ATA_TFLAG_POLLING;
1832
1833	if (ap->ops->read_id)
1834		err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
1835	else
1836		err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
1837
1838	if (err_mask) {
1839		if (err_mask & AC_ERR_NODEV_HINT) {
1840			ata_dev_dbg(dev, "NODEV after polling detection\n");
1841			return -ENOENT;
1842		}
1843
1844		if (is_semb) {
1845			ata_dev_info(dev,
1846		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1847			/* SEMB is not supported yet */
1848			*p_class = ATA_DEV_SEMB_UNSUP;
1849			return 0;
1850		}
1851
1852		if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
1853			/* Device or controller might have reported
1854			 * the wrong device class.  Give a shot at the
1855			 * other IDENTIFY if the current one is
1856			 * aborted by the device.
1857			 */
1858			if (may_fallback) {
1859				may_fallback = 0;
1860
1861				if (class == ATA_DEV_ATA)
1862					class = ATA_DEV_ATAPI;
1863				else
1864					class = ATA_DEV_ATA;
1865				goto retry;
1866			}
1867
1868			/* Control reaches here iff the device aborted
1869			 * both flavors of IDENTIFYs which happens
1870			 * sometimes with phantom devices.
1871			 */
1872			ata_dev_dbg(dev,
1873				    "both IDENTIFYs aborted, assuming NODEV\n");
1874			return -ENOENT;
1875		}
1876
1877		rc = -EIO;
1878		reason = "I/O error";
1879		goto err_out;
1880	}
1881
1882	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1883		ata_dev_info(dev, "dumping IDENTIFY data, "
1884			    "class=%d may_fallback=%d tried_spinup=%d\n",
1885			    class, may_fallback, tried_spinup);
1886		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
1887			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1888	}
1889
1890	/* Falling back doesn't make sense if ID data was read
1891	 * successfully at least once.
1892	 */
1893	may_fallback = 0;
1894
1895	swap_buf_le16(id, ATA_ID_WORDS);
1896
1897	/* sanity check */
1898	rc = -EINVAL;
1899	reason = "device reports invalid type";
1900
1901	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1902		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1903			goto err_out;
1904		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1905							ata_id_is_ata(id)) {
1906			ata_dev_dbg(dev,
1907				"host indicates ignore ATA devices, ignored\n");
1908			return -ENOENT;
1909		}
1910	} else {
1911		if (ata_id_is_ata(id))
1912			goto err_out;
1913	}
1914
1915	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1916		tried_spinup = 1;
1917		/*
1918		 * Drive powered-up in standby mode, and requires a specific
1919		 * SET_FEATURES spin-up subcommand before it will accept
1920		 * anything other than the original IDENTIFY command.
1921		 */
1922		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1923		if (err_mask && id[2] != 0x738c) {
1924			rc = -EIO;
1925			reason = "SPINUP failed";
1926			goto err_out;
1927		}
1928		/*
1929		 * If the drive initially returned incomplete IDENTIFY info,
1930		 * we now must reissue the IDENTIFY command.
1931		 */
1932		if (id[2] == 0x37c8)
1933			goto retry;
1934	}
1935
1936	if ((flags & ATA_READID_POSTRESET) &&
1937	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1938		/*
1939		 * The exact sequence expected by certain pre-ATA4 drives is:
1940		 * SRST RESET
1941		 * IDENTIFY (optional in early ATA)
1942		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1943		 * anything else..
1944		 * Some drives were very specific about that exact sequence.
1945		 *
1946		 * Note that ATA4 says lba is mandatory so the second check
1947		 * should never trigger.
1948		 */
1949		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1950			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1951			if (err_mask) {
1952				rc = -EIO;
1953				reason = "INIT_DEV_PARAMS failed";
1954				goto err_out;
1955			}
1956
1957			/* current CHS translation info (id[53-58]) might be
1958			 * changed. reread the identify device info.
1959			 */
1960			flags &= ~ATA_READID_POSTRESET;
1961			goto retry;
1962		}
1963	}
1964
1965	*p_class = class;
1966
1967	return 0;
1968
1969 err_out:
1970	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1971		     reason, err_mask);
1972	return rc;
1973}
1974
1975bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
1976			   bool set_active)
1977{
1978	/* Only applies to ATA and ZAC devices */
1979	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
1980		return false;
1981
1982	ata_tf_init(dev, tf);
1983	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1984	tf->protocol = ATA_PROT_NODATA;
1985
1986	if (set_active) {
1987		/* VERIFY for 1 sector at lba=0 */
1988		tf->command = ATA_CMD_VERIFY;
1989		tf->nsect = 1;
1990		if (dev->flags & ATA_DFLAG_LBA) {
1991			tf->flags |= ATA_TFLAG_LBA;
1992			tf->device |= ATA_LBA;
1993		} else {
1994			/* CHS */
1995			tf->lbal = 0x1; /* sect */
1996		}
1997	} else {
1998		tf->command = ATA_CMD_STANDBYNOW1;
1999	}
2000
2001	return true;
2002}
2003
2004static bool ata_dev_power_is_active(struct ata_device *dev)
2005{
2006	struct ata_taskfile tf;
2007	unsigned int err_mask;
2008
2009	ata_tf_init(dev, &tf);
2010	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2011	tf.protocol = ATA_PROT_NODATA;
2012	tf.command = ATA_CMD_CHK_POWER;
2013
2014	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2015	if (err_mask) {
2016		ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
2017			    err_mask);
2018		/*
2019		 * Assume we are in standby mode so that we always force a
2020		 * spinup in ata_dev_power_set_active().
2021		 */
2022		return false;
2023	}
2024
2025	ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
2026
2027	/* Active or idle */
2028	return tf.nsect == 0xff;
2029}
2030
2031/**
2032 *	ata_dev_power_set_standby - Set a device power mode to standby
2033 *	@dev: target device
2034 *
2035 *	Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
2036 *	For an HDD device, this spins down the disks.
2037 *
2038 *	LOCKING:
2039 *	Kernel thread context (may sleep).
2040 */
2041void ata_dev_power_set_standby(struct ata_device *dev)
2042{
2043	unsigned long ap_flags = dev->link->ap->flags;
2044	struct ata_taskfile tf;
2045	unsigned int err_mask;
2046
2047	/* If the device is already sleeping or in standby, do nothing. */
2048	if ((dev->flags & ATA_DFLAG_SLEEPING) ||
2049	    !ata_dev_power_is_active(dev))
2050		return;
2051
2052	/*
2053	 * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
2054	 * causing some drives to spin up and down again. For these, do nothing
2055	 * if we are being called on shutdown.
2056	 */
2057	if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
2058	    system_state == SYSTEM_POWER_OFF)
2059		return;
2060
2061	if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
2062	    system_entering_hibernation())
2063		return;
2064
2065	/* Issue STANDBY IMMEDIATE command only if supported by the device */
2066	if (!ata_dev_power_init_tf(dev, &tf, false))
2067		return;
2068
2069	ata_dev_notice(dev, "Entering standby power mode\n");
2070
2071	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2072	if (err_mask)
2073		ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
2074			    err_mask);
2075}
2076
2077/**
2078 *	ata_dev_power_set_active -  Set a device power mode to active
2079 *	@dev: target device
2080 *
 2081 *	Issue a VERIFY command to ensure that the device is in the
2082 *	active power mode. For a spun-down HDD (standby or idle power mode),
2083 *	the VERIFY command will complete after the disk spins up.
2084 *
2085 *	LOCKING:
2086 *	Kernel thread context (may sleep).
2087 */
2088void ata_dev_power_set_active(struct ata_device *dev)
2089{
2090	struct ata_taskfile tf;
2091	unsigned int err_mask;
2092
2093	/*
2094	 * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
2095	 * if supported by the device.
2096	 */
2097	if (!ata_dev_power_init_tf(dev, &tf, true))
2098		return;
2099
2100	/*
2101	 * Check the device power state & condition and force a spinup with
2102	 * VERIFY command only if the drive is not already ACTIVE or IDLE.
2103	 */
2104	if (ata_dev_power_is_active(dev))
2105		return;
2106
2107	ata_dev_notice(dev, "Entering active power mode\n");
2108
2109	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2110	if (err_mask)
2111		ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
2112			    err_mask);
2113}
2114
2115/**
2116 *	ata_read_log_page - read a specific log page
2117 *	@dev: target device
2118 *	@log: log to read
2119 *	@page: page to read
2120 *	@buf: buffer to store read page
2121 *	@sectors: number of sectors to read
2122 *
2123 *	Read log page using READ_LOG_EXT command.
2124 *
2125 *	LOCKING:
2126 *	Kernel thread context (may sleep).
2127 *
2128 *	RETURNS:
2129 *	0 on success, AC_ERR_* mask otherwise.
2130 */
2131unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2132			       u8 page, void *buf, unsigned int sectors)
2133{
2134	unsigned long ap_flags = dev->link->ap->flags;
2135	struct ata_taskfile tf;
2136	unsigned int err_mask;
2137	bool dma = false;
2138
2139	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
2140
2141	/*
2142	 * Return error without actually issuing the command on controllers
2143	 * which e.g. lockup on a read log page.
2144	 */
2145	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2146		return AC_ERR_DEV;
2147
2148retry:
2149	ata_tf_init(dev, &tf);
2150	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
2151	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2152		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2153		tf.protocol = ATA_PROT_DMA;
2154		dma = true;
2155	} else {
2156		tf.command = ATA_CMD_READ_LOG_EXT;
2157		tf.protocol = ATA_PROT_PIO;
2158		dma = false;
2159	}
2160	tf.lbal = log;
2161	tf.lbam = page;
2162	tf.nsect = sectors;
2163	tf.hob_nsect = sectors >> 8;
2164	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2165
2166	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2167				     buf, sectors * ATA_SECT_SIZE, 0);
2168
2169	if (err_mask) {
2170		if (dma) {
2171			dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2172			if (!ata_port_is_frozen(dev->link->ap))
2173				goto retry;
2174		}
2175		ata_dev_err(dev,
2176			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
2177			    (unsigned int)log, (unsigned int)page, err_mask);
2178	}
2179
2180	return err_mask;
2181}
2182
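/*
 * Illustrative usage (a sketch; this is essentially what
 * ata_log_supported() below does):
 *
 *	if (!ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
 *		... one sector of the General Purpose Log Directory is now
 *		    available in ap->sector_buf ...
 */
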
2183static int ata_log_supported(struct ata_device *dev, u8 log)
2184{
2185	struct ata_port *ap = dev->link->ap;
2186
2187	if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
2188		return 0;
2189
2190	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2191		return 0;
2192	return get_unaligned_le16(&ap->sector_buf[log * 2]);
2193}
2194
2195static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2196{
2197	struct ata_port *ap = dev->link->ap;
2198	unsigned int err, i;
2199
2200	if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
2201		return false;
2202
2203	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2204		/*
2205		 * IDENTIFY DEVICE data log is defined as mandatory starting
2206		 * with ACS-3 (ATA version 10). Warn about the missing log
2207		 * for drives which implement this ATA level or above.
2208		 */
2209		if (ata_id_major_version(dev->id) >= 10)
2210			ata_dev_warn(dev,
2211				"ATA Identify Device Log not supported\n");
2212		dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
2213		return false;
2214	}
2215
2216	/*
2217	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2218	 * supported.
2219	 */
2220	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2221				1);
2222	if (err)
2223		return false;
2224
2225	for (i = 0; i < ap->sector_buf[8]; i++) {
2226		if (ap->sector_buf[9 + i] == page)
2227			return true;
2228	}
2229
2230	return false;
2231}
2232
2233static int ata_do_link_spd_horkage(struct ata_device *dev)
2234{
2235	struct ata_link *plink = ata_dev_phys_link(dev);
2236	u32 target, target_limit;
2237
2238	if (!sata_scr_valid(plink))
2239		return 0;
2240
2241	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2242		target = 1;
2243	else
2244		return 0;
2245
2246	target_limit = (1 << target) - 1;
2247
2248	/* if already on stricter limit, no need to push further */
2249	if (plink->sata_spd_limit <= target_limit)
2250		return 0;
2251
2252	plink->sata_spd_limit = target_limit;
2253
2254	/* Request another EH round by returning -EAGAIN if link is
2255	 * going faster than the target speed.  Forward progress is
2256	 * guaranteed by setting sata_spd_limit to target_limit above.
2257	 */
2258	if (plink->sata_spd > target) {
2259		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2260			     sata_spd_string(target));
2261		return -EAGAIN;
2262	}
2263	return 0;
2264}
2265
2266static inline u8 ata_dev_knobble(struct ata_device *dev)
2267{
2268	struct ata_port *ap = dev->link->ap;
2269
2270	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2271		return 0;
2272
2273	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2274}
2275
2276static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2277{
2278	struct ata_port *ap = dev->link->ap;
2279	unsigned int err_mask;
2280
2281	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2282		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2283		return;
2284	}
2285	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2286				     0, ap->sector_buf, 1);
2287	if (!err_mask) {
2288		u8 *cmds = dev->ncq_send_recv_cmds;
2289
2290		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2291		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2292
2293		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2294			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2295			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2296				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2297		}
2298	}
2299}
2300
2301static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2302{
2303	struct ata_port *ap = dev->link->ap;
2304	unsigned int err_mask;
2305
2306	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2307		ata_dev_warn(dev,
 2308			     "NCQ Non-Data Log not supported\n");
2309		return;
2310	}
2311	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2312				     0, ap->sector_buf, 1);
2313	if (!err_mask) {
2314		u8 *cmds = dev->ncq_non_data_cmds;
2315
2316		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2317	}
2318}
2319
2320static void ata_dev_config_ncq_prio(struct ata_device *dev)
2321{
2322	struct ata_port *ap = dev->link->ap;
2323	unsigned int err_mask;
2324
2325	if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2326		return;
2327
2328	err_mask = ata_read_log_page(dev,
2329				     ATA_LOG_IDENTIFY_DEVICE,
2330				     ATA_LOG_SATA_SETTINGS,
2331				     ap->sector_buf,
2332				     1);
2333	if (err_mask)
2334		goto not_supported;
2335
2336	if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2337		goto not_supported;
2338
2339	dev->flags |= ATA_DFLAG_NCQ_PRIO;
2340
2341	return;
2342
2343not_supported:
2344	dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
2345	dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2346}
2347
2348static bool ata_dev_check_adapter(struct ata_device *dev,
2349				  unsigned short vendor_id)
2350{
2351	struct pci_dev *pcidev = NULL;
2352	struct device *parent_dev = NULL;
2353
2354	for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2355	     parent_dev = parent_dev->parent) {
2356		if (dev_is_pci(parent_dev)) {
2357			pcidev = to_pci_dev(parent_dev);
2358			if (pcidev->vendor == vendor_id)
2359				return true;
2360			break;
2361		}
2362	}
2363
2364	return false;
2365}
2366
2367static int ata_dev_config_ncq(struct ata_device *dev,
2368			       char *desc, size_t desc_sz)
2369{
2370	struct ata_port *ap = dev->link->ap;
2371	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2372	unsigned int err_mask;
2373	char *aa_desc = "";
2374
2375	if (!ata_id_has_ncq(dev->id)) {
2376		desc[0] = '\0';
2377		return 0;
2378	}
2379	if (!IS_ENABLED(CONFIG_SATA_HOST))
2380		return 0;
2381	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2382		snprintf(desc, desc_sz, "NCQ (not used)");
2383		return 0;
2384	}
2385
2386	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
2387	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
2388		snprintf(desc, desc_sz, "NCQ (not used)");
2389		return 0;
2390	}
2391
2392	if (ap->flags & ATA_FLAG_NCQ) {
2393		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2394		dev->flags |= ATA_DFLAG_NCQ;
2395	}
2396
2397	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2398		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2399		ata_id_has_fpdma_aa(dev->id)) {
2400		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2401			SATA_FPDMA_AA);
2402		if (err_mask) {
2403			ata_dev_err(dev,
2404				    "failed to enable AA (error_mask=0x%x)\n",
2405				    err_mask);
2406			if (err_mask != AC_ERR_DEV) {
2407				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2408				return -EIO;
2409			}
2410		} else
2411			aa_desc = ", AA";
2412	}
2413
2414	if (hdepth >= ddepth)
2415		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2416	else
2417		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2418			ddepth, aa_desc);
2419
2420	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2421		if (ata_id_has_ncq_send_and_recv(dev->id))
2422			ata_dev_config_ncq_send_recv(dev);
2423		if (ata_id_has_ncq_non_data(dev->id))
2424			ata_dev_config_ncq_non_data(dev);
2425		if (ata_id_has_ncq_prio(dev->id))
2426			ata_dev_config_ncq_prio(dev);
2427	}
2428
2429	return 0;
2430}
2431
2432static void ata_dev_config_sense_reporting(struct ata_device *dev)
2433{
2434	unsigned int err_mask;
2435
2436	if (!ata_id_has_sense_reporting(dev->id))
2437		return;
2438
2439	if (ata_id_sense_reporting_enabled(dev->id))
2440		return;
2441
2442	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2443	if (err_mask) {
2444		ata_dev_dbg(dev,
2445			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2446			    err_mask);
2447	}
2448}
2449
2450static void ata_dev_config_zac(struct ata_device *dev)
2451{
2452	struct ata_port *ap = dev->link->ap;
2453	unsigned int err_mask;
2454	u8 *identify_buf = ap->sector_buf;
2455
2456	dev->zac_zones_optimal_open = U32_MAX;
2457	dev->zac_zones_optimal_nonseq = U32_MAX;
2458	dev->zac_zones_max_open = U32_MAX;
2459
2460	/*
2461	 * Always set the 'ZAC' flag for Host-managed devices.
2462	 */
2463	if (dev->class == ATA_DEV_ZAC)
2464		dev->flags |= ATA_DFLAG_ZAC;
2465	else if (ata_id_zoned_cap(dev->id) == 0x01)
2466		/*
2467		 * Check for host-aware devices.
2468		 */
2469		dev->flags |= ATA_DFLAG_ZAC;
2470
2471	if (!(dev->flags & ATA_DFLAG_ZAC))
2472		return;
2473
2474	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2475		ata_dev_warn(dev,
2476			     "ATA Zoned Information Log not supported\n");
2477		return;
2478	}
2479
2480	/*
2481	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2482	 */
2483	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2484				     ATA_LOG_ZONED_INFORMATION,
2485				     identify_buf, 1);
2486	if (!err_mask) {
2487		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2488
2489		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2490		if ((zoned_cap >> 63))
2491			dev->zac_zoned_cap = (zoned_cap & 1);
2492		opt_open = get_unaligned_le64(&identify_buf[24]);
2493		if ((opt_open >> 63))
2494			dev->zac_zones_optimal_open = (u32)opt_open;
2495		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2496		if ((opt_nonseq >> 63))
2497			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2498		max_open = get_unaligned_le64(&identify_buf[40]);
2499		if ((max_open >> 63))
2500			dev->zac_zones_max_open = (u32)max_open;
2501	}
2502}
2503
2504static void ata_dev_config_trusted(struct ata_device *dev)
2505{
2506	struct ata_port *ap = dev->link->ap;
2507	u64 trusted_cap;
2508	unsigned int err;
2509
2510	if (!ata_id_has_trusted(dev->id))
2511		return;
2512
2513	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2514		ata_dev_warn(dev,
2515			     "Security Log not supported\n");
2516		return;
2517	}
2518
2519	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2520			ap->sector_buf, 1);
2521	if (err)
2522		return;
2523
2524	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2525	if (!(trusted_cap & (1ULL << 63))) {
2526		ata_dev_dbg(dev,
2527			    "Trusted Computing capability qword not valid!\n");
2528		return;
2529	}
2530
2531	if (trusted_cap & (1 << 0))
2532		dev->flags |= ATA_DFLAG_TRUSTED;
2533}
2534
2535static void ata_dev_config_cdl(struct ata_device *dev)
2536{
2537	struct ata_port *ap = dev->link->ap;
2538	unsigned int err_mask;
2539	bool cdl_enabled;
2540	u64 val;
2541
2542	if (ata_id_major_version(dev->id) < 12)
2543		goto not_supported;
2544
2545	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
2546	    !ata_identify_page_supported(dev, ATA_LOG_SUPPORTED_CAPABILITIES) ||
2547	    !ata_identify_page_supported(dev, ATA_LOG_CURRENT_SETTINGS))
2548		goto not_supported;
2549
2550	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2551				     ATA_LOG_SUPPORTED_CAPABILITIES,
2552				     ap->sector_buf, 1);
2553	if (err_mask)
2554		goto not_supported;
2555
2556	/* Check Command Duration Limit Supported bits */
2557	val = get_unaligned_le64(&ap->sector_buf[168]);
2558	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
2559		goto not_supported;
2560
2561	/* Warn the user if command duration guideline is not supported */
2562	if (!(val & BIT_ULL(1)))
2563		ata_dev_warn(dev,
2564			"Command duration guideline is not supported\n");
2565
2566	/*
 2567	 * The Sense Data for Successful NCQ Commands log must be supported,
 2568	 * as indicated by the successful NCQ command sense data supported bit.
2569	 */
2570	val = get_unaligned_le64(&ap->sector_buf[8]);
2571	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
2572		ata_dev_warn(dev,
2573			"CDL supported but Successful NCQ Command Sense Data is not supported\n");
2574		goto not_supported;
2575	}
2576
2577	/* Without NCQ autosense, the successful NCQ commands log is useless. */
2578	if (!ata_id_has_ncq_autosense(dev->id)) {
2579		ata_dev_warn(dev,
2580			"CDL supported but NCQ autosense is not supported\n");
2581		goto not_supported;
2582	}
2583
2584	/*
2585	 * If CDL is marked as enabled, make sure the feature is enabled too.
2586	 * Conversely, if CDL is disabled, make sure the feature is turned off.
2587	 */
2588	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2589				     ATA_LOG_CURRENT_SETTINGS,
2590				     ap->sector_buf, 1);
2591	if (err_mask)
2592		goto not_supported;
2593
2594	val = get_unaligned_le64(&ap->sector_buf[8]);
2595	cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
2596	if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
2597		if (!cdl_enabled) {
2598			/* Enable CDL on the device */
2599			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 1);
2600			if (err_mask) {
2601				ata_dev_err(dev,
2602					    "Enable CDL feature failed\n");
2603				goto not_supported;
2604			}
2605		}
2606	} else {
2607		if (cdl_enabled) {
2608			/* Disable CDL on the device */
2609			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 0);
2610			if (err_mask) {
2611				ata_dev_err(dev,
2612					    "Disable CDL feature failed\n");
2613				goto not_supported;
2614			}
2615		}
2616	}
2617
2618	/*
2619	 * While CDL itself has to be enabled using sysfs, CDL requires that
2620	 * sense data for successful NCQ commands is enabled to work properly.
2621	 * Just like ata_dev_config_sense_reporting(), enable it unconditionally
2622	 * if supported.
2623	 */
2624	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(18))) {
2625		err_mask = ata_dev_set_feature(dev,
2626					SETFEATURE_SENSE_DATA_SUCC_NCQ, 0x1);
2627		if (err_mask) {
2628			ata_dev_warn(dev,
2629				     "failed to enable Sense Data for successful NCQ commands, Emask 0x%x\n",
2630				     err_mask);
2631			goto not_supported;
2632		}
2633	}
2634
2635	/*
2636	 * Allocate a buffer to handle reading the sense data for successful
2637	 * NCQ Commands log page for commands using a CDL with one of the limit
2638	 * policies set to 0xD (successful completion with sense data available
2639	 * bit set).
2640	 */
2641	if (!ap->ncq_sense_buf) {
2642		ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL);
2643		if (!ap->ncq_sense_buf)
2644			goto not_supported;
2645	}
2646
2647	/*
2648	 * Command duration limits are supported: cache the CDL log page 18h
2649	 * (command duration descriptors).
2650	 */
2651	err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
2652	if (err_mask) {
2653		ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
2654		goto not_supported;
2655	}
2656
2657	memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
2658	dev->flags |= ATA_DFLAG_CDL;
2659
2660	return;
2661
2662not_supported:
2663	dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
2664	kfree(ap->ncq_sense_buf);
2665	ap->ncq_sense_buf = NULL;
2666}
2667
2668static int ata_dev_config_lba(struct ata_device *dev)
2669{
2670	const u16 *id = dev->id;
2671	const char *lba_desc;
2672	char ncq_desc[32];
2673	int ret;
2674
2675	dev->flags |= ATA_DFLAG_LBA;
2676
2677	if (ata_id_has_lba48(id)) {
2678		lba_desc = "LBA48";
2679		dev->flags |= ATA_DFLAG_LBA48;
2680		if (dev->n_sectors >= (1UL << 28) &&
2681		    ata_id_has_flush_ext(id))
2682			dev->flags |= ATA_DFLAG_FLUSH_EXT;
2683	} else {
2684		lba_desc = "LBA";
2685	}
2686
2687	/* config NCQ */
2688	ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2689
2690	/* print device info to dmesg */
2691	if (ata_dev_print_info(dev))
2692		ata_dev_info(dev,
2693			     "%llu sectors, multi %u: %s %s\n",
2694			     (unsigned long long)dev->n_sectors,
2695			     dev->multi_count, lba_desc, ncq_desc);
2696
2697	return ret;
2698}
2699
2700static void ata_dev_config_chs(struct ata_device *dev)
2701{
2702	const u16 *id = dev->id;
2703
2704	if (ata_id_current_chs_valid(id)) {
2705		/* Current CHS translation is valid. */
2706		dev->cylinders = id[54];
2707		dev->heads     = id[55];
2708		dev->sectors   = id[56];
2709	} else {
2710		/* Default translation */
2711		dev->cylinders	= id[1];
2712		dev->heads	= id[3];
2713		dev->sectors	= id[6];
2714	}
2715
2716	/* print device info to dmesg */
2717	if (ata_dev_print_info(dev))
2718		ata_dev_info(dev,
2719			     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2720			     (unsigned long long)dev->n_sectors,
2721			     dev->multi_count, dev->cylinders,
2722			     dev->heads, dev->sectors);
2723}
2724
2725static void ata_dev_config_fua(struct ata_device *dev)
2726{
2727	/* Ignore FUA support if its use is disabled globally */
2728	if (!libata_fua)
2729		goto nofua;
2730
2731	/* Ignore devices without support for WRITE DMA FUA EXT */
2732	if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
2733		goto nofua;
2734
2735	/* Ignore known bad devices and devices that lack NCQ support */
2736	if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
2737		goto nofua;
2738
2739	dev->flags |= ATA_DFLAG_FUA;
2740
2741	return;
2742
2743nofua:
2744	dev->flags &= ~ATA_DFLAG_FUA;
2745}
2746
2747static void ata_dev_config_devslp(struct ata_device *dev)
2748{
2749	u8 *sata_setting = dev->link->ap->sector_buf;
2750	unsigned int err_mask;
2751	int i, j;
2752
2753	/*
2754	 * Check device sleep capability. Get DevSlp timing variables
2755	 * from SATA Settings page of Identify Device Data Log.
2756	 */
2757	if (!ata_id_has_devslp(dev->id) ||
2758	    !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2759		return;
2760
2761	err_mask = ata_read_log_page(dev,
2762				     ATA_LOG_IDENTIFY_DEVICE,
2763				     ATA_LOG_SATA_SETTINGS,
2764				     sata_setting, 1);
2765	if (err_mask)
2766		return;
2767
2768	dev->flags |= ATA_DFLAG_DEVSLP;
2769	for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2770		j = ATA_LOG_DEVSLP_OFFSET + i;
2771		dev->devslp_timing[i] = sata_setting[j];
2772	}
2773}
2774
2775static void ata_dev_config_cpr(struct ata_device *dev)
2776{
2777	unsigned int err_mask;
2778	size_t buf_len;
2779	int i, nr_cpr = 0;
2780	struct ata_cpr_log *cpr_log = NULL;
2781	u8 *desc, *buf = NULL;
2782
2783	if (ata_id_major_version(dev->id) < 11)
2784		goto out;
2785
2786	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
2787	if (buf_len == 0)
2788		goto out;
2789
2790	/*
2791	 * Read the concurrent positioning ranges log (0x47). We can have at
2792	 * most 255 32B range descriptors plus a 64B header. This log varies in
2793	 * size, so use the size reported in the GPL directory. Reading beyond
2794	 * the supported length will result in an error.
2795	 */
2796	buf_len <<= 9;
2797	buf = kzalloc(buf_len, GFP_KERNEL);
2798	if (!buf)
2799		goto out;
2800
2801	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
2802				     0, buf, buf_len >> 9);
2803	if (err_mask)
2804		goto out;
2805
2806	nr_cpr = buf[0];
2807	if (!nr_cpr)
2808		goto out;
2809
2810	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
2811	if (!cpr_log)
2812		goto out;
2813
2814	cpr_log->nr_cpr = nr_cpr;
2815	desc = &buf[64];
2816	for (i = 0; i < nr_cpr; i++, desc += 32) {
2817		cpr_log->cpr[i].num = desc[0];
2818		cpr_log->cpr[i].num_storage_elements = desc[1];
2819		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
2820		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
2821	}
2822
2823out:
2824	swap(dev->cpr_log, cpr_log);
2825	kfree(cpr_log);
2826	kfree(buf);
2827}
2828
2829static void ata_dev_print_features(struct ata_device *dev)
2830{
2831	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
2832		return;
2833
2834	ata_dev_info(dev,
2835		     "Features:%s%s%s%s%s%s%s%s\n",
2836		     dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
2837		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
2838		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
2839		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
2840		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
2841		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
2842		     dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
2843		     dev->cpr_log ? " CPR" : "");
2844}
2845
2846/**
2847 *	ata_dev_configure - Configure the specified ATA/ATAPI device
2848 *	@dev: Target device to configure
2849 *
2850 *	Configure @dev according to @dev->id.  Generic and low-level
2851 *	driver specific fixups are also applied.
2852 *
2853 *	LOCKING:
2854 *	Kernel thread context (may sleep)
2855 *
2856 *	RETURNS:
2857 *	0 on success, -errno otherwise
2858 */
2859int ata_dev_configure(struct ata_device *dev)
2860{
2861	struct ata_port *ap = dev->link->ap;
2862	bool print_info = ata_dev_print_info(dev);
2863	const u16 *id = dev->id;
2864	unsigned int xfer_mask;
2865	unsigned int err_mask;
2866	char revbuf[7];		/* XYZ-99\0 */
2867	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2868	char modelbuf[ATA_ID_PROD_LEN+1];
2869	int rc;
2870
2871	if (!ata_dev_enabled(dev)) {
2872		ata_dev_dbg(dev, "no device\n");
2873		return 0;
2874	}
2875
2876	/* set horkage */
2877	dev->horkage |= ata_dev_blacklisted(dev);
2878	ata_force_horkage(dev);
2879
2880	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2881		ata_dev_info(dev, "unsupported device, disabling\n");
2882		ata_dev_disable(dev);
2883		return 0;
2884	}
2885
2886	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2887	    dev->class == ATA_DEV_ATAPI) {
2888		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2889			     atapi_enabled ? "not supported with this driver"
2890			     : "disabled");
2891		ata_dev_disable(dev);
2892		return 0;
2893	}
2894
2895	rc = ata_do_link_spd_horkage(dev);
2896	if (rc)
2897		return rc;
2898
2899	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2900	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2901	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2902		dev->horkage |= ATA_HORKAGE_NOLPM;
2903
2904	if (ap->flags & ATA_FLAG_NO_LPM)
2905		dev->horkage |= ATA_HORKAGE_NOLPM;
2906
2907	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2908		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2909		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2910	}
2911
2912	/* let ACPI work its magic */
2913	rc = ata_acpi_on_devcfg(dev);
2914	if (rc)
2915		return rc;
2916
2917	/* massage HPA, do it early as it might change IDENTIFY data */
2918	rc = ata_hpa_resize(dev);
2919	if (rc)
2920		return rc;
2921
2922	/* print device capabilities */
2923	ata_dev_dbg(dev,
2924		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2925		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2926		    __func__,
2927		    id[49], id[82], id[83], id[84],
2928		    id[85], id[86], id[87], id[88]);
2929
2930	/* initialize to-be-configured parameters */
2931	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2932	dev->max_sectors = 0;
2933	dev->cdb_len = 0;
2934	dev->n_sectors = 0;
2935	dev->cylinders = 0;
2936	dev->heads = 0;
2937	dev->sectors = 0;
2938	dev->multi_count = 0;
2939
2940	/*
2941	 * common ATA, ATAPI feature tests
2942	 */
2943
2944	/* find max transfer mode; for printk only */
2945	xfer_mask = ata_id_xfermask(id);
2946
2947	ata_dump_id(dev, id);
2948
2949	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2950	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2951			sizeof(fwrevbuf));
2952
2953	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2954			sizeof(modelbuf));
2955
2956	/* ATA-specific feature tests */
2957	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2958		if (ata_id_is_cfa(id)) {
2959			/* CPRM may make this media unusable */
2960			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2961				ata_dev_warn(dev,
2962	"supports DRM functions and may not be fully accessible\n");
2963			snprintf(revbuf, 7, "CFA");
2964		} else {
2965			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2966			/* Warn the user if the device has TPM extensions */
2967			if (ata_id_has_tpm(id))
2968				ata_dev_warn(dev,
2969	"supports DRM functions and may not be fully accessible\n");
2970		}
2971
2972		dev->n_sectors = ata_id_n_sectors(id);
2973
2974		/* get current R/W Multiple count setting */
2975		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2976			unsigned int max = dev->id[47] & 0xff;
2977			unsigned int cnt = dev->id[59] & 0xff;
2978			/* only recognize/allow powers of two here */
2979			if (is_power_of_2(max) && is_power_of_2(cnt))
2980				if (cnt <= max)
2981					dev->multi_count = cnt;
2982		}
2983
2984		/* print device info to dmesg */
2985		if (print_info)
2986			ata_dev_info(dev, "%s: %s, %s, max %s\n",
2987				     revbuf, modelbuf, fwrevbuf,
2988				     ata_mode_string(xfer_mask));
2989
2990		if (ata_id_has_lba(id)) {
2991			rc = ata_dev_config_lba(dev);
2992			if (rc)
2993				return rc;
2994		} else {
2995			ata_dev_config_chs(dev);
2996		}
2997
2998		ata_dev_config_fua(dev);
2999		ata_dev_config_devslp(dev);
3000		ata_dev_config_sense_reporting(dev);
3001		ata_dev_config_zac(dev);
3002		ata_dev_config_trusted(dev);
3003		ata_dev_config_cpr(dev);
3004		ata_dev_config_cdl(dev);
3005		dev->cdb_len = 32;
3006
3007		if (print_info)
3008			ata_dev_print_features(dev);
3009	}
3010
3011	/* ATAPI-specific feature tests */
3012	else if (dev->class == ATA_DEV_ATAPI) {
3013		const char *cdb_intr_string = "";
3014		const char *atapi_an_string = "";
3015		const char *dma_dir_string = "";
3016		u32 sntf;
3017
3018		rc = atapi_cdb_len(id);
3019		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
3020			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
3021			rc = -EINVAL;
3022			goto err_out_nosup;
3023		}
3024		dev->cdb_len = (unsigned int) rc;
3025
3026		/* Enable ATAPI AN if both the host and device have
3027		 * the support.  If PMP is attached, SNTF is required
3028		 * to enable ATAPI AN to discern between PHY status
3029		 * changed notifications and ATAPI ANs.
3030		 */
3031		if (atapi_an &&
3032		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
3033		    (!sata_pmp_attached(ap) ||
3034		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
3035			/* issue SET feature command to turn this on */
3036			err_mask = ata_dev_set_feature(dev,
3037					SETFEATURES_SATA_ENABLE, SATA_AN);
3038			if (err_mask)
3039				ata_dev_err(dev,
3040					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
3041					    err_mask);
3042			else {
3043				dev->flags |= ATA_DFLAG_AN;
3044				atapi_an_string = ", ATAPI AN";
3045			}
3046		}
3047
3048		if (ata_id_cdb_intr(dev->id)) {
3049			dev->flags |= ATA_DFLAG_CDB_INTR;
3050			cdb_intr_string = ", CDB intr";
3051		}
3052
3053		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
3054			dev->flags |= ATA_DFLAG_DMADIR;
3055			dma_dir_string = ", DMADIR";
3056		}
3057
3058		if (ata_id_has_da(dev->id)) {
3059			dev->flags |= ATA_DFLAG_DA;
3060			zpodd_init(dev);
3061		}
3062
3063		/* print device info to dmesg */
3064		if (print_info)
3065			ata_dev_info(dev,
3066				     "ATAPI: %s, %s, max %s%s%s%s\n",
3067				     modelbuf, fwrevbuf,
3068				     ata_mode_string(xfer_mask),
3069				     cdb_intr_string, atapi_an_string,
3070				     dma_dir_string);
3071	}
3072
3073	/* determine max_sectors */
3074	dev->max_sectors = ATA_MAX_SECTORS;
3075	if (dev->flags & ATA_DFLAG_LBA48)
3076		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
3077
3078	/* Limit PATA drive on SATA cable bridge transfers to udma5,
3079	   200 sectors */
3080	if (ata_dev_knobble(dev)) {
3081		if (print_info)
3082			ata_dev_info(dev, "applying bridge limits\n");
3083		dev->udma_mask &= ATA_UDMA5;
3084		dev->max_sectors = ATA_MAX_SECTORS;
3085	}
3086
3087	if ((dev->class == ATA_DEV_ATAPI) &&
3088	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
3089		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
3090		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
3091	}
3092
3093	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
3094		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
3095					 dev->max_sectors);
3096
3097	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
3098		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
3099					 dev->max_sectors);
3100
3101	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
3102		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
3103
3104	if (ap->ops->dev_config)
3105		ap->ops->dev_config(dev);
3106
3107	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
3108		/* Let the user know. We don't want to disallow opens for
3109		   rescue purposes, or in case the vendor is just a blithering
3110		   idiot. Do this after the dev_config call as some controllers
3111		   with buggy firmware may want to avoid reporting false device
3112		   bugs */
3113
3114		if (print_info) {
3115			ata_dev_warn(dev,
3116"Drive reports diagnostics failure. This may indicate a drive\n");
3117			ata_dev_warn(dev,
3118"fault or invalid emulation. Contact drive vendor for information.\n");
3119		}
3120	}
3121
3122	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
3123		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
3124		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
3125	}
3126
3127	return 0;
3128
3129err_out_nosup:
3130	return rc;
3131}
3132
3133/**
3134 *	ata_cable_40wire	-	return 40 wire cable type
3135 *	@ap: port
3136 *
3137 *	Helper method for drivers which want to hardwire 40 wire cable
3138 *	detection.
3139 */
3140
3141int ata_cable_40wire(struct ata_port *ap)
3142{
3143	return ATA_CBL_PATA40;
3144}
3145EXPORT_SYMBOL_GPL(ata_cable_40wire);
3146
3147/**
3148 *	ata_cable_80wire	-	return 80 wire cable type
3149 *	@ap: port
3150 *
3151 *	Helper method for drivers which want to hardwire 80 wire cable
3152 *	detection.
3153 */
3154
3155int ata_cable_80wire(struct ata_port *ap)
3156{
3157	return ATA_CBL_PATA80;
3158}
3159EXPORT_SYMBOL_GPL(ata_cable_80wire);
3160
3161/**
3162 *	ata_cable_unknown	-	return unknown PATA cable.
3163 *	@ap: port
3164 *
3165 *	Helper method for drivers which have no PATA cable detection.
3166 */
3167
3168int ata_cable_unknown(struct ata_port *ap)
3169{
3170	return ATA_CBL_PATA_UNK;
3171}
3172EXPORT_SYMBOL_GPL(ata_cable_unknown);
3173
3174/**
3175 *	ata_cable_ignore	-	return ignored PATA cable.
3176 *	@ap: port
3177 *
3178 *	Helper method for drivers which don't use cable type to limit
3179 *	transfer mode.
3180 */
3181int ata_cable_ignore(struct ata_port *ap)
3182{
3183	return ATA_CBL_PATA_IGN;
3184}
3185EXPORT_SYMBOL_GPL(ata_cable_ignore);
3186
3187/**
3188 *	ata_cable_sata	-	return SATA cable type
3189 *	@ap: port
3190 *
3191 *	Helper method for drivers which have SATA cables
3192 */
3193
3194int ata_cable_sata(struct ata_port *ap)
3195{
3196	return ATA_CBL_SATA;
3197}
3198EXPORT_SYMBOL_GPL(ata_cable_sata);
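
/*
 * Illustrative sketch (not part of libata): a hypothetical PATA driver whose
 * board is hard-wired with an 80-wire cable can skip runtime cable detection
 * entirely by plugging one of the helpers above into its port operations:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_80wire,
 *	};
 *
 * "foo_port_ops" is a made-up name; everything else is the regular libata
 * driver API.
 */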
3199
3200/**
3201 *	sata_print_link_status - Print SATA link status
3202 *	@link: SATA link to printk link status about
3203 *
3204 *	This function prints link speed and status of a SATA link.
3205 *
3206 *	LOCKING:
3207 *	None.
3208 */
3209static void sata_print_link_status(struct ata_link *link)
3210{
3211	u32 sstatus, scontrol, tmp;
3212
3213	if (sata_scr_read(link, SCR_STATUS, &sstatus))
3214		return;
3215	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3216		return;
3217
3218	if (ata_phys_link_online(link)) {
3219		tmp = (sstatus >> 4) & 0xf;
3220		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3221			      sata_spd_string(tmp), sstatus, scontrol);
3222	} else {
3223		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3224			      sstatus, scontrol);
3225	}
3226}
3227
3228/**
3229 *	ata_dev_pair		-	return other device on cable
3230 *	@adev: device
3231 *
3232 *	Obtain the other device on the same cable, or NULL if no
3233 *	other device is present.
3234 */
3235
3236struct ata_device *ata_dev_pair(struct ata_device *adev)
3237{
3238	struct ata_link *link = adev->link;
3239	struct ata_device *pair = &link->device[1 - adev->devno];
3240	if (!ata_dev_enabled(pair))
3241		return NULL;
3242	return pair;
3243}
3244EXPORT_SYMBOL_GPL(ata_dev_pair);
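
/*
 * Illustrative sketch (not part of libata): a hypothetical ->dev_config()
 * hook that wants to know whether another device shares the cable could use
 * ata_dev_pair() like this ("foo_dev_config" is a made-up name):
 *
 *	static void foo_dev_config(struct ata_device *adev)
 *	{
 *		struct ata_device *pair = ata_dev_pair(adev);
 *
 *		if (pair)
 *			ata_dev_info(adev, "sharing cable with device %u\n",
 *				     pair->devno);
 *	}
 */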
3245
3246/**
3247 *	sata_down_spd_limit - adjust SATA spd limit downward
3248 *	@link: Link to adjust SATA spd limit for
3249 *	@spd_limit: Additional limit
3250 *
3251 *	Adjust SATA spd limit of @link downward.  Note that this
3252 *	function only adjusts the limit.  The change must be applied
3253 *	using sata_set_spd().
3254 *
3255 *	If @spd_limit is non-zero, the speed is limited to a speed
3256 *	equal to or lower than @spd_limit, if such a speed is supported.
3257 *	If @spd_limit is slower than any supported speed, only the
3258 *	lowest supported speed is allowed.
3259 *
3260 *	LOCKING:
3261 *	Inherited from caller.
3262 *
3263 *	RETURNS:
3264 *	0 on success, negative errno on failure
3265 */
3266int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3267{
3268	u32 sstatus, spd, mask;
3269	int rc, bit;
3270
3271	if (!sata_scr_valid(link))
3272		return -EOPNOTSUPP;
3273
3274	/* If SCR can be read, use it to determine the current SPD.
3275	 * If not, use cached value in link->sata_spd.
3276	 */
3277	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3278	if (rc == 0 && ata_sstatus_online(sstatus))
3279		spd = (sstatus >> 4) & 0xf;
3280	else
3281		spd = link->sata_spd;
3282
3283	mask = link->sata_spd_limit;
3284	if (mask <= 1)
3285		return -EINVAL;
3286
3287	/* unconditionally mask off the highest bit */
3288	bit = fls(mask) - 1;
3289	mask &= ~(1 << bit);
3290
3291	/*
3292	 * Mask off all speeds higher than or equal to the current one.  At
3293	 * this point, if current SPD is not available and we previously
3294	 * recorded the link speed from SStatus, the driver has already
3295	 * masked off the highest bit so mask should already be 1 or 0.
3296	 * Otherwise, we should not force 1.5Gbps on a link where we have
3297	 * not previously recorded speed from SStatus.  Just return in this
3298	 * case.
3299	 */
3300	if (spd > 1)
3301		mask &= (1 << (spd - 1)) - 1;
3302	else if (link->sata_spd)
3303		return -EINVAL;
3304
3305	/* were we already at the bottom? */
3306	if (!mask)
3307		return -EINVAL;
3308
3309	if (spd_limit) {
3310		if (mask & ((1 << spd_limit) - 1))
3311			mask &= (1 << spd_limit) - 1;
3312		else {
3313			bit = ffs(mask) - 1;
3314			mask = 1 << bit;
3315		}
3316	}
3317
3318	link->sata_spd_limit = mask;
3319
3320	ata_link_warn(link, "limiting SATA link speed to %s\n",
3321		      sata_spd_string(fls(mask)));
3322
3323	return 0;
3324}
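
/*
 * Illustrative sketch (not part of libata): because this function only
 * updates link->sata_spd_limit, an error-handling style caller has to apply
 * the new limit itself, e.g.:
 *
 *	if (sata_down_spd_limit(link, 0) == 0)
 *		sata_set_spd(link);
 *
 * sata_set_spd() programs SControl from the stored limit; the lower speed
 * then takes effect on the following reset.
 */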
3325
3326#ifdef CONFIG_ATA_ACPI
3327/**
3328 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3329 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3330 *	@cycle: cycle duration in ns
3331 *
3332 *	Return matching xfer mode for @cycle.  The returned mode is of
3333 *	the transfer type specified by @xfer_shift.  If @cycle is too
3334 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3335 *	than the fastest known mode, the fastest mode is returned.
3336 *
3337 *	LOCKING:
3338 *	None.
3339 *
3340 *	RETURNS:
3341 *	Matching xfer_mode, 0xff if no match found.
3342 */
3343u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3344{
3345	u8 base_mode = 0xff, last_mode = 0xff;
3346	const struct ata_xfer_ent *ent;
3347	const struct ata_timing *t;
3348
3349	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3350		if (ent->shift == xfer_shift)
3351			base_mode = ent->base;
3352
3353	for (t = ata_timing_find_mode(base_mode);
3354	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3355		unsigned short this_cycle;
3356
3357		switch (xfer_shift) {
3358		case ATA_SHIFT_PIO:
3359		case ATA_SHIFT_MWDMA:
3360			this_cycle = t->cycle;
3361			break;
3362		case ATA_SHIFT_UDMA:
3363			this_cycle = t->udma;
3364			break;
3365		default:
3366			return 0xff;
3367		}
3368
3369		if (cycle > this_cycle)
3370			break;
3371
3372		last_mode = t->mode;
3373	}
3374
3375	return last_mode;
3376}
3377#endif
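
/*
 * Illustrative sketch (not part of libata): a PATA driver that has measured
 * (or been told by firmware) a minimum usable cycle time of 120ns could ask
 * for the fastest multiword DMA mode that still respects it:
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_MWDMA, 120);
 *
 * With the standard ATA timing table this evaluates to XFER_MW_DMA_2, and a
 * result of 0xff would indicate that no MWDMA mode is slow enough for the
 * requested cycle.
 */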
3378
3379/**
3380 *	ata_down_xfermask_limit - adjust dev xfer masks downward
3381 *	@dev: Device to adjust xfer masks
3382 *	@sel: ATA_DNXFER_* selector
3383 *
3384 *	Adjust xfer masks of @dev downward.  Note that this function
3385 *	does not apply the change.  Invoking ata_set_mode() afterwards
3386 *	will apply the limit.
3387 *
3388 *	LOCKING:
3389 *	Inherited from caller.
3390 *
3391 *	RETURNS:
3392 *	0 on success, negative errno on failure
3393 */
3394int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3395{
3396	char buf[32];
3397	unsigned int orig_mask, xfer_mask;
3398	unsigned int pio_mask, mwdma_mask, udma_mask;
3399	int quiet, highbit;
3400
3401	quiet = !!(sel & ATA_DNXFER_QUIET);
3402	sel &= ~ATA_DNXFER_QUIET;
3403
3404	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3405						  dev->mwdma_mask,
3406						  dev->udma_mask);
3407	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3408
3409	switch (sel) {
3410	case ATA_DNXFER_PIO:
3411		highbit = fls(pio_mask) - 1;
3412		pio_mask &= ~(1 << highbit);
3413		break;
3414
3415	case ATA_DNXFER_DMA:
3416		if (udma_mask) {
3417			highbit = fls(udma_mask) - 1;
3418			udma_mask &= ~(1 << highbit);
3419			if (!udma_mask)
3420				return -ENOENT;
3421		} else if (mwdma_mask) {
3422			highbit = fls(mwdma_mask) - 1;
3423			mwdma_mask &= ~(1 << highbit);
3424			if (!mwdma_mask)
3425				return -ENOENT;
3426		}
3427		break;
3428
3429	case ATA_DNXFER_40C:
3430		udma_mask &= ATA_UDMA_MASK_40C;
3431		break;
3432
3433	case ATA_DNXFER_FORCE_PIO0:
3434		pio_mask &= 1;
3435		fallthrough;
3436	case ATA_DNXFER_FORCE_PIO:
3437		mwdma_mask = 0;
3438		udma_mask = 0;
3439		break;
3440
3441	default:
3442		BUG();
3443	}
3444
3445	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3446
3447	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3448		return -ENOENT;
3449
3450	if (!quiet) {
3451		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3452			snprintf(buf, sizeof(buf), "%s:%s",
3453				 ata_mode_string(xfer_mask),
3454				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3455		else
3456			snprintf(buf, sizeof(buf), "%s",
3457				 ata_mode_string(xfer_mask));
3458
3459		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3460	}
3461
3462	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3463			    &dev->udma_mask);
3464
3465	return 0;
3466}
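
/*
 * Illustrative sketch (not part of libata): a recovery path that wants to
 * retry a misbehaving device with DMA ruled out entirely could do:
 *
 *	if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0)
 *		ata_dev_warn(dev, "falling back to PIO\n");
 *
 * As noted above, the reduced masks only take effect once ata_set_mode()
 * runs again for the link.
 */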
3467
3468static int ata_dev_set_mode(struct ata_device *dev)
3469{
3470	struct ata_port *ap = dev->link->ap;
3471	struct ata_eh_context *ehc = &dev->link->eh_context;
3472	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3473	const char *dev_err_whine = "";
3474	int ign_dev_err = 0;
3475	unsigned int err_mask = 0;
3476	int rc;
3477
3478	dev->flags &= ~ATA_DFLAG_PIO;
3479	if (dev->xfer_shift == ATA_SHIFT_PIO)
3480		dev->flags |= ATA_DFLAG_PIO;
3481
3482	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3483		dev_err_whine = " (SET_XFERMODE skipped)";
3484	else {
3485		if (nosetxfer)
3486			ata_dev_warn(dev,
3487				     "NOSETXFER but PATA detected - can't "
3488				     "skip SETXFER, might malfunction\n");
3489		err_mask = ata_dev_set_xfermode(dev);
3490	}
3491
3492	if (err_mask & ~AC_ERR_DEV)
3493		goto fail;
3494
3495	/* revalidate */
3496	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3497	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3498	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3499	if (rc)
3500		return rc;
3501
3502	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3503		/* Old CFA may refuse this command, which is just fine */
3504		if (ata_id_is_cfa(dev->id))
3505			ign_dev_err = 1;
3506		/* Catch several broken garbage emulations plus some pre
3507		   ATA devices */
3508		if (ata_id_major_version(dev->id) == 0 &&
3509					dev->pio_mode <= XFER_PIO_2)
3510			ign_dev_err = 1;
3511		/* Some very old devices and some bad newer ones fail
3512		   any kind of SET_XFERMODE request but support PIO0-2
3513		   timings and no IORDY */
3514		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3515			ign_dev_err = 1;
3516	}
3517	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3518	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3519	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3520	    dev->dma_mode == XFER_MW_DMA_0 &&
3521	    (dev->id[63] >> 8) & 1)
3522		ign_dev_err = 1;
3523
3524	/* if the device is actually configured correctly, ignore dev err */
3525	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3526		ign_dev_err = 1;
3527
3528	if (err_mask & AC_ERR_DEV) {
3529		if (!ign_dev_err)
3530			goto fail;
3531		else
3532			dev_err_whine = " (device error ignored)";
3533	}
3534
3535	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3536		    dev->xfer_shift, (int)dev->xfer_mode);
3537
3538	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3539	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
3540		ata_dev_info(dev, "configured for %s%s\n",
3541			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3542			     dev_err_whine);
3543
3544	return 0;
3545
3546 fail:
3547	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3548	return -EIO;
3549}
3550
3551/**
3552 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3553 *	@link: link on which timings will be programmed
3554 *	@r_failed_dev: out parameter for failed device
3555 *
3556 *	Standard implementation of the function used to tune and set
3557 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3558 *	ata_dev_set_mode() fails, pointer to the failing device is
3559 *	returned in @r_failed_dev.
3560 *
3561 *	LOCKING:
3562 *	PCI/etc. bus probe sem.
3563 *
3564 *	RETURNS:
3565 *	0 on success, negative errno otherwise
3566 */
3567
3568int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3569{
3570	struct ata_port *ap = link->ap;
3571	struct ata_device *dev;
3572	int rc = 0, used_dma = 0, found = 0;
3573
3574	/* step 1: calculate xfer_mask */
3575	ata_for_each_dev(dev, link, ENABLED) {
3576		unsigned int pio_mask, dma_mask;
3577		unsigned int mode_mask;
3578
3579		mode_mask = ATA_DMA_MASK_ATA;
3580		if (dev->class == ATA_DEV_ATAPI)
3581			mode_mask = ATA_DMA_MASK_ATAPI;
3582		else if (ata_id_is_cfa(dev->id))
3583			mode_mask = ATA_DMA_MASK_CFA;
3584
3585		ata_dev_xfermask(dev);
3586		ata_force_xfermask(dev);
3587
3588		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3589
3590		if (libata_dma_mask & mode_mask)
3591			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3592						     dev->udma_mask);
3593		else
3594			dma_mask = 0;
3595
3596		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3597		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3598
3599		found = 1;
3600		if (ata_dma_enabled(dev))
3601			used_dma = 1;
3602	}
3603	if (!found)
3604		goto out;
3605
3606	/* step 2: always set host PIO timings */
3607	ata_for_each_dev(dev, link, ENABLED) {
3608		if (dev->pio_mode == 0xff) {
3609			ata_dev_warn(dev, "no PIO support\n");
3610			rc = -EINVAL;
3611			goto out;
3612		}
3613
3614		dev->xfer_mode = dev->pio_mode;
3615		dev->xfer_shift = ATA_SHIFT_PIO;
3616		if (ap->ops->set_piomode)
3617			ap->ops->set_piomode(ap, dev);
3618	}
3619
3620	/* step 3: set host DMA timings */
3621	ata_for_each_dev(dev, link, ENABLED) {
3622		if (!ata_dma_enabled(dev))
3623			continue;
3624
3625		dev->xfer_mode = dev->dma_mode;
3626		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3627		if (ap->ops->set_dmamode)
3628			ap->ops->set_dmamode(ap, dev);
3629	}
3630
3631	/* step 4: update devices' xfer mode */
3632	ata_for_each_dev(dev, link, ENABLED) {
3633		rc = ata_dev_set_mode(dev);
3634		if (rc)
3635			goto out;
3636	}
3637
3638	/* Record simplex status. If we selected DMA then the other
3639	 * host channels are not permitted to do so.
3640	 */
3641	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3642		ap->host->simplex_claimed = ap;
3643
3644 out:
3645	if (rc)
3646		*r_failed_dev = dev;
3647	return rc;
3648}
3649EXPORT_SYMBOL_GPL(ata_do_set_mode);
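
/*
 * Illustrative sketch (not part of libata): a hypothetical driver that must
 * poke its controller before transfer modes are programmed can wrap this
 * helper from its ->set_mode() hook ("foo_*" names are made up):
 *
 *	static int foo_set_mode(struct ata_link *link,
 *				struct ata_device **r_failed_dev)
 *	{
 *		foo_apply_controller_quirk(link->ap);
 *		return ata_do_set_mode(link, r_failed_dev);
 *	}
 */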
3650
3651/**
3652 *	ata_wait_ready - wait for link to become ready
3653 *	@link: link to be waited on
3654 *	@deadline: deadline jiffies for the operation
3655 *	@check_ready: callback to check link readiness
3656 *
3657 *	Wait for @link to become ready.  @check_ready should return a
3658 *	positive number if @link is ready, 0 if it isn't, -ENODEV if the
3659 *	link doesn't seem to be occupied, and another errno for other
3660 *	error conditions.
3661 *
3662 *	Transient -ENODEV conditions are allowed for
3663 *	ATA_TMOUT_FF_WAIT.
3664 *
3665 *	LOCKING:
3666 *	EH context.
3667 *
3668 *	RETURNS:
3669 *	0 if @link is ready before @deadline; otherwise, -errno.
3670 */
3671int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3672		   int (*check_ready)(struct ata_link *link))
3673{
3674	unsigned long start = jiffies;
3675	unsigned long nodev_deadline;
3676	int warned = 0;
3677
3678	/* choose which 0xff timeout to use, read comment in libata.h */
3679	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3680		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3681	else
3682		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3683
3684	/* Slave readiness can't be tested separately from master.  On
3685	 * M/S emulation configuration, this function should be called
3686	 * only on the master and it will handle both master and slave.
3687	 */
3688	WARN_ON(link == link->ap->slave_link);
3689
3690	if (time_after(nodev_deadline, deadline))
3691		nodev_deadline = deadline;
3692
3693	while (1) {
3694		unsigned long now = jiffies;
3695		int ready, tmp;
3696
3697		ready = tmp = check_ready(link);
3698		if (ready > 0)
3699			return 0;
3700
3701		/*
3702		 * -ENODEV could be transient.  Ignore -ENODEV if link
3703		 * is online.  Also, some SATA devices take a long
3704		 * time to clear 0xff after reset.  Wait for
3705		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3706		 * offline.
3707		 *
3708		 * Note that some PATA controllers (pata_ali) explode
3709		 * if status register is read more than once when
3710		 * there's no device attached.
3711		 */
3712		if (ready == -ENODEV) {
3713			if (ata_link_online(link))
3714				ready = 0;
3715			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3716				 !ata_link_offline(link) &&
3717				 time_before(now, nodev_deadline))
3718				ready = 0;
3719		}
3720
3721		if (ready)
3722			return ready;
3723		if (time_after(now, deadline))
3724			return -EBUSY;
3725
3726		if (!warned && time_after(now, start + 5 * HZ) &&
3727		    (deadline - now > 3 * HZ)) {
3728			ata_link_warn(link,
3729				"link is slow to respond, please be patient "
3730				"(ready=%d)\n", tmp);
3731			warned = 1;
3732		}
3733
3734		ata_msleep(link->ap, 50);
3735	}
3736}
3737
3738/**
3739 *	ata_wait_after_reset - wait for link to become ready after reset
3740 *	@link: link to be waited on
3741 *	@deadline: deadline jiffies for the operation
3742 *	@check_ready: callback to check link readiness
3743 *
3744 *	Wait for @link to become ready after reset.
3745 *
3746 *	LOCKING:
3747 *	EH context.
3748 *
3749 *	RETURNS:
3750 *	0 if @link is ready before @deadline; otherwise, -errno.
3751 */
3752int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3753				int (*check_ready)(struct ata_link *link))
3754{
3755	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3756
3757	return ata_wait_ready(link, deadline, check_ready);
3758}
3759EXPORT_SYMBOL_GPL(ata_wait_after_reset);
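
/*
 * Illustrative sketch (not part of libata): a hypothetical ->softreset()
 * implementation would typically finish by waiting for the link to become
 * ready again, feeding ata_wait_after_reset() a controller-specific
 * readiness check ("foo_*" names are made up):
 *
 *	static int foo_check_ready(struct ata_link *link)
 *	{
 *		u8 status = ioread8(foo_status_reg(link->ap));
 *
 *		return ata_check_ready(status);
 *	}
 *
 *	...
 *	rc = ata_wait_after_reset(link, deadline, foo_check_ready);
 */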
3760
3761/**
3762 *	ata_std_prereset - prepare for reset
3763 *	@link: ATA link to be reset
3764 *	@deadline: deadline jiffies for the operation
3765 *
3766 *	@link is about to be reset.  Initialize it.  Failure from
3767 *	prereset makes libata abort the whole reset sequence and give
3768 *	up that port, so prereset should be best-effort.  It does its
3769 *	best to prepare for the reset sequence but if things go wrong,
3770 *	it should just whine, not fail.
3771 *
3772 *	LOCKING:
3773 *	Kernel thread context (may sleep)
3774 *
3775 *	RETURNS:
3776 *	Always 0.
3777 */
3778int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3779{
3780	struct ata_port *ap = link->ap;
3781	struct ata_eh_context *ehc = &link->eh_context;
3782	const unsigned int *timing = sata_ehc_deb_timing(ehc);
3783	int rc;
3784
3785	/* if we're about to do hardreset, nothing more to do */
3786	if (ehc->i.action & ATA_EH_HARDRESET)
3787		return 0;
3788
3789	/* if SATA, resume link */
3790	if (ap->flags & ATA_FLAG_SATA) {
3791		rc = sata_link_resume(link, timing, deadline);
3792		/* whine about phy resume failure but proceed */
3793		if (rc && rc != -EOPNOTSUPP)
3794			ata_link_warn(link,
3795				      "failed to resume link for reset (errno=%d)\n",
3796				      rc);
3797	}
3798
3799	/* no point in trying softreset on offline link */
3800	if (ata_phys_link_offline(link))
3801		ehc->i.action &= ~ATA_EH_SOFTRESET;
3802
3803	return 0;
3804}
3805EXPORT_SYMBOL_GPL(ata_std_prereset);
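
/*
 * Illustrative sketch (not part of libata): a hypothetical driver needing
 * board-specific preparation before a reset can wrap the standard helper
 * from its ->prereset() hook while keeping the best-effort contract
 * described above ("foo_*" names are made up):
 *
 *	static int foo_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		foo_phy_wake(link->ap);
 *		return ata_std_prereset(link, deadline);
 *	}
 */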
3806
3807/**
3808 *	sata_std_hardreset - COMRESET w/o waiting or classification
3809 *	@link: link to reset
3810 *	@class: resulting class of attached device
3811 *	@deadline: deadline jiffies for the operation
3812 *
3813 *	Standard SATA COMRESET w/o waiting or classification.
3814 *
3815 *	LOCKING:
3816 *	Kernel thread context (may sleep)
3817 *
3818 *	RETURNS:
3819 *	0 if link offline, -EAGAIN if link online, -errno on errors.
3820 */
3821int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3822		       unsigned long deadline)
3823{
3824	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
3825	bool online;
3826	int rc;
3827
3828	/* do hardreset */
3829	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3830	return online ? -EAGAIN : rc;
3831}
3832EXPORT_SYMBOL_GPL(sata_std_hardreset);
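
/*
 * Illustrative sketch (not part of libata): drivers usually either point
 * ->hardreset at this helper directly or wrap it when extra work is needed
 * after COMRESET ("foo_*" names are made up):
 *
 *	static int foo_hardreset(struct ata_link *link, unsigned int *class,
 *				 unsigned long deadline)
 *	{
 *		int rc = sata_std_hardreset(link, class, deadline);
 *
 *		if (rc == -EAGAIN)
 *			foo_reapply_phy_tuning(link->ap);
 *		return rc;
 *	}
 *
 * The -EAGAIN return for an online link tells EH that follow-up handling
 * (waiting for readiness, classification) is still required.
 */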
3833
3834/**
3835 *	ata_std_postreset - standard postreset callback
3836 *	@link: the target ata_link
3837 *	@classes: classes of attached devices
3838 *
3839 *	This function is invoked after a successful reset.  Note that
3840 *	the device might have been reset more than once using
3841 *	different reset methods before postreset is invoked.
3842 *
3843 *	LOCKING:
3844 *	Kernel thread context (may sleep)
3845 */
3846void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3847{
3848	u32 serror;
3849
3850	/* reset complete, clear SError */
3851	if (!sata_scr_read(link, SCR_ERROR, &serror))
3852		sata_scr_write(link, SCR_ERROR, serror);
3853
3854	/* print link status */
3855	sata_print_link_status(link);
3856}
3857EXPORT_SYMBOL_GPL(ata_std_postreset);
3858
3859/**
3860 *	ata_dev_same_device - Determine whether new ID matches configured device
3861 *	@dev: device to compare against
3862 *	@new_class: class of the new device
3863 *	@new_id: IDENTIFY page of the new device
3864 *
3865 *	Compare @new_class and @new_id against @dev and determine
3866 *	whether @dev is the device indicated by @new_class and
3867 *	@new_id.
3868 *
3869 *	LOCKING:
3870 *	None.
3871 *
3872 *	RETURNS:
3873 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3874 */
3875static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3876			       const u16 *new_id)
3877{
3878	const u16 *old_id = dev->id;
3879	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3880	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3881
3882	if (dev->class != new_class) {
3883		ata_dev_info(dev, "class mismatch %d != %d\n",
3884			     dev->class, new_class);
3885		return 0;
3886	}
3887
3888	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3889	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3890	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3891	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3892
3893	if (strcmp(model[0], model[1])) {
3894		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3895			     model[0], model[1]);
3896		return 0;
3897	}
3898
3899	if (strcmp(serial[0], serial[1])) {
3900		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3901			     serial[0], serial[1]);
3902		return 0;
3903	}
3904
3905	return 1;
3906}
3907
3908/**
3909 *	ata_dev_reread_id - Re-read IDENTIFY data
3910 *	@dev: target ATA device
3911 *	@readid_flags: read ID flags
3912 *
3913 *	Re-read IDENTIFY page and make sure @dev is still attached to
3914 *	the port.
3915 *
3916 *	LOCKING:
3917 *	Kernel thread context (may sleep)
3918 *
3919 *	RETURNS:
3920 *	0 on success, negative errno otherwise
3921 */
3922int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3923{
3924	unsigned int class = dev->class;
3925	u16 *id = (void *)dev->link->ap->sector_buf;
3926	int rc;
3927
3928	/* read ID data */
3929	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3930	if (rc)
3931		return rc;
3932
3933	/* is the device still there? */
3934	if (!ata_dev_same_device(dev, class, id))
3935		return -ENODEV;
3936
3937	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3938	return 0;
3939}
3940
3941/**
3942 *	ata_dev_revalidate - Revalidate ATA device
3943 *	@dev: device to revalidate
3944 *	@new_class: new class code
3945 *	@readid_flags: read ID flags
3946 *
3947 *	Re-read IDENTIFY page, make sure @dev is still attached to the
3948 *	port and reconfigure it according to the new IDENTIFY page.
3949 *
3950 *	LOCKING:
3951 *	Kernel thread context (may sleep)
3952 *
3953 *	RETURNS:
3954 *	0 on success, negative errno otherwise
3955 */
3956int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3957		       unsigned int readid_flags)
3958{
3959	u64 n_sectors = dev->n_sectors;
3960	u64 n_native_sectors = dev->n_native_sectors;
3961	int rc;
3962
3963	if (!ata_dev_enabled(dev))
3964		return -ENODEV;
3965
3966	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3967	if (ata_class_enabled(new_class) && new_class == ATA_DEV_PMP) {
3968		ata_dev_info(dev, "class mismatch %u != %u\n",
3969			     dev->class, new_class);
3970		rc = -ENODEV;
3971		goto fail;
3972	}
3973
3974	/* re-read ID */
3975	rc = ata_dev_reread_id(dev, readid_flags);
3976	if (rc)
3977		goto fail;
3978
3979	/* configure device according to the new ID */
3980	rc = ata_dev_configure(dev);
3981	if (rc)
3982		goto fail;
3983
3984	/* verify n_sectors hasn't changed */
3985	if (dev->class != ATA_DEV_ATA || !n_sectors ||
3986	    dev->n_sectors == n_sectors)
3987		return 0;
3988
3989	/* n_sectors has changed */
3990	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3991		     (unsigned long long)n_sectors,
3992		     (unsigned long long)dev->n_sectors);
3993
3994	/*
3995	 * Something could have caused HPA to be unlocked
3996	 * involuntarily.  If n_native_sectors hasn't changed and the
3997	 * new size matches it, keep the device.
3998	 */
3999	if (dev->n_native_sectors == n_native_sectors &&
4000	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4001		ata_dev_warn(dev,
4002			     "new n_sectors matches native, probably "
4003			     "late HPA unlock, n_sectors updated\n");
4004		/* use the larger n_sectors */
4005		return 0;
4006	}
4007
4008	/*
4009	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4010	 * unlocking HPA in those cases.
4011	 *
4012	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4013	 */
4014	if (dev->n_native_sectors == n_native_sectors &&
4015	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4016	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4017		ata_dev_warn(dev,
4018			     "old n_sectors matches native, probably "
4019			     "late HPA lock, will try to unlock HPA\n");
4020		/* try unlocking HPA */
4021		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4022		rc = -EIO;
4023	} else
4024		rc = -ENODEV;
4025
4026	/* restore original n_[native_]sectors and fail */
4027	dev->n_native_sectors = n_native_sectors;
4028	dev->n_sectors = n_sectors;
4029 fail:
4030	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4031	return rc;
4032}
4033
4034struct ata_blacklist_entry {
4035	const char *model_num;
4036	const char *model_rev;
4037	unsigned long horkage;
4038};
4039
4040static const struct ata_blacklist_entry ata_device_blacklist [] = {
4041	/* Devices with DMA related problems under Linux */
4042	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4043	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4044	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4045	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4046	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4047	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4048	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4049	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4050	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4051	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4052	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4053	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4054	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4055	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4056	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4057	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4058	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4059	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4060	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4061	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4062	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4063	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4064	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4065	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4066	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4067	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4068	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4069	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4070	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4071	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
4072	/* Odd clown on sil3726/4726 PMPs */
4073	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4074	/* Similar story with ASMedia 1092 */
4075	{ "ASMT109x- Config",	NULL,		ATA_HORKAGE_DISABLE },
4076
4077	/* Weird ATAPI devices */
4078	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4079	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4080	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4081	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4082
4083	/*
4084	 * Causes silent data corruption with higher max sects.
4085	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4086	 */
4087	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4088
4089	/*
4090	 * These devices time out with higher max sects.
4091	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4092	 */
4093	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4094	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4095
4096	/* Devices we expect to fail diagnostics */
4097
4098	/* Devices where NCQ should be avoided */
4099	/* NCQ is slow */
4100	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4101	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ },
4102	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4103	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4104	/* NCQ is broken */
4105	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4106	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4107	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4108	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4109	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4110
4111	/* Seagate NCQ + FLUSH CACHE firmware bug */
4112	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4113						ATA_HORKAGE_FIRMWARE_WARN },
4114
4115	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4116						ATA_HORKAGE_FIRMWARE_WARN },
4117
4118	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4119						ATA_HORKAGE_FIRMWARE_WARN },
4120
4121	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4122						ATA_HORKAGE_FIRMWARE_WARN },
4123
4124	/* drives which fail FPDMA_AA activation (some may freeze afterwards);
4125	   the ST disks also have LPM issues */
4126	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
4127						ATA_HORKAGE_NOLPM },
4128	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4129
4130	/* Blacklist entries taken from Silicon Image 3124/3132
4131	   Windows driver .inf file - also several Linux problem reports */
4132	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ },
4133	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ },
4134	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ },
4135
4136	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4137	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ },
4138
4139	/* Sandisk SD7/8/9s lock up hard on large trims */
4140	{ "SanDisk SD[789]*",	NULL,		ATA_HORKAGE_MAX_TRIM_128M },
4141
4142	/* devices which puke on READ_NATIVE_MAX */
4143	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA },
4144	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4145	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4146	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4147
4148	/* this one allows HPA unlocking but fails IOs on the area */
4149	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4150
4151	/* Devices which report 1 sector over size HPA */
4152	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE },
4153	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE },
4154	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE },
4155
4156	/* Devices which get the IVB wrong */
4157	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
4158	/* Maybe we should just blacklist TSSTcorp... */
4159	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB },
4160
4161	/* Devices that do not need bridging limits applied */
4162	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK },
4163	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK },
4164
4165	/* Devices which aren't very happy with higher link speeds */
4166	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS },
4167	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS },
4168
4169	/*
4170	 * Devices which choke on SETXFER.  Applies only if both the
4171	 * device and controller are SATA.
4172	 */
4173	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4174	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4175	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4176	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4177	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4178
4179	/* These specific Pioneer models have LPM issues */
4180	{ "PIONEER BD-RW   BDR-207M",	NULL,	ATA_HORKAGE_NOLPM },
4181	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_HORKAGE_NOLPM },
4182
4183	/* Crucial BX100 SSD 500GB has broken LPM support */
4184	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
4185
4186	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4187	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4188						ATA_HORKAGE_ZERO_AFTER_TRIM |
4189						ATA_HORKAGE_NOLPM },
4190	/* 512GB MX100 with newer firmware has only LPM issues */
4191	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
4192						ATA_HORKAGE_NOLPM },
4193
4194	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4195	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4196						ATA_HORKAGE_ZERO_AFTER_TRIM |
4197						ATA_HORKAGE_NOLPM },
4198	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4199						ATA_HORKAGE_ZERO_AFTER_TRIM |
4200						ATA_HORKAGE_NOLPM },
4201
4202	/* These specific Samsung models/firmware-revs do not handle LPM well */
4203	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
4204	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
4205	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM },
4206	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
4207
4208	/* devices that don't properly handle queued TRIM commands */
4209	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4210						ATA_HORKAGE_ZERO_AFTER_TRIM },
4211	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4212						ATA_HORKAGE_ZERO_AFTER_TRIM },
4213	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4214						ATA_HORKAGE_ZERO_AFTER_TRIM },
4215	{ "Micron_1100_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4216						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4217	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4218						ATA_HORKAGE_ZERO_AFTER_TRIM },
4219	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4220						ATA_HORKAGE_ZERO_AFTER_TRIM },
4221	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4222						ATA_HORKAGE_ZERO_AFTER_TRIM },
4223	{ "Samsung SSD 840 EVO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4224						ATA_HORKAGE_NO_DMA_LOG |
4225						ATA_HORKAGE_ZERO_AFTER_TRIM },
4226	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4227						ATA_HORKAGE_ZERO_AFTER_TRIM },
4228	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4229						ATA_HORKAGE_ZERO_AFTER_TRIM },
4230	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4231						ATA_HORKAGE_ZERO_AFTER_TRIM |
4232						ATA_HORKAGE_NO_NCQ_ON_ATI },
4233	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4234						ATA_HORKAGE_ZERO_AFTER_TRIM |
4235						ATA_HORKAGE_NO_NCQ_ON_ATI },
4236	{ "SAMSUNG*MZ7LH*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4237						ATA_HORKAGE_ZERO_AFTER_TRIM |
4238						ATA_HORKAGE_NO_NCQ_ON_ATI, },
4239	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4240						ATA_HORKAGE_ZERO_AFTER_TRIM },
4241
4242	/* devices that don't properly handle TRIM commands */
4243	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM },
4244	{ "M88V29*",			NULL,	ATA_HORKAGE_NOTRIM },
4245
4246	/*
4247	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4248	 * (Return Zero After Trim) flags in the ATA Command Set are
4249	 * unreliable in the sense that they only define what happens if
4250	 * the device successfully executed the DSM TRIM command. TRIM
4251	 * is only advisory, however, and the device is free to silently
4252	 * ignore all or parts of the request.
4253	 *
4254	 * Whitelist drives that are known to reliably return zeroes
4255	 * after TRIM.
4256	 */
4257
4258	/*
4259	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4260	 * that model before whitelisting all other intel SSDs.
4261	 */
4262	{ "INTEL*SSDSC2MH*",		NULL,	0 },
4263
4264	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4265	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4266	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4267	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4268	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4269	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4270	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4271	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4272
4273	/*
4274	 * Some WD SATA-I drives spin up and down erratically when the link
4275	 * is put into the slumber mode.  We don't have full list of the
4276	 * affected devices.  Disable LPM if the device matches one of the
4277	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4278	 * lost too.
4279	 *
4280	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4281	 */
4282	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4283	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4284	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4285	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4286	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4287	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4288	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4289
4290	/*
4291	 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
4292	 * log page is accessed. Ensure we never ask for this log page with
4293	 * these devices.
4294	 */
4295	{ "SATADOM-ML 3ME",		NULL,	ATA_HORKAGE_NO_LOG_DIR },
4296
4297	/* Buggy FUA */
4298	{ "Maxtor",		"BANC1G10",	ATA_HORKAGE_NO_FUA },
4299	{ "WDC*WD2500J*",	NULL,		ATA_HORKAGE_NO_FUA },
4300	{ "OCZ-VERTEX*",	NULL,		ATA_HORKAGE_NO_FUA },
4301	{ "INTEL*SSDSC2CT*",	NULL,		ATA_HORKAGE_NO_FUA },
4302
4303	/* End Marker */
4304	{ }
4305};
4306
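/*
 * Illustrative sketch (not part of libata): entries above are glob patterns
 * matched against the IDENTIFY model and firmware-revision strings.  A
 * hypothetical device whose "1.00" firmware mishandles NCQ would be quirked
 * with an entry such as:
 *
 *	{ "EXAMPLE SSD 128G*",	"1.00",		ATA_HORKAGE_NONCQ },
 *
 * A NULL firmware-revision field matches any revision.
 */
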
4307static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4308{
4309	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4310	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4311	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4312
4313	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4314	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4315
4316	while (ad->model_num) {
4317		if (glob_match(ad->model_num, model_num)) {
4318			if (ad->model_rev == NULL)
4319				return ad->horkage;
4320			if (glob_match(ad->model_rev, model_rev))
4321				return ad->horkage;
4322		}
4323		ad++;
4324	}
4325	return 0;
4326}
4327
4328static int ata_dma_blacklisted(const struct ata_device *dev)
4329{
4330	/* We don't support polling DMA.
4331	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
4332	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4333	 */
4334	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4335	    (dev->flags & ATA_DFLAG_CDB_INTR))
4336		return 1;
4337	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4338}
4339
4340/**
4341 *	ata_is_40wire		-	check drive side detection
4342 *	@dev: device
4343 *
4344 *	Perform drive side detection decoding, allowing for device vendors
4345 *	who can't follow the documentation.
4346 */
4347
4348static int ata_is_40wire(struct ata_device *dev)
4349{
4350	if (dev->horkage & ATA_HORKAGE_IVB)
4351		return ata_drive_40wire_relaxed(dev->id);
4352	return ata_drive_40wire(dev->id);
4353}
4354
4355/**
4356 *	cable_is_40wire		-	40/80/SATA decider
4357 *	@ap: port to consider
4358 *
4359 *	This function encapsulates the policy for speed management
4360 *	in one place. At the moment we don't cache the result but
4361 *	there is a good case for setting ap->cbl to the result when
4362 *	we are called with unknown cables (and figuring out if it
4363 *	impacts hotplug at all).
4364 *
4365 *	Return 1 if the cable appears to be 40 wire.
4366 */
4367
4368static int cable_is_40wire(struct ata_port *ap)
4369{
4370	struct ata_link *link;
4371	struct ata_device *dev;
4372
4373	/* If the controller thinks we are 40 wire, we are. */
4374	if (ap->cbl == ATA_CBL_PATA40)
4375		return 1;
4376
4377	/* If the controller thinks we are 80 wire, we are. */
4378	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4379		return 0;
4380
4381	/* If the system is known to use a short 40 wire cable (e.g. a
4382	 * laptop), then we allow 80 wire modes even if the drive
4383	 * isn't sure.
4384	 */
4385	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4386		return 0;
4387
4388	/* If the controller doesn't know, we scan.
4389	 *
4390	 * Note: We look for all 40 wire detects at this point.  Any
4391	 *       80 wire detect is taken to be 80 wire cable because
4392	 * - in many setups only the one drive (slave if present) will
4393	 *   give a valid detect
4394	 * - if you have a non detect capable drive you don't want it
4395	 *   to colour the choice
4396	 */
4397	ata_for_each_link(link, ap, EDGE) {
4398		ata_for_each_dev(dev, link, ENABLED) {
4399			if (!ata_is_40wire(dev))
4400				return 0;
4401		}
4402	}
4403	return 1;
4404}
4405
4406/**
4407 *	ata_dev_xfermask - Compute supported xfermask of the given device
4408 *	@dev: Device to compute xfermask for
4409 *
4410 *	Compute supported xfermask of @dev and store it in
4411 *	dev->*_mask.  This function is responsible for applying all
4412 *	known limits including host controller limits, device
4413 *	blacklist, etc...
4414 *
4415 *	LOCKING:
4416 *	None.
4417 */
4418static void ata_dev_xfermask(struct ata_device *dev)
4419{
4420	struct ata_link *link = dev->link;
4421	struct ata_port *ap = link->ap;
4422	struct ata_host *host = ap->host;
4423	unsigned int xfer_mask;
4424
4425	/* controller modes available */
4426	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4427				      ap->mwdma_mask, ap->udma_mask);
4428
4429	/* drive modes available */
4430	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4431				       dev->mwdma_mask, dev->udma_mask);
4432	xfer_mask &= ata_id_xfermask(dev->id);
4433
4434	/*
4435	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4436	 *	cable
4437	 */
4438	if (ata_dev_pair(dev)) {
4439		/* No PIO5 or PIO6 */
4440		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4441		/* No MWDMA3 or MWDMA4 */
4442		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4443	}
4444
4445	if (ata_dma_blacklisted(dev)) {
4446		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4447		ata_dev_warn(dev,
4448			     "device is on DMA blacklist, disabling DMA\n");
4449	}
4450
4451	if ((host->flags & ATA_HOST_SIMPLEX) &&
4452	    host->simplex_claimed && host->simplex_claimed != ap) {
4453		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4454		ata_dev_warn(dev,
4455			     "simplex DMA is claimed by other device, disabling DMA\n");
4456	}
4457
4458	if (ap->flags & ATA_FLAG_NO_IORDY)
4459		xfer_mask &= ata_pio_mask_no_iordy(dev);
4460
4461	if (ap->ops->mode_filter)
4462		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4463
4464	/* Apply cable rule here.  Don't apply it early because when
4465	 * we handle hot plug the cable type can itself change.
4466	 * Check this last so that we know if the transfer rate was
4467	 * solely limited by the cable.
4468	 * Unknown or 80 wire cables reported host side are checked
4469	 * drive side as well. Cases where we know a 40wire cable
4470	 * is used safely for 80 are not checked here.
4471	 */
4472	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4473		/* UDMA/44 or higher would be available */
4474		if (cable_is_40wire(ap)) {
4475			ata_dev_warn(dev,
4476				     "limited to UDMA/33 due to 40-wire cable\n");
4477			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4478		}
4479
4480	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4481			    &dev->mwdma_mask, &dev->udma_mask);
4482}
4483
4484/**
4485 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4486 *	@dev: Device to which command will be sent
4487 *
4488 *	Issue SET FEATURES - XFER MODE command to device @dev
4489 *	on port @ap.
4490 *
4491 *	LOCKING:
4492 *	PCI/etc. bus probe sem.
4493 *
4494 *	RETURNS:
4495 *	0 on success, AC_ERR_* mask otherwise.
4496 */
4497
4498static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4499{
4500	struct ata_taskfile tf;
4501
4502	/* set up set-features taskfile */
4503	ata_dev_dbg(dev, "set features - xfer mode\n");
4504
4505	/* Some controllers and ATAPI devices show flaky interrupt
4506	 * behavior after setting xfer mode.  Use polling instead.
4507	 */
4508	ata_tf_init(dev, &tf);
4509	tf.command = ATA_CMD_SET_FEATURES;
4510	tf.feature = SETFEATURES_XFER;
4511	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4512	tf.protocol = ATA_PROT_NODATA;
4513	/* If we are using IORDY we must send the mode setting command */
4514	if (ata_pio_need_iordy(dev))
4515		tf.nsect = dev->xfer_mode;
4516	/* If the device has IORDY and the controller does not - turn it off */
4517 	else if (ata_id_has_iordy(dev->id))
4518		tf.nsect = 0x01;
4519	else /* In the ancient relic department - skip all of this */
4520		return 0;
4521
4522	/*
4523	 * On some disks, this command causes spin-up, so we need longer
4524	 * timeout.
4525	 */
4526	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4527}
4528
4529/**
4530 *	ata_dev_set_feature - Issue SET FEATURES
4531 *	@dev: Device to which command will be sent
4532 *	@subcmd: The SET FEATURES subcommand to be sent
4533 *	@action: Subcommand-specific action, passed in the sector count field
4534 *
4535 *	Issue SET FEATURES command to device @dev on port @ap with the sector count set to @action.
4536 *
4537 *	LOCKING:
4538 *	PCI/etc. bus probe sem.
4539 *
4540 *	RETURNS:
4541 *	0 on success, AC_ERR_* mask otherwise.
4542 */
4543unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
4544{
4545	struct ata_taskfile tf;
4546	unsigned int timeout = 0;
4547
4548	/* set up set-features taskfile */
4549	ata_dev_dbg(dev, "set features\n");
4550
4551	ata_tf_init(dev, &tf);
4552	tf.command = ATA_CMD_SET_FEATURES;
4553	tf.feature = subcmd;
4554	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4555	tf.protocol = ATA_PROT_NODATA;
4556	tf.nsect = action;
4557
4558	if (subcmd == SETFEATURES_SPINUP)
4559		timeout = ata_probe_timeout ?
4560			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4561
4562	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4563}
4564EXPORT_SYMBOL_GPL(ata_dev_set_feature);
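/*
 * Illustrative sketch (editor's addition, not part of libata): a hypothetical
 * caller in probe or EH context could use ata_dev_set_feature() to enable the
 * drive's write cache.  SETFEATURES_WC_ON has no subcommand-specific action,
 * so the sector count argument is simply 0.
 */
static inline void example_enable_write_cache(struct ata_device *dev)
{
	unsigned int err_mask;

	err_mask = ata_dev_set_feature(dev, SETFEATURES_WC_ON, 0);
	if (err_mask)
		ata_dev_warn(dev,
			     "failed to enable write cache (err_mask=0x%x)\n",
			     err_mask);
}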
4565
4566/**
4567 *	ata_dev_init_params - Issue INIT DEV PARAMS command
4568 *	@dev: Device to which command will be sent
4569 *	@heads: Number of heads (taskfile parameter)
4570 *	@sectors: Number of sectors (taskfile parameter)
4571 *
4572 *	LOCKING:
4573 *	Kernel thread context (may sleep)
4574 *
4575 *	RETURNS:
4576 *	0 on success, AC_ERR_* mask otherwise.
4577 */
4578static unsigned int ata_dev_init_params(struct ata_device *dev,
4579					u16 heads, u16 sectors)
4580{
4581	struct ata_taskfile tf;
4582	unsigned int err_mask;
4583
4584	/* Number of sectors per track 1-255. Number of heads 1-16 */
4585	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4586		return AC_ERR_INVALID;
4587
4588	/* set up init dev params taskfile */
4589	ata_dev_dbg(dev, "init dev params\n");
4590
4591	ata_tf_init(dev, &tf);
4592	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4593	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4594	tf.protocol = ATA_PROT_NODATA;
4595	tf.nsect = sectors;
4596	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4597
4598	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4599	/* A clean abort indicates an original or just out-of-spec drive,
4600	   and we should continue as we issue the setup based on the
4601	   drive-reported working geometry */
4602	if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
4603		err_mask = 0;
4604
4605	return err_mask;
4606}
4607
4608/**
4609 *	atapi_check_dma - Check whether ATAPI DMA can be supported
4610 *	@qc: Metadata associated with taskfile to check
4611 *
4612 *	Allow low-level driver to filter ATA PACKET commands, returning
4613 *	a status indicating whether or not it is OK to use DMA for the
4614 *	supplied PACKET command.
4615 *
4616 *	LOCKING:
4617 *	spin_lock_irqsave(host lock)
4618 *
4619 *	RETURNS: 0 when ATAPI DMA can be used
4620 *               nonzero otherwise
4621 */
4622int atapi_check_dma(struct ata_queued_cmd *qc)
4623{
4624	struct ata_port *ap = qc->ap;
4625
4626	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4627	 * few ATAPI devices choke on such DMA requests.
4628	 */
4629	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4630	    unlikely(qc->nbytes & 15))
4631		return 1;
4632
4633	if (ap->ops->check_atapi_dma)
4634		return ap->ops->check_atapi_dma(qc);
4635
4636	return 0;
4637}
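/*
 * Illustrative sketch (editor's addition, not part of libata): a low-level
 * driver can veto ATAPI DMA for individual commands by providing a
 * ->check_atapi_dma() port op, which atapi_check_dma() above calls after its
 * generic length check.  The condition below (forcing PIO for GPCMD_READ_CD)
 * is purely hypothetical.
 */
static int __maybe_unused example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* nonzero return means "use PIO for this PACKET command" */
	if (qc->cdb[0] == GPCMD_READ_CD)
		return 1;

	return 0;
}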
4638
4639/**
4640 *	ata_std_qc_defer - Check whether a qc needs to be deferred
4641 *	@qc: ATA command in question
4642 *
4643 *	Non-NCQ commands cannot run with any other command, NCQ or
4644 *	not.  As the upper layer only knows the queue depth, we are
4645 *	responsible for maintaining exclusion.  This function checks
4646 *	whether a new command @qc can be issued.
4647 *
4648 *	LOCKING:
4649 *	spin_lock_irqsave(host lock)
4650 *
4651 *	RETURNS:
4652 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4653 */
4654int ata_std_qc_defer(struct ata_queued_cmd *qc)
4655{
4656	struct ata_link *link = qc->dev->link;
4657
4658	if (ata_is_ncq(qc->tf.protocol)) {
4659		if (!ata_tag_valid(link->active_tag))
4660			return 0;
4661	} else {
4662		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4663			return 0;
4664	}
4665
4666	return ATA_DEFER_LINK;
4667}
4668EXPORT_SYMBOL_GPL(ata_std_qc_defer);
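/*
 * Illustrative sketch (editor's addition, not part of libata): drivers with
 * extra queueing constraints typically layer their own ->qc_defer() on top of
 * ata_std_qc_defer(), deferring first on controller-specific conditions and
 * then falling back to the generic NCQ/non-NCQ exclusion above.  The
 * "port is resetting" test shown here is only an example condition.
 */
static int __maybe_unused example_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* hypothetical controller-specific restriction */
	if (ap->pflags & ATA_PFLAG_RESETTING)
		return ATA_DEFER_PORT;

	return ata_std_qc_defer(qc);
}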
4669
4670enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4671{
4672	return AC_ERR_OK;
4673}
4674EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4675
4676/**
4677 *	ata_sg_init - Associate command with scatter-gather table.
4678 *	@qc: Command to be associated
4679 *	@sg: Scatter-gather table.
4680 *	@n_elem: Number of elements in s/g table.
4681 *
4682 *	Initialize the data-related elements of queued_cmd @qc
4683 *	to point to a scatter-gather table @sg, containing @n_elem
4684 *	elements.
4685 *
4686 *	LOCKING:
4687 *	spin_lock_irqsave(host lock)
4688 */
4689void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4690		 unsigned int n_elem)
4691{
4692	qc->sg = sg;
4693	qc->n_elem = n_elem;
4694	qc->cursg = qc->sg;
4695}
4696
4697#ifdef CONFIG_HAS_DMA
4698
4699/**
4700 *	ata_sg_clean - Unmap DMA memory associated with command
4701 *	@qc: Command containing DMA memory to be released
4702 *
4703 *	Unmap all mapped DMA memory associated with this command.
4704 *
4705 *	LOCKING:
4706 *	spin_lock_irqsave(host lock)
4707 */
4708static void ata_sg_clean(struct ata_queued_cmd *qc)
4709{
4710	struct ata_port *ap = qc->ap;
4711	struct scatterlist *sg = qc->sg;
4712	int dir = qc->dma_dir;
4713
4714	WARN_ON_ONCE(sg == NULL);
4715
4716	if (qc->n_elem)
4717		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4718
4719	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4720	qc->sg = NULL;
4721}
4722
4723/**
4724 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4725 *	@qc: Command with scatter-gather table to be mapped.
4726 *
4727 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4728 *
4729 *	LOCKING:
4730 *	spin_lock_irqsave(host lock)
4731 *
4732 *	RETURNS:
4733 *	Zero on success, negative on error.
4734 *
4735 */
4736static int ata_sg_setup(struct ata_queued_cmd *qc)
4737{
4738	struct ata_port *ap = qc->ap;
4739	unsigned int n_elem;
4740
4741	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4742	if (n_elem < 1)
4743		return -1;
4744
4745	qc->orig_n_elem = qc->n_elem;
4746	qc->n_elem = n_elem;
4747	qc->flags |= ATA_QCFLAG_DMAMAP;
4748
4749	return 0;
4750}
4751
4752#else /* !CONFIG_HAS_DMA */
4753
4754static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4755static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4756
4757#endif /* !CONFIG_HAS_DMA */
4758
4759/**
4760 *	swap_buf_le16 - swap halves of 16-bit words in place
4761 *	@buf:  Buffer to swap
4762 *	@buf_words:  Number of 16-bit words in buffer.
4763 *
4764 *	Swap halves of 16-bit words if needed to convert from
4765 *	little-endian byte order to native cpu byte order, or
4766 *	vice-versa.
4767 *
4768 *	LOCKING:
4769 *	Inherited from caller.
4770 */
4771void swap_buf_le16(u16 *buf, unsigned int buf_words)
4772{
4773#ifdef __BIG_ENDIAN
4774	unsigned int i;
4775
4776	for (i = 0; i < buf_words; i++)
4777		buf[i] = le16_to_cpu(buf[i]);
4778#endif /* __BIG_ENDIAN */
4779}
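/*
 * Illustrative sketch (editor's addition, not part of libata): the classic
 * user of swap_buf_le16() is IDENTIFY DEVICE data, which is defined as 256
 * little-endian 16-bit words and must be converted to CPU byte order before
 * the ata_id_*() accessors are applied.
 */
static inline void example_fixup_identify_data(u16 *id)
{
	/* no-op on little-endian CPUs, byte swap on big-endian ones */
	swap_buf_le16(id, ATA_ID_WORDS);
}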
4780
4781/**
4782 *	ata_qc_free - free unused ata_queued_cmd
4783 *	@qc: Command to complete
4784 *
4785 *	Designed to free unused ata_queued_cmd object
4786 *	in case something prevents using it.
4787 *
4788 *	LOCKING:
4789 *	spin_lock_irqsave(host lock)
4790 */
4791void ata_qc_free(struct ata_queued_cmd *qc)
4792{
4793	qc->flags = 0;
4794	if (ata_tag_valid(qc->tag))
4795		qc->tag = ATA_TAG_POISON;
4796}
4797
4798void __ata_qc_complete(struct ata_queued_cmd *qc)
4799{
4800	struct ata_port *ap;
4801	struct ata_link *link;
4802
4803	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4804	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4805	ap = qc->ap;
4806	link = qc->dev->link;
4807
4808	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4809		ata_sg_clean(qc);
4810
4811	/* command should be marked inactive atomically with qc completion */
4812	if (ata_is_ncq(qc->tf.protocol)) {
4813		link->sactive &= ~(1 << qc->hw_tag);
4814		if (!link->sactive)
4815			ap->nr_active_links--;
4816	} else {
4817		link->active_tag = ATA_TAG_POISON;
4818		ap->nr_active_links--;
4819	}
4820
4821	/* clear exclusive status */
4822	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4823		     ap->excl_link == link))
4824		ap->excl_link = NULL;
4825
4826	/* atapi: mark qc as inactive to prevent the interrupt handler
4827	 * from completing the command twice later, before the error handler
4828	 * is called. (when rc != 0 and atapi request sense is needed)
4829	 */
4830	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4831	ap->qc_active &= ~(1ULL << qc->tag);
4832
4833	/* call completion callback */
4834	qc->complete_fn(qc);
4835}
4836
4837static void fill_result_tf(struct ata_queued_cmd *qc)
4838{
4839	struct ata_port *ap = qc->ap;
4840
4841	qc->result_tf.flags = qc->tf.flags;
4842	ap->ops->qc_fill_rtf(qc);
4843}
4844
4845static void ata_verify_xfer(struct ata_queued_cmd *qc)
4846{
4847	struct ata_device *dev = qc->dev;
4848
4849	if (!ata_is_data(qc->tf.protocol))
4850		return;
4851
4852	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4853		return;
4854
4855	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4856}
4857
4858/**
4859 *	ata_qc_complete - Complete an active ATA command
4860 *	@qc: Command to complete
4861 *
4862 *	Indicate to the mid and upper layers that an ATA command has
4863 *	completed, with either an ok or not-ok status.
4864 *
4865 *	Refrain from calling this function multiple times when
4866 *	successfully completing multiple NCQ commands.
4867 *	ata_qc_complete_multiple() should be used instead, which will
4868 *	properly update IRQ expect state.
4869 *
4870 *	LOCKING:
4871 *	spin_lock_irqsave(host lock)
4872 */
4873void ata_qc_complete(struct ata_queued_cmd *qc)
4874{
4875	struct ata_port *ap = qc->ap;
4876	struct ata_device *dev = qc->dev;
4877	struct ata_eh_info *ehi = &dev->link->eh_info;
4878
4879	/* Trigger the LED (if available) */
4880	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
4881
4882	/*
4883	 * In order to synchronize EH with the regular execution path, a qc that
4884	 * is owned by EH is marked with ATA_QCFLAG_EH.
4885	 *
4886	 * The normal execution path is responsible for not accessing a qc owned
4887	 * by EH.  libata core enforces the rule by returning NULL from
4888	 * ata_qc_from_tag() for qcs owned by EH.
4889	 */
4890	if (unlikely(qc->err_mask))
4891		qc->flags |= ATA_QCFLAG_EH;
4892
4893	/*
4894	 * Finish internal commands without any further processing and always
4895	 * with the result TF filled.
4896	 */
4897	if (unlikely(ata_tag_internal(qc->tag))) {
4898		fill_result_tf(qc);
4899		trace_ata_qc_complete_internal(qc);
4900		__ata_qc_complete(qc);
4901		return;
4902	}
4903
4904	/* Non-internal qc has failed.  Fill the result TF and summon EH. */
4905	if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
4906		fill_result_tf(qc);
4907		trace_ata_qc_complete_failed(qc);
4908		ata_qc_schedule_eh(qc);
4909		return;
4910	}
4911
4912	WARN_ON_ONCE(ata_port_is_frozen(ap));
4913
4914	/* read result TF if requested */
4915	if (qc->flags & ATA_QCFLAG_RESULT_TF)
4916		fill_result_tf(qc);
4917
4918	trace_ata_qc_complete_done(qc);
4919
4920	/*
4921	 * For CDL commands that completed without an error, check if we have
4922	 * sense data (ATA_SENSE is set). If we do, then the command may have
4923	 * been aborted by the device due to a limit timeout using the policy
4924	 * 0xD. For these commands, invoke EH to get the command sense data.
4925	 */
4926	if (qc->flags & ATA_QCFLAG_HAS_CDL &&
4927	    qc->result_tf.status & ATA_SENSE) {
4928		/*
4929		 * Tell SCSI EH to not overwrite scmd->result even if this
4930		 * command is finished with result SAM_STAT_GOOD.
4931		 */
4932		qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
4933		qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
4934		ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
4935
4936		/*
4937		 * set pending so that ata_qc_schedule_eh() does not trigger
4938		 * fast drain, and freeze the port.
4939		 */
4940		ap->pflags |= ATA_PFLAG_EH_PENDING;
4941		ata_qc_schedule_eh(qc);
4942		return;
4943	}
4944
4945	/* Some commands need post-processing after successful completion. */
4946	switch (qc->tf.command) {
4947	case ATA_CMD_SET_FEATURES:
4948		if (qc->tf.feature != SETFEATURES_WC_ON &&
4949		    qc->tf.feature != SETFEATURES_WC_OFF &&
4950		    qc->tf.feature != SETFEATURES_RA_ON &&
4951		    qc->tf.feature != SETFEATURES_RA_OFF)
4952			break;
4953		fallthrough;
4954	case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4955	case ATA_CMD_SET_MULTI: /* multi_count changed */
4956		/* revalidate device */
4957		ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4958		ata_port_schedule_eh(ap);
4959		break;
4960
4961	case ATA_CMD_SLEEP:
4962		dev->flags |= ATA_DFLAG_SLEEPING;
4963		break;
4964	}
4965
4966	if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4967		ata_verify_xfer(qc);
4968
4969	__ata_qc_complete(qc);
4970}
4971EXPORT_SYMBOL_GPL(ata_qc_complete);
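/*
 * Illustrative sketch (editor's addition, not part of libata): a simplified
 * non-NCQ completion path.  A hypothetical LLD's interrupt handler looks up
 * the active command, records any error bits in qc->err_mask and hands the
 * command back to libata with ata_qc_complete(), which then either finishes
 * it or schedules EH as described above.
 */
static void __maybe_unused example_complete_active_qc(struct ata_port *ap,
						      bool failed)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (!qc)
		return;		/* command already owned by EH */

	if (failed)
		qc->err_mask |= AC_ERR_DEV;

	ata_qc_complete(qc);
}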
4972
4973/**
4974 *	ata_qc_get_active - get bitmask of active qcs
4975 *	@ap: port in question
4976 *
4977 *	LOCKING:
4978 *	spin_lock_irqsave(host lock)
4979 *
4980 *	RETURNS:
4981 *	Bitmask of active qcs
4982 */
4983u64 ata_qc_get_active(struct ata_port *ap)
4984{
4985	u64 qc_active = ap->qc_active;
4986
4987	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4988	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4989		qc_active |= (1 << 0);
4990		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4991	}
4992
4993	return qc_active;
4994}
4995EXPORT_SYMBOL_GPL(ata_qc_get_active);
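/*
 * Illustrative sketch (editor's addition, not part of libata): an NCQ-capable
 * LLD typically reads a "tags completed" mask from its hardware in the
 * interrupt handler and feeds it to ata_qc_complete_multiple(), using
 * ata_qc_get_active() as the libata-side reference.  The hw_done_mask
 * argument below stands in for that hypothetical register read.
 */
static void __maybe_unused example_ncq_irq(struct ata_port *ap, u64 hw_done_mask)
{
	int nr_done;

	/*
	 * ata_qc_complete_multiple() completes every command that is active
	 * in libata but cleared in the mask passed to it.
	 */
	nr_done = ata_qc_complete_multiple(ap,
					   ata_qc_get_active(ap) ^ hw_done_mask);
	if (nr_done < 0)
		ata_port_err(ap, "inconsistent NCQ completion mask\n");
}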
4996
4997/**
4998 *	ata_qc_issue - issue taskfile to device
4999 *	@qc: command to issue to device
5000 *
5001 *	Prepare an ATA command for submission to the device.
5002 *	This includes mapping the data into a DMA-able
5003 *	area, filling in the S/G table, and finally
5004 *	writing the taskfile to hardware, starting the command.
5005 *
5006 *	LOCKING:
5007 *	spin_lock_irqsave(host lock)
5008 */
5009void ata_qc_issue(struct ata_queued_cmd *qc)
5010{
5011	struct ata_port *ap = qc->ap;
5012	struct ata_link *link = qc->dev->link;
5013	u8 prot = qc->tf.protocol;
5014
5015	/* Make sure only one non-NCQ command is outstanding. */
5016	WARN_ON_ONCE(ata_tag_valid(link->active_tag));
5017
5018	if (ata_is_ncq(prot)) {
5019		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
5020
5021		if (!link->sactive)
5022			ap->nr_active_links++;
5023		link->sactive |= 1 << qc->hw_tag;
5024	} else {
5025		WARN_ON_ONCE(link->sactive);
5026
5027		ap->nr_active_links++;
5028		link->active_tag = qc->tag;
5029	}
5030
5031	qc->flags |= ATA_QCFLAG_ACTIVE;
5032	ap->qc_active |= 1ULL << qc->tag;
5033
5034	/*
5035	 * We guarantee to LLDs that they will have at least one
5036	 * non-zero sg if the command is a data command.
5037	 */
5038	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5039		goto sys_err;
5040
5041	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5042				 (ap->flags & ATA_FLAG_PIO_DMA)))
5043		if (ata_sg_setup(qc))
5044			goto sys_err;
5045
5046	/* if device is sleeping, schedule reset and abort the link */
5047	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5048		link->eh_info.action |= ATA_EH_RESET;
5049		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5050		ata_link_abort(link);
5051		return;
5052	}
5053
5054	trace_ata_qc_prep(qc);
5055	qc->err_mask |= ap->ops->qc_prep(qc);
5056	if (unlikely(qc->err_mask))
5057		goto err;
5058	trace_ata_qc_issue(qc);
5059	qc->err_mask |= ap->ops->qc_issue(qc);
5060	if (unlikely(qc->err_mask))
5061		goto err;
5062	return;
5063
5064sys_err:
5065	qc->err_mask |= AC_ERR_SYSTEM;
5066err:
5067	ata_qc_complete(qc);
5068}
5069
5070/**
5071 *	ata_phys_link_online - test whether the given link is online
5072 *	@link: ATA link to test
5073 *
5074 *	Test whether @link is online.  Note that this function returns
5075 *	0 if online status of @link cannot be obtained, so
5076 *	ata_link_online(link) != !ata_link_offline(link).
5077 *
5078 *	LOCKING:
5079 *	None.
5080 *
5081 *	RETURNS:
5082 *	True if the port online status is available and online.
5083 */
5084bool ata_phys_link_online(struct ata_link *link)
5085{
5086	u32 sstatus;
5087
5088	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5089	    ata_sstatus_online(sstatus))
5090		return true;
5091	return false;
5092}
5093
5094/**
5095 *	ata_phys_link_offline - test whether the given link is offline
5096 *	@link: ATA link to test
5097 *
5098 *	Test whether @link is offline.  Note that this function
5099 *	returns 0 if offline status of @link cannot be obtained, so
5100 *	ata_link_online(link) != !ata_link_offline(link).
5101 *
5102 *	LOCKING:
5103 *	None.
5104 *
5105 *	RETURNS:
5106 *	True if the port offline status is available and offline.
5107 */
5108bool ata_phys_link_offline(struct ata_link *link)
5109{
5110	u32 sstatus;
5111
5112	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5113	    !ata_sstatus_online(sstatus))
5114		return true;
5115	return false;
5116}
5117
5118/**
5119 *	ata_link_online - test whether the given link is online
5120 *	@link: ATA link to test
5121 *
5122 *	Test whether @link is online.  This is identical to
5123 *	ata_phys_link_online() when there's no slave link.  When
5124 *	there's a slave link, this function should only be called on
5125 *	the master link and will return true if any of M/S links is
5126 *	online.
5127 *
5128 *	LOCKING:
5129 *	None.
5130 *
5131 *	RETURNS:
5132 *	True if the port online status is available and online.
5133 */
5134bool ata_link_online(struct ata_link *link)
5135{
5136	struct ata_link *slave = link->ap->slave_link;
5137
5138	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5139
5140	return ata_phys_link_online(link) ||
5141		(slave && ata_phys_link_online(slave));
5142}
5143EXPORT_SYMBOL_GPL(ata_link_online);
5144
5145/**
5146 *	ata_link_offline - test whether the given link is offline
5147 *	@link: ATA link to test
5148 *
5149 *	Test whether @link is offline.  This is identical to
5150 *	ata_phys_link_offline() when there's no slave link.  When
5151 *	there's a slave link, this function should only be called on
5152 *	the master link and will return true if both M/S links are
5153 *	offline.
5154 *
5155 *	LOCKING:
5156 *	None.
5157 *
5158 *	RETURNS:
5159 *	True if the port offline status is available and offline.
5160 */
5161bool ata_link_offline(struct ata_link *link)
5162{
5163	struct ata_link *slave = link->ap->slave_link;
5164
5165	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5166
5167	return ata_phys_link_offline(link) &&
5168		(!slave || ata_phys_link_offline(slave));
5169}
5170EXPORT_SYMBOL_GPL(ata_link_offline);
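/*
 * Illustrative sketch (editor's addition, not part of libata): reset and
 * hotplug paths commonly use these helpers to decide whether device
 * classification is worth attempting at all.  The hypothetical hardreset
 * tail below skips classification when the link reports no device.
 */
static int __maybe_unused example_hardreset_tail(struct ata_link *link,
						 unsigned int *class)
{
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		return 0;
	}

	/* link is up; a real driver would classify the attached device here */
	return 0;
}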
5171
5172#ifdef CONFIG_PM
5173static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5174				unsigned int action, unsigned int ehi_flags,
5175				bool async)
5176{
5177	struct ata_link *link;
5178	unsigned long flags;
5179
5180	spin_lock_irqsave(ap->lock, flags);
5181
5182	/*
5183	 * A previous PM operation might still be in progress. Wait for
5184	 * ATA_PFLAG_PM_PENDING to clear.
5185	 */
5186	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5187		spin_unlock_irqrestore(ap->lock, flags);
5188		ata_port_wait_eh(ap);
5189		spin_lock_irqsave(ap->lock, flags);
5190	}
5191
5192	/* Request PM operation to EH */
5193	ap->pm_mesg = mesg;
5194	ap->pflags |= ATA_PFLAG_PM_PENDING;
5195	ata_for_each_link(link, ap, HOST_FIRST) {
5196		link->eh_info.action |= action;
5197		link->eh_info.flags |= ehi_flags;
5198	}
5199
5200	ata_port_schedule_eh(ap);
5201
5202	spin_unlock_irqrestore(ap->lock, flags);
5203
5204	if (!async)
5205		ata_port_wait_eh(ap);
5206}
5207
5208static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg,
5209			     bool async)
5210{
5211	/*
5212	 * We are about to suspend the port, so we do not care about
5213	 * scsi_rescan_device() calls scheduled by previous resume operations.
5214	 * The next resume will schedule the rescan again. So cancel any rescan
5215	 * that is not done yet.
5216	 */
5217	cancel_delayed_work_sync(&ap->scsi_rescan_task);
5218
5219	/*
5220	 * On some hardware, the device fails to respond after it is spun down for
5221	 * suspend. As the device will not be used until being resumed, we
5222	 * do not need to touch the device. Ask EH to skip the usual stuff
5223	 * and proceed directly to suspend.
5224	 *
5225	 * http://thread.gmane.org/gmane.linux.ide/46764
5226	 */
5227	ata_port_request_pm(ap, mesg, 0,
5228			    ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
5229			    ATA_EHI_NO_RECOVERY,
5230			    async);
5231}
5232
5233static int ata_port_pm_suspend(struct device *dev)
5234{
5235	struct ata_port *ap = to_ata_port(dev);
5236
5237	if (pm_runtime_suspended(dev))
5238		return 0;
5239
5240	ata_port_suspend(ap, PMSG_SUSPEND, false);
5241	return 0;
5242}
5243
5244static int ata_port_pm_freeze(struct device *dev)
5245{
5246	struct ata_port *ap = to_ata_port(dev);
5247
5248	if (pm_runtime_suspended(dev))
5249		return 0;
5250
5251	ata_port_suspend(ap, PMSG_FREEZE, false);
5252	return 0;
5253}
5254
5255static int ata_port_pm_poweroff(struct device *dev)
5256{
5257	if (!pm_runtime_suspended(dev))
5258		ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE, false);
5259	return 0;
5260}
5261
5262static void ata_port_resume(struct ata_port *ap, pm_message_t mesg,
5263			    bool async)
5264{
5265	ata_port_request_pm(ap, mesg, ATA_EH_RESET,
5266			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
5267			    async);
5268}
5269
5270static int ata_port_pm_resume(struct device *dev)
5271{
5272	if (!pm_runtime_suspended(dev))
5273		ata_port_resume(to_ata_port(dev), PMSG_RESUME, true);
5274	return 0;
5275}
5276
5277/*
5278 * For ODDs, the upper layer will poll for media change every few seconds,
5279 * which will make it enter and leave suspend state every few seconds. And
5280 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
5281 * is very little and the ODD may malfunction after constantly being reset.
5282 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5283 * ODD is attached to the port.
5284 */
5285static int ata_port_runtime_idle(struct device *dev)
5286{
5287	struct ata_port *ap = to_ata_port(dev);
5288	struct ata_link *link;
5289	struct ata_device *adev;
5290
5291	ata_for_each_link(link, ap, HOST_FIRST) {
5292		ata_for_each_dev(adev, link, ENABLED)
5293			if (adev->class == ATA_DEV_ATAPI &&
5294			    !zpodd_dev_enabled(adev))
5295				return -EBUSY;
5296	}
5297
5298	return 0;
5299}
5300
5301static int ata_port_runtime_suspend(struct device *dev)
5302{
5303	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND, false);
5304	return 0;
5305}
5306
5307static int ata_port_runtime_resume(struct device *dev)
5308{
5309	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME, false);
5310	return 0;
5311}
5312
5313static const struct dev_pm_ops ata_port_pm_ops = {
5314	.suspend = ata_port_pm_suspend,
5315	.resume = ata_port_pm_resume,
5316	.freeze = ata_port_pm_freeze,
5317	.thaw = ata_port_pm_resume,
5318	.poweroff = ata_port_pm_poweroff,
5319	.restore = ata_port_pm_resume,
5320
5321	.runtime_suspend = ata_port_runtime_suspend,
5322	.runtime_resume = ata_port_runtime_resume,
5323	.runtime_idle = ata_port_runtime_idle,
5324};
5325
5326/* sas ports don't participate in pm runtime management of ata_ports,
5327 * and need to resume ata devices at the domain level, not the per-port
5328 * level. sas suspend/resume is async to allow parallel port recovery
5329 * since sas has multiple ata_port instances per Scsi_Host.
5330 */
5331void ata_sas_port_suspend(struct ata_port *ap)
5332{
5333	ata_port_suspend(ap, PMSG_SUSPEND, true);
5334}
5335EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5336
5337void ata_sas_port_resume(struct ata_port *ap)
5338{
5339	ata_port_resume(ap, PMSG_RESUME, true);
5340}
5341EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5342
5343/**
5344 *	ata_host_suspend - suspend host
5345 *	@host: host to suspend
5346 *	@mesg: PM message
5347 *
5348 *	Suspend @host.  Actual operation is performed by port suspend.
5349 */
5350void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5351{
5352	host->dev->power.power_state = mesg;
5353}
5354EXPORT_SYMBOL_GPL(ata_host_suspend);
5355
5356/**
5357 *	ata_host_resume - resume host
5358 *	@host: host to resume
5359 *
5360 *	Resume @host.  Actual operation is performed by port resume.
5361 */
5362void ata_host_resume(struct ata_host *host)
5363{
5364	host->dev->power.power_state = PMSG_ON;
5365}
5366EXPORT_SYMBOL_GPL(ata_host_resume);
5367#endif
5368
5369const struct device_type ata_port_type = {
5370	.name = ATA_PORT_TYPE_NAME,
5371#ifdef CONFIG_PM
5372	.pm = &ata_port_pm_ops,
5373#endif
5374};
5375
5376/**
5377 *	ata_dev_init - Initialize an ata_device structure
5378 *	@dev: Device structure to initialize
5379 *
5380 *	Initialize @dev in preparation for probing.
5381 *
5382 *	LOCKING:
5383 *	Inherited from caller.
5384 */
5385void ata_dev_init(struct ata_device *dev)
5386{
5387	struct ata_link *link = ata_dev_phys_link(dev);
5388	struct ata_port *ap = link->ap;
5389	unsigned long flags;
5390
5391	/* SATA spd limit is bound to the attached device, reset together */
5392	link->sata_spd_limit = link->hw_sata_spd_limit;
5393	link->sata_spd = 0;
5394
5395	/* High bits of dev->flags are used to record warm plug
5396	 * requests which occur asynchronously.  Synchronize using
5397	 * host lock.
5398	 */
5399	spin_lock_irqsave(ap->lock, flags);
5400	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5401	dev->horkage = 0;
5402	spin_unlock_irqrestore(ap->lock, flags);
5403
5404	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5405	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5406	dev->pio_mask = UINT_MAX;
5407	dev->mwdma_mask = UINT_MAX;
5408	dev->udma_mask = UINT_MAX;
5409}
5410
5411/**
5412 *	ata_link_init - Initialize an ata_link structure
5413 *	@ap: ATA port link is attached to
5414 *	@link: Link structure to initialize
5415 *	@pmp: Port multiplier port number
5416 *
5417 *	Initialize @link.
5418 *
5419 *	LOCKING:
5420 *	Kernel thread context (may sleep)
5421 */
5422void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5423{
5424	int i;
5425
5426	/* clear everything except for devices */
5427	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5428	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5429
5430	link->ap = ap;
5431	link->pmp = pmp;
5432	link->active_tag = ATA_TAG_POISON;
5433	link->hw_sata_spd_limit = UINT_MAX;
5434
5435	/* can't use iterator, ap isn't initialized yet */
5436	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5437		struct ata_device *dev = &link->device[i];
5438
5439		dev->link = link;
5440		dev->devno = dev - link->device;
5441#ifdef CONFIG_ATA_ACPI
5442		dev->gtf_filter = ata_acpi_gtf_filter;
5443#endif
5444		ata_dev_init(dev);
5445	}
5446}
5447
5448/**
5449 *	sata_link_init_spd - Initialize link->sata_spd_limit
5450 *	@link: Link to configure sata_spd_limit for
5451 *
5452 *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
5453 *	configured value.
5454 *
5455 *	LOCKING:
5456 *	Kernel thread context (may sleep).
5457 *
5458 *	RETURNS:
5459 *	0 on success, -errno on failure.
5460 */
5461int sata_link_init_spd(struct ata_link *link)
5462{
5463	u8 spd;
5464	int rc;
5465
5466	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5467	if (rc)
5468		return rc;
5469
5470	spd = (link->saved_scontrol >> 4) & 0xf;
5471	if (spd)
5472		link->hw_sata_spd_limit &= (1 << spd) - 1;
5473
5474	ata_force_link_limits(link);
5475
5476	link->sata_spd_limit = link->hw_sata_spd_limit;
5477
5478	return 0;
5479}
5480
5481/**
5482 *	ata_port_alloc - allocate and initialize basic ATA port resources
5483 *	@host: ATA host this allocated port belongs to
5484 *
5485 *	Allocate and initialize basic ATA port resources.
5486 *
5487 *	RETURNS:
5488 *	Allocated ATA port on success, NULL on failure.
5489 *
5490 *	LOCKING:
5491 *	Inherited from calling layer (may sleep).
5492 */
5493struct ata_port *ata_port_alloc(struct ata_host *host)
5494{
5495	struct ata_port *ap;
5496
5497	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5498	if (!ap)
5499		return NULL;
5500
5501	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5502	ap->lock = &host->lock;
5503	ap->print_id = -1;
5504	ap->local_port_no = -1;
5505	ap->host = host;
5506	ap->dev = host->dev;
5507
5508	mutex_init(&ap->scsi_scan_mutex);
5509	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5510	INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5511	INIT_LIST_HEAD(&ap->eh_done_q);
5512	init_waitqueue_head(&ap->eh_wait_q);
5513	init_completion(&ap->park_req_pending);
5514	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5515		    TIMER_DEFERRABLE);
5516
5517	ap->cbl = ATA_CBL_NONE;
5518
5519	ata_link_init(ap, &ap->link, 0);
5520
5521#ifdef ATA_IRQ_TRAP
5522	ap->stats.unhandled_irq = 1;
5523	ap->stats.idle_irq = 1;
5524#endif
5525	ata_sff_port_init(ap);
5526
5527	return ap;
5528}
5529
5530static void ata_devres_release(struct device *gendev, void *res)
5531{
5532	struct ata_host *host = dev_get_drvdata(gendev);
5533	int i;
5534
5535	for (i = 0; i < host->n_ports; i++) {
5536		struct ata_port *ap = host->ports[i];
5537
5538		if (!ap)
5539			continue;
5540
5541		if (ap->scsi_host)
5542			scsi_host_put(ap->scsi_host);
5543
5544	}
5545
5546	dev_set_drvdata(gendev, NULL);
5547	ata_host_put(host);
5548}
5549
5550static void ata_host_release(struct kref *kref)
5551{
5552	struct ata_host *host = container_of(kref, struct ata_host, kref);
5553	int i;
5554
5555	for (i = 0; i < host->n_ports; i++) {
5556		struct ata_port *ap = host->ports[i];
5557
5558		kfree(ap->pmp_link);
5559		kfree(ap->slave_link);
5560		kfree(ap->ncq_sense_buf);
5561		kfree(ap);
5562		host->ports[i] = NULL;
5563	}
5564	kfree(host);
5565}
5566
5567void ata_host_get(struct ata_host *host)
5568{
5569	kref_get(&host->kref);
5570}
5571
5572void ata_host_put(struct ata_host *host)
5573{
5574	kref_put(&host->kref, ata_host_release);
5575}
5576EXPORT_SYMBOL_GPL(ata_host_put);
5577
5578/**
5579 *	ata_host_alloc - allocate and init basic ATA host resources
5580 *	@dev: generic device this host is associated with
5581 *	@max_ports: maximum number of ATA ports associated with this host
5582 *
5583 *	Allocate and initialize basic ATA host resources.  An LLD calls
5584 *	this function to allocate a host, then fully initializes it and
5585 *	attaches it using ata_host_register().
5586 *
5587 *	@max_ports ports are allocated and host->n_ports is
5588 *	initialized to @max_ports.  The caller is allowed to decrease
5589 *	host->n_ports before calling ata_host_register().  The unused
5590 *	ports will be automatically freed on registration.
5591 *
5592 *	RETURNS:
5593 *	Allocated ATA host on success, NULL on failure.
5594 *
5595 *	LOCKING:
5596 *	Inherited from calling layer (may sleep).
5597 */
5598struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5599{
5600	struct ata_host *host;
5601	size_t sz;
5602	int i;
5603	void *dr;
5604
5605	/* alloc a container for our list of ATA ports (buses) */
5606	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5607	host = kzalloc(sz, GFP_KERNEL);
5608	if (!host)
5609		return NULL;
5610
5611	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5612		goto err_free;
5613
5614	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
5615	if (!dr)
5616		goto err_out;
5617
5618	devres_add(dev, dr);
5619	dev_set_drvdata(dev, host);
5620
5621	spin_lock_init(&host->lock);
5622	mutex_init(&host->eh_mutex);
5623	host->dev = dev;
5624	host->n_ports = max_ports;
5625	kref_init(&host->kref);
5626
5627	/* allocate ports bound to this host */
5628	for (i = 0; i < max_ports; i++) {
5629		struct ata_port *ap;
5630
5631		ap = ata_port_alloc(host);
5632		if (!ap)
5633			goto err_out;
5634
5635		ap->port_no = i;
5636		host->ports[i] = ap;
5637	}
5638
5639	devres_remove_group(dev, NULL);
5640	return host;
5641
5642 err_out:
5643	devres_release_group(dev, NULL);
5644 err_free:
5645	kfree(host);
5646	return NULL;
5647}
5648EXPORT_SYMBOL_GPL(ata_host_alloc);
5649
5650/**
5651 *	ata_host_alloc_pinfo - alloc host and init with port_info array
5652 *	@dev: generic device this host is associated with
5653 *	@ppi: array of ATA port_info to initialize host with
5654 *	@n_ports: number of ATA ports attached to this host
5655 *
5656 *	Allocate ATA host and initialize with info from @ppi.  If NULL
5657 *	terminated, @ppi may contain fewer entries than @n_ports.  The
5658 *	last entry will be used for the remaining ports.
5659 *
5660 *	RETURNS:
5661 *	Allocated ATA host on success, NULL on failure.
5662 *
5663 *	LOCKING:
5664 *	Inherited from calling layer (may sleep).
5665 */
5666struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5667				      const struct ata_port_info * const * ppi,
5668				      int n_ports)
5669{
5670	const struct ata_port_info *pi = &ata_dummy_port_info;
5671	struct ata_host *host;
5672	int i, j;
5673
5674	host = ata_host_alloc(dev, n_ports);
5675	if (!host)
5676		return NULL;
5677
5678	for (i = 0, j = 0; i < host->n_ports; i++) {
5679		struct ata_port *ap = host->ports[i];
5680
5681		if (ppi[j])
5682			pi = ppi[j++];
5683
5684		ap->pio_mask = pi->pio_mask;
5685		ap->mwdma_mask = pi->mwdma_mask;
5686		ap->udma_mask = pi->udma_mask;
5687		ap->flags |= pi->flags;
5688		ap->link.flags |= pi->link_flags;
5689		ap->ops = pi->port_ops;
5690
5691		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5692			host->ops = pi->port_ops;
5693	}
5694
5695	return host;
5696}
5697EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
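/*
 * Illustrative sketch (editor's addition, not part of libata): because @ppi
 * may be NULL terminated, a driver whose ports are identical can describe all
 * of them with a single ata_port_info.  The ops and masks below are
 * hypothetical placeholders.
 */
static __maybe_unused struct ata_host *example_alloc_two_port_host(struct device *dev)
{
	/* hypothetical driver-private port operations */
	static struct ata_port_operations example_port_ops = {
		.inherits	= &sata_port_ops,
	};
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &example_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* both ports inherit "info"; the NULL terminates the array early */
	return ata_host_alloc_pinfo(dev, ppi, 2);
}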
5698
5699static void ata_host_stop(struct device *gendev, void *res)
5700{
5701	struct ata_host *host = dev_get_drvdata(gendev);
5702	int i;
5703
5704	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5705
5706	for (i = 0; i < host->n_ports; i++) {
5707		struct ata_port *ap = host->ports[i];
5708
5709		if (ap->ops->port_stop)
5710			ap->ops->port_stop(ap);
5711	}
5712
5713	if (host->ops->host_stop)
5714		host->ops->host_stop(host);
5715}
5716
5717/**
5718 *	ata_finalize_port_ops - finalize ata_port_operations
5719 *	@ops: ata_port_operations to finalize
5720 *
5721 *	An ata_port_operations can inherit from another ops and that
5722 *	ops can again inherit from another.  This can go on as many
5723 *	times as necessary as long as there is no loop in the
5724 *	inheritance chain.
5725 *
5726 *	Ops tables are finalized when the host is started.  NULL or
5727 *	unspecified entries are inherited from the closest ancestor
5728 *	which has the method and the entry is populated with it.
5729 *	After finalization, the ops table directly points to all the
5730 *	methods and ->inherits is no longer necessary and cleared.
5731 *
5732 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5733 *
5734 *	LOCKING:
5735 *	None.
5736 */
5737static void ata_finalize_port_ops(struct ata_port_operations *ops)
5738{
5739	static DEFINE_SPINLOCK(lock);
5740	const struct ata_port_operations *cur;
5741	void **begin = (void **)ops;
5742	void **end = (void **)&ops->inherits;
5743	void **pp;
5744
5745	if (!ops || !ops->inherits)
5746		return;
5747
5748	spin_lock(&lock);
5749
5750	for (cur = ops->inherits; cur; cur = cur->inherits) {
5751		void **inherit = (void **)cur;
5752
5753		for (pp = begin; pp < end; pp++, inherit++)
5754			if (!*pp)
5755				*pp = *inherit;
5756	}
5757
5758	for (pp = begin; pp < end; pp++)
5759		if (IS_ERR(*pp))
5760			*pp = NULL;
5761
5762	ops->inherits = NULL;
5763
5764	spin_unlock(&lock);
5765}
5766
5767/**
5768 *	ata_host_start - start and freeze ports of an ATA host
5769 *	@host: ATA host to start ports for
5770 *
5771 *	Start and then freeze ports of @host.  Started status is
5772 *	recorded in host->flags, so this function can be called
5773 *	multiple times.  Ports are guaranteed to get started only
5774 *	once.  If host->ops is not initialized yet, it is set to the
5775 *	first non-dummy port ops.
5776 *
5777 *	LOCKING:
5778 *	Inherited from calling layer (may sleep).
5779 *
5780 *	RETURNS:
5781 *	0 if all ports are started successfully, -errno otherwise.
5782 */
5783int ata_host_start(struct ata_host *host)
5784{
5785	int have_stop = 0;
5786	void *start_dr = NULL;
5787	int i, rc;
5788
5789	if (host->flags & ATA_HOST_STARTED)
5790		return 0;
5791
5792	ata_finalize_port_ops(host->ops);
5793
5794	for (i = 0; i < host->n_ports; i++) {
5795		struct ata_port *ap = host->ports[i];
5796
5797		ata_finalize_port_ops(ap->ops);
5798
5799		if (!host->ops && !ata_port_is_dummy(ap))
5800			host->ops = ap->ops;
5801
5802		if (ap->ops->port_stop)
5803			have_stop = 1;
5804	}
5805
5806	if (host->ops && host->ops->host_stop)
5807		have_stop = 1;
5808
5809	if (have_stop) {
5810		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5811		if (!start_dr)
5812			return -ENOMEM;
5813	}
5814
5815	for (i = 0; i < host->n_ports; i++) {
5816		struct ata_port *ap = host->ports[i];
5817
5818		if (ap->ops->port_start) {
5819			rc = ap->ops->port_start(ap);
5820			if (rc) {
5821				if (rc != -ENODEV)
5822					dev_err(host->dev,
5823						"failed to start port %d (errno=%d)\n",
5824						i, rc);
5825				goto err_out;
5826			}
5827		}
5828		ata_eh_freeze_port(ap);
5829	}
5830
5831	if (start_dr)
5832		devres_add(host->dev, start_dr);
5833	host->flags |= ATA_HOST_STARTED;
5834	return 0;
5835
5836 err_out:
5837	while (--i >= 0) {
5838		struct ata_port *ap = host->ports[i];
5839
5840		if (ap->ops->port_stop)
5841			ap->ops->port_stop(ap);
5842	}
5843	devres_free(start_dr);
5844	return rc;
5845}
5846EXPORT_SYMBOL_GPL(ata_host_start);
5847
5848/**
5849 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
5850 *	@host:	host to initialize
5851 *	@dev:	device host is attached to
5852 *	@ops:	port_ops
5853 *
5854 */
5855void ata_host_init(struct ata_host *host, struct device *dev,
5856		   struct ata_port_operations *ops)
5857{
5858	spin_lock_init(&host->lock);
5859	mutex_init(&host->eh_mutex);
5860	host->n_tags = ATA_MAX_QUEUE;
5861	host->dev = dev;
5862	host->ops = ops;
5863	kref_init(&host->kref);
5864}
5865EXPORT_SYMBOL_GPL(ata_host_init);
5866
5867void ata_port_probe(struct ata_port *ap)
5868{
5869	struct ata_eh_info *ehi = &ap->link.eh_info;
5870	unsigned long flags;
5871
5872	/* kick EH for boot probing */
5873	spin_lock_irqsave(ap->lock, flags);
5874
5875	ehi->probe_mask |= ATA_ALL_DEVICES;
5876	ehi->action |= ATA_EH_RESET;
5877	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5878
5879	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5880	ap->pflags |= ATA_PFLAG_LOADING;
5881	ata_port_schedule_eh(ap);
5882
5883	spin_unlock_irqrestore(ap->lock, flags);
5884}
5885EXPORT_SYMBOL_GPL(ata_port_probe);
5886
5887static void async_port_probe(void *data, async_cookie_t cookie)
5888{
5889	struct ata_port *ap = data;
5890
5891	/*
5892	 * If we're not allowed to scan this host in parallel,
5893	 * we need to wait until all previous scans have completed
5894	 * before going further.
5895	 * Jeff Garzik says this is only within a controller, so we
5896	 * don't need to wait for port 0, only for later ports.
5897	 */
5898	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5899		async_synchronize_cookie(cookie);
5900
5901	ata_port_probe(ap);
5902	ata_port_wait_eh(ap);
5903
5904	/* in order to keep device order, we need to synchronize at this point */
5905	async_synchronize_cookie(cookie);
5906
5907	ata_scsi_scan_host(ap, 1);
5908}
5909
5910/**
5911 *	ata_host_register - register initialized ATA host
5912 *	@host: ATA host to register
5913 *	@sht: template for SCSI host
5914 *
5915 *	Register initialized ATA host.  @host is allocated using
5916 *	ata_host_alloc() and fully initialized by LLD.  This function
5917 *	starts ports, registers @host with ATA and SCSI layers and
5918 *	probes registered devices.
5919 *
5920 *	LOCKING:
5921 *	Inherited from calling layer (may sleep).
5922 *
5923 *	RETURNS:
5924 *	0 on success, -errno otherwise.
5925 */
5926int ata_host_register(struct ata_host *host, const struct scsi_host_template *sht)
5927{
5928	int i, rc;
5929
5930	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
5931
5932	/* host must have been started */
5933	if (!(host->flags & ATA_HOST_STARTED)) {
5934		dev_err(host->dev, "BUG: trying to register unstarted host\n");
5935		WARN_ON(1);
5936		return -EINVAL;
5937	}
5938
5939	/* Blow away unused ports.  This happens when LLD can't
5940	 * determine the exact number of ports to allocate at
5941	 * allocation time.
5942	 */
5943	for (i = host->n_ports; host->ports[i]; i++)
5944		kfree(host->ports[i]);
5945
5946	/* give ports names and add SCSI hosts */
5947	for (i = 0; i < host->n_ports; i++) {
5948		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
5949		host->ports[i]->local_port_no = i + 1;
5950	}
5951
5952	/* Create associated sysfs transport objects  */
5953	for (i = 0; i < host->n_ports; i++) {
5954		rc = ata_tport_add(host->dev, host->ports[i]);
5955		if (rc) {
5956			goto err_tadd;
5957		}
5958	}
5959
5960	rc = ata_scsi_add_hosts(host, sht);
5961	if (rc)
5962		goto err_tadd;
5963
5964	/* set cable, sata_spd_limit and report */
5965	for (i = 0; i < host->n_ports; i++) {
5966		struct ata_port *ap = host->ports[i];
5967		unsigned int xfer_mask;
5968
5969		/* set SATA cable type if still unset */
5970		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5971			ap->cbl = ATA_CBL_SATA;
5972
5973		/* init sata_spd_limit to the current value */
5974		sata_link_init_spd(&ap->link);
5975		if (ap->slave_link)
5976			sata_link_init_spd(ap->slave_link);
5977
5978		/* print per-port info to dmesg */
5979		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5980					      ap->udma_mask);
5981
5982		if (!ata_port_is_dummy(ap)) {
5983			ata_port_info(ap, "%cATA max %s %s\n",
5984				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5985				      ata_mode_string(xfer_mask),
5986				      ap->link.eh_info.desc);
5987			ata_ehi_clear_desc(&ap->link.eh_info);
5988		} else
5989			ata_port_info(ap, "DUMMY\n");
5990	}
5991
5992	/* perform each probe asynchronously */
5993	for (i = 0; i < host->n_ports; i++) {
5994		struct ata_port *ap = host->ports[i];
5995		ap->cookie = async_schedule(async_port_probe, ap);
5996	}
5997
5998	return 0;
5999
6000 err_tadd:
6001	while (--i >= 0) {
6002		ata_tport_delete(host->ports[i]);
6003	}
6004	return rc;
6005
6006}
6007EXPORT_SYMBOL_GPL(ata_host_register);
6008
6009/**
6010 *	ata_host_activate - start host, request IRQ and register it
6011 *	@host: target ATA host
6012 *	@irq: IRQ to request
6013 *	@irq_handler: irq_handler used when requesting IRQ
6014 *	@irq_flags: irq_flags used when requesting IRQ
6015 *	@sht: scsi_host_template to use when registering the host
6016 *
6017 *	After allocating an ATA host and initializing it, most libata
6018 *	LLDs perform three steps to activate the host - start host,
6019 *	request IRQ and register it.  This helper takes necessary
6020 *	arguments and performs the three steps in one go.
6021 *
6022 *	An invalid IRQ skips the IRQ registration and expects the host to
6023 *	have set polling mode on the port. In this case, @irq_handler
6024 *	should be NULL.
6025 *
6026 *	LOCKING:
6027 *	Inherited from calling layer (may sleep).
6028 *
6029 *	RETURNS:
6030 *	0 on success, -errno otherwise.
6031 */
6032int ata_host_activate(struct ata_host *host, int irq,
6033		      irq_handler_t irq_handler, unsigned long irq_flags,
6034		      const struct scsi_host_template *sht)
6035{
6036	int i, rc;
6037	char *irq_desc;
6038
6039	rc = ata_host_start(host);
6040	if (rc)
6041		return rc;
6042
6043	/* Special case for polling mode */
6044	if (!irq) {
6045		WARN_ON(irq_handler);
6046		return ata_host_register(host, sht);
6047	}
6048
6049	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6050				  dev_driver_string(host->dev),
6051				  dev_name(host->dev));
6052	if (!irq_desc)
6053		return -ENOMEM;
6054
6055	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6056			      irq_desc, host);
6057	if (rc)
6058		return rc;
6059
6060	for (i = 0; i < host->n_ports; i++)
6061		ata_port_desc_misc(host->ports[i], irq);
6062
6063	rc = ata_host_register(host, sht);
6064	/* if failed, just free the IRQ and leave ports alone */
6065	if (rc)
6066		devm_free_irq(host->dev, irq, host);
6067
6068	return rc;
6069}
6070EXPORT_SYMBOL_GPL(ata_host_activate);
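/*
 * Illustrative sketch (editor's addition, not part of libata): a typical
 * native-mode probe() boils down to "alloc, fill in port resources,
 * activate".  The port_info array, interrupt handler and IRQ number are
 * passed in here because they are driver specific; a real driver would also
 * map its registers and stash them in the port before activation.
 */
static int __maybe_unused example_probe(struct device *dev, int irq,
					irq_handler_t example_interrupt,
					const struct ata_port_info * const *ppi,
					const struct scsi_host_template *sht)
{
	struct ata_host *host;

	host = ata_host_alloc_pinfo(dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	/* ... ioremap controller registers, set up host->ports[0] ... */

	/* starts the port, requests the IRQ and registers with SCSI */
	return ata_host_activate(host, irq, example_interrupt, IRQF_SHARED,
				 sht);
}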
6071
6072/**
6073 *	ata_port_detach - Detach ATA port in preparation of device removal
6074 *	@ap: ATA port to be detached
6075 *
6076 *	Detach all ATA devices and the associated SCSI devices of @ap;
6077 *	then, remove the associated SCSI host.  @ap is guaranteed to
6078 *	be quiescent on return from this function.
6079 *
6080 *	LOCKING:
6081 *	Kernel thread context (may sleep).
6082 */
6083static void ata_port_detach(struct ata_port *ap)
6084{
6085	unsigned long flags;
6086	struct ata_link *link;
6087	struct ata_device *dev;
6088
6089	/* Ensure ata_port probe has completed */
6090	async_synchronize_cookie(ap->cookie + 1);
6091
6092	/* Wait for any ongoing EH */
6093	ata_port_wait_eh(ap);
6094
6095	mutex_lock(&ap->scsi_scan_mutex);
6096	spin_lock_irqsave(ap->lock, flags);
6097
6098	/* Remove scsi devices */
6099	ata_for_each_link(link, ap, HOST_FIRST) {
6100		ata_for_each_dev(dev, link, ALL) {
6101			if (dev->sdev) {
6102				spin_unlock_irqrestore(ap->lock, flags);
6103				scsi_remove_device(dev->sdev);
6104				spin_lock_irqsave(ap->lock, flags);
6105				dev->sdev = NULL;
6106			}
6107		}
6108	}
6109
6110	/* Tell EH to disable all devices */
6111	ap->pflags |= ATA_PFLAG_UNLOADING;
6112	ata_port_schedule_eh(ap);
6113
6114	spin_unlock_irqrestore(ap->lock, flags);
6115	mutex_unlock(&ap->scsi_scan_mutex);
6116
6117	/* wait till EH commits suicide */
6118	ata_port_wait_eh(ap);
6119
6120	/* it better be dead now */
6121	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6122
6123	cancel_delayed_work_sync(&ap->hotplug_task);
6124	cancel_delayed_work_sync(&ap->scsi_rescan_task);
6125
6126	/* clean up zpodd on port removal */
6127	ata_for_each_link(link, ap, HOST_FIRST) {
6128		ata_for_each_dev(dev, link, ALL) {
6129			if (zpodd_dev_enabled(dev))
6130				zpodd_exit(dev);
6131		}
6132	}
6133	if (ap->pmp_link) {
6134		int i;
6135		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6136			ata_tlink_delete(&ap->pmp_link[i]);
6137	}
6138	/* remove the associated SCSI host */
6139	scsi_remove_host(ap->scsi_host);
6140	ata_tport_delete(ap);
6141}
6142
6143/**
6144 *	ata_host_detach - Detach all ports of an ATA host
6145 *	@host: Host to detach
6146 *
6147 *	Detach all ports of @host.
6148 *
6149 *	LOCKING:
6150 *	Kernel thread context (may sleep).
6151 */
6152void ata_host_detach(struct ata_host *host)
6153{
6154	int i;
6155
6156	for (i = 0; i < host->n_ports; i++)
6157		ata_port_detach(host->ports[i]);
6158
6159	/* the host is dead now, dissociate ACPI */
6160	ata_acpi_dissociate(host);
6161}
6162EXPORT_SYMBOL_GPL(ata_host_detach);
6163
6164#ifdef CONFIG_PCI
6165
6166/**
6167 *	ata_pci_remove_one - PCI layer callback for device removal
6168 *	@pdev: PCI device that was removed
6169 *
6170 *	PCI layer indicates to libata via this hook that hot-unplug or
6171 *	module unload event has occurred.  Detach all ports.  Resource
6172 *	release is handled via devres.
6173 *
6174 *	LOCKING:
6175 *	Inherited from PCI layer (may sleep).
6176 */
6177void ata_pci_remove_one(struct pci_dev *pdev)
6178{
6179	struct ata_host *host = pci_get_drvdata(pdev);
6180
6181	ata_host_detach(host);
6182}
6183EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6184
6185void ata_pci_shutdown_one(struct pci_dev *pdev)
6186{
6187	struct ata_host *host = pci_get_drvdata(pdev);
6188	int i;
6189
6190	for (i = 0; i < host->n_ports; i++) {
6191		struct ata_port *ap = host->ports[i];
6192
6193		ap->pflags |= ATA_PFLAG_FROZEN;
6194
6195		/* Disable port interrupts */
6196		if (ap->ops->freeze)
6197			ap->ops->freeze(ap);
6198
6199		/* Stop the port DMA engines */
6200		if (ap->ops->port_stop)
6201			ap->ops->port_stop(ap);
6202	}
6203}
6204EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6205
6206/* move to PCI subsystem */
6207int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6208{
6209	unsigned long tmp = 0;
6210
6211	switch (bits->width) {
6212	case 1: {
6213		u8 tmp8 = 0;
6214		pci_read_config_byte(pdev, bits->reg, &tmp8);
6215		tmp = tmp8;
6216		break;
6217	}
6218	case 2: {
6219		u16 tmp16 = 0;
6220		pci_read_config_word(pdev, bits->reg, &tmp16);
6221		tmp = tmp16;
6222		break;
6223	}
6224	case 4: {
6225		u32 tmp32 = 0;
6226		pci_read_config_dword(pdev, bits->reg, &tmp32);
6227		tmp = tmp32;
6228		break;
6229	}
6230
6231	default:
6232		return -EINVAL;
6233	}
6234
6235	tmp &= bits->mask;
6236
6237	return (tmp == bits->val) ? 1 : 0;
6238}
6239EXPORT_SYMBOL_GPL(pci_test_config_bits);
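/*
 * Illustrative sketch (editor's addition, not part of libata): PATA drivers
 * typically use pci_test_config_bits() to check per-port enable bits in the
 * controller's PCI configuration space before touching a port.  The register
 * offset and bit used below are illustrative placeholders.
 */
static int __maybe_unused example_port_is_enabled(struct pci_dev *pdev)
{
	/* { config register, byte width, mask, expected value } */
	static const struct pci_bits example_enable_bit = {
		0x41, 1, 0x80, 0x80
	};

	/* 1 if enabled, 0 if disabled, negative errno on a bad width */
	return pci_test_config_bits(pdev, &example_enable_bit);
}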
6240
6241#ifdef CONFIG_PM
6242void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6243{
6244	pci_save_state(pdev);
6245	pci_disable_device(pdev);
6246
6247	if (mesg.event & PM_EVENT_SLEEP)
6248		pci_set_power_state(pdev, PCI_D3hot);
6249}
6250EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6251
6252int ata_pci_device_do_resume(struct pci_dev *pdev)
6253{
6254	int rc;
6255
6256	pci_set_power_state(pdev, PCI_D0);
6257	pci_restore_state(pdev);
6258
6259	rc = pcim_enable_device(pdev);
6260	if (rc) {
6261		dev_err(&pdev->dev,
6262			"failed to enable device after resume (%d)\n", rc);
6263		return rc;
6264	}
6265
6266	pci_set_master(pdev);
6267	return 0;
6268}
6269EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6270
6271int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6272{
6273	struct ata_host *host = pci_get_drvdata(pdev);
6274
6275	ata_host_suspend(host, mesg);
6276
6277	ata_pci_device_do_suspend(pdev, mesg);
6278
6279	return 0;
6280}
6281EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6282
6283int ata_pci_device_resume(struct pci_dev *pdev)
6284{
6285	struct ata_host *host = pci_get_drvdata(pdev);
6286	int rc;
6287
6288	rc = ata_pci_device_do_resume(pdev);
6289	if (rc == 0)
6290		ata_host_resume(host);
6291	return rc;
6292}
6293EXPORT_SYMBOL_GPL(ata_pci_device_resume);
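
/*
 * Illustrative sketch (not part of this file): drivers with no extra
 * suspend/resume work can plug the two wrappers above directly into the
 * legacy PCI PM hooks, while drivers that need their own steps call the
 * *_do_* variants around them.  "my_ata_pci_driver" is hypothetical.
 *
 *	static struct pci_driver my_ata_pci_driver = {
 *		...
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */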
6294#endif /* CONFIG_PM */
6295#endif /* CONFIG_PCI */
6296
6297/**
6298 *	ata_platform_remove_one - Platform layer callback for device removal
6299 *	@pdev: Platform device that was removed
6300 *
6301 *	Platform layer indicates to libata via this hook that a hot-unplug or
6302 *	module unload event has occurred.  Detach all ports.  Resource
6303 *	release is handled via devres.
6304 *
6305 *	LOCKING:
6306 *	Inherited from platform layer (may sleep).
6307 */
6308void ata_platform_remove_one(struct platform_device *pdev)
6309{
6310	struct ata_host *host = platform_get_drvdata(pdev);
6311
6312	ata_host_detach(host);
6313}
6314EXPORT_SYMBOL_GPL(ata_platform_remove_one);
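/*
 * Illustrative sketch (not part of this file): a platform ATA driver points
 * its struct platform_driver remove callback at this helper (assuming a
 * kernel where that callback returns void, as this function does).  The
 * names "my_ahci_driver" and "my_ahci_probe" are hypothetical.
 *
 *	static struct platform_driver my_ahci_driver = {
 *		.probe	= my_ahci_probe,
 *		.remove	= ata_platform_remove_one,
 *		.driver	= {
 *			.name = "my-ahci",
 *		},
 *	};
 */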
6315
6316#ifdef CONFIG_ATA_FORCE
6317
6318#define force_cbl(name, flag)				\
6319	{ #name,	.cbl		= (flag) }
6320
6321#define force_spd_limit(spd, val)			\
6322	{ #spd,	.spd_limit		= (val) }
6323
6324#define force_xfer(mode, shift)				\
6325	{ #mode,	.xfer_mask	= (1UL << (shift)) }
6326
6327#define force_lflag_on(name, flags)			\
6328	{ #name,	.lflags_on	= (flags) }
6329
6330#define force_lflag_onoff(name, flags)			\
6331	{ "no" #name,	.lflags_on	= (flags) },	\
6332	{ #name,	.lflags_off	= (flags) }
6333
6334#define force_horkage_on(name, flag)			\
6335	{ #name,	.horkage_on	= (flag) }
6336
6337#define force_horkage_onoff(name, flag)			\
6338	{ "no" #name,	.horkage_on	= (flag) },	\
6339	{ #name,	.horkage_off	= (flag) }
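
/*
 * For reference, each helper above expands to one struct ata_force_param
 * initializer (or two, for the _onoff variants), e.g.:
 *
 *	force_cbl(40c, ATA_CBL_PATA40)
 *		=> { "40c", .cbl = ATA_CBL_PATA40 }
 *
 *	force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ)
 *		=> { "noncq", .horkage_on  = ATA_HORKAGE_NONCQ },
 *		   { "ncq",   .horkage_off = ATA_HORKAGE_NONCQ }
 */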
6340
6341static const struct ata_force_param force_tbl[] __initconst = {
6342	force_cbl(40c,			ATA_CBL_PATA40),
6343	force_cbl(80c,			ATA_CBL_PATA80),
6344	force_cbl(short40c,		ATA_CBL_PATA40_SHORT),
6345	force_cbl(unk,			ATA_CBL_PATA_UNK),
6346	force_cbl(ign,			ATA_CBL_PATA_IGN),
6347	force_cbl(sata,			ATA_CBL_SATA),
6348
6349	force_spd_limit(1.5Gbps,	1),
6350	force_spd_limit(3.0Gbps,	2),
6351
6352	force_xfer(pio0,		ATA_SHIFT_PIO + 0),
6353	force_xfer(pio1,		ATA_SHIFT_PIO + 1),
6354	force_xfer(pio2,		ATA_SHIFT_PIO + 2),
6355	force_xfer(pio3,		ATA_SHIFT_PIO + 3),
6356	force_xfer(pio4,		ATA_SHIFT_PIO + 4),
6357	force_xfer(pio5,		ATA_SHIFT_PIO + 5),
6358	force_xfer(pio6,		ATA_SHIFT_PIO + 6),
6359	force_xfer(mwdma0,		ATA_SHIFT_MWDMA + 0),
6360	force_xfer(mwdma1,		ATA_SHIFT_MWDMA + 1),
6361	force_xfer(mwdma2,		ATA_SHIFT_MWDMA + 2),
6362	force_xfer(mwdma3,		ATA_SHIFT_MWDMA + 3),
6363	force_xfer(mwdma4,		ATA_SHIFT_MWDMA + 4),
6364	force_xfer(udma0,		ATA_SHIFT_UDMA + 0),
6365	force_xfer(udma16,		ATA_SHIFT_UDMA + 0),
6366	force_xfer(udma/16,		ATA_SHIFT_UDMA + 0),
6367	force_xfer(udma1,		ATA_SHIFT_UDMA + 1),
6368	force_xfer(udma25,		ATA_SHIFT_UDMA + 1),
6369	force_xfer(udma/25,		ATA_SHIFT_UDMA + 1),
6370	force_xfer(udma2,		ATA_SHIFT_UDMA + 2),
6371	force_xfer(udma33,		ATA_SHIFT_UDMA + 2),
6372	force_xfer(udma/33,		ATA_SHIFT_UDMA + 2),
6373	force_xfer(udma3,		ATA_SHIFT_UDMA + 3),
6374	force_xfer(udma44,		ATA_SHIFT_UDMA + 3),
6375	force_xfer(udma/44,		ATA_SHIFT_UDMA + 3),
6376	force_xfer(udma4,		ATA_SHIFT_UDMA + 4),
6377	force_xfer(udma66,		ATA_SHIFT_UDMA + 4),
6378	force_xfer(udma/66,		ATA_SHIFT_UDMA + 4),
6379	force_xfer(udma5,		ATA_SHIFT_UDMA + 5),
6380	force_xfer(udma100,		ATA_SHIFT_UDMA + 5),
6381	force_xfer(udma/100,		ATA_SHIFT_UDMA + 5),
6382	force_xfer(udma6,		ATA_SHIFT_UDMA + 6),
6383	force_xfer(udma133,		ATA_SHIFT_UDMA + 6),
6384	force_xfer(udma/133,		ATA_SHIFT_UDMA + 6),
6385	force_xfer(udma7,		ATA_SHIFT_UDMA + 7),
6386
6387	force_lflag_on(nohrst,		ATA_LFLAG_NO_HRST),
6388	force_lflag_on(nosrst,		ATA_LFLAG_NO_SRST),
6389	force_lflag_on(norst,		ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
6390	force_lflag_on(rstonce,		ATA_LFLAG_RST_ONCE),
6391	force_lflag_onoff(dbdelay,	ATA_LFLAG_NO_DEBOUNCE_DELAY),
6392
6393	force_horkage_onoff(ncq,	ATA_HORKAGE_NONCQ),
6394	force_horkage_onoff(ncqtrim,	ATA_HORKAGE_NO_NCQ_TRIM),
6395	force_horkage_onoff(ncqati,	ATA_HORKAGE_NO_NCQ_ON_ATI),
6396
6397	force_horkage_onoff(trim,	ATA_HORKAGE_NOTRIM),
6398	force_horkage_on(trim_zero,	ATA_HORKAGE_ZERO_AFTER_TRIM),
6399	force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
6400
6401	force_horkage_onoff(dma,	ATA_HORKAGE_NODMA),
6402	force_horkage_on(atapi_dmadir,	ATA_HORKAGE_ATAPI_DMADIR),
6403	force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
6404
6405	force_horkage_onoff(dmalog,	ATA_HORKAGE_NO_DMA_LOG),
6406	force_horkage_onoff(iddevlog,	ATA_HORKAGE_NO_ID_DEV_LOG),
6407	force_horkage_onoff(logdir,	ATA_HORKAGE_NO_LOG_DIR),
6408
6409	force_horkage_on(max_sec_128,	ATA_HORKAGE_MAX_SEC_128),
6410	force_horkage_on(max_sec_1024,	ATA_HORKAGE_MAX_SEC_1024),
6411	force_horkage_on(max_sec_lba48,	ATA_HORKAGE_MAX_SEC_LBA48),
6412
6413	force_horkage_onoff(lpm,	ATA_HORKAGE_NOLPM),
6414	force_horkage_onoff(setxfer,	ATA_HORKAGE_NOSETXFER),
6415	force_horkage_on(dump_id,	ATA_HORKAGE_DUMP_ID),
6416	force_horkage_onoff(fua,	ATA_HORKAGE_NO_FUA),
6417
6418	force_horkage_on(disable,	ATA_HORKAGE_DISABLE),
6419};
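
/*
 * These are the value names accepted by the libata.force= module parameter.
 * The parser below takes a comma-separated list of [PORT[.DEVICE]:]VALUE
 * entries, so a hypothetical command line could be:
 *
 *	libata.force=norst,1:1.5Gbps,2.00:noncq
 *
 * i.e. skip both hard and soft reset everywhere, limit port 1 to 1.5Gbps,
 * and disable NCQ for device 00 on port 2.
 */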
6420
6421static int __init ata_parse_force_one(char **cur,
6422				      struct ata_force_ent *force_ent,
6423				      const char **reason)
6424{
6425	char *start = *cur, *p = *cur;
6426	char *id, *val, *endp;
6427	const struct ata_force_param *match_fp = NULL;
6428	int nr_matches = 0, i;
6429
6430	/* find where this param ends and update *cur */
6431	while (*p != '\0' && *p != ',')
6432		p++;
6433
6434	if (*p == '\0')
6435		*cur = p;
6436	else
6437		*cur = p + 1;
6438
6439	*p = '\0';
6440
6441	/* parse */
6442	p = strchr(start, ':');
6443	if (!p) {
6444		val = strstrip(start);
6445		goto parse_val;
6446	}
6447	*p = '\0';
6448
6449	id = strstrip(start);
6450	val = strstrip(p + 1);
6451
6452	/* parse id */
6453	p = strchr(id, '.');
6454	if (p) {
6455		*p++ = '\0';
6456		force_ent->device = simple_strtoul(p, &endp, 10);
6457		if (p == endp || *endp != '\0') {
6458			*reason = "invalid device";
6459			return -EINVAL;
6460		}
6461	}
6462
6463	force_ent->port = simple_strtoul(id, &endp, 10);
6464	if (id == endp || *endp != '\0') {
6465		*reason = "invalid port/link";
6466		return -EINVAL;
6467	}
6468
6469 parse_val:
6470	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6471	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6472		const struct ata_force_param *fp = &force_tbl[i];
6473
6474		if (strncasecmp(val, fp->name, strlen(val)))
6475			continue;
6476
6477		nr_matches++;
6478		match_fp = fp;
6479
6480		if (strcasecmp(val, fp->name) == 0) {
6481			nr_matches = 1;
6482			break;
6483		}
6484	}
6485
6486	if (!nr_matches) {
6487		*reason = "unknown value";
6488		return -EINVAL;
6489	}
6490	if (nr_matches > 1) {
6491		*reason = "ambiguous value";
6492		return -EINVAL;
6493	}
6494
6495	force_ent->param = *match_fp;
6496
6497	return 0;
6498}
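
/*
 * Worked example of the parse above: for the entry "2.00:noncq", *cur is
 * advanced past the terminating ',', the "2.00" id gives port = 2 and
 * device = 0, and "noncq" is matched case-insensitively against force_tbl,
 * with unambiguous prefixes accepted as shortcuts.
 */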
6499
6500static void __init ata_parse_force_param(void)
6501{
6502	int idx = 0, size = 1;
6503	int last_port = -1, last_device = -1;
6504	char *p, *cur, *next;
6505
6506	/* Calculate maximum number of params and allocate ata_force_tbl */
6507	for (p = ata_force_param_buf; *p; p++)
6508		if (*p == ',')
6509			size++;
6510
6511	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6512	if (!ata_force_tbl) {
6513		printk(KERN_WARNING "ata: failed to allocate force table, "
6514		       "libata.force ignored\n");
6515		return;
6516	}
6517
6518	/* parse and populate the table */
6519	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6520		const char *reason = "";
6521		struct ata_force_ent te = { .port = -1, .device = -1 };
6522
6523		next = cur;
6524		if (ata_parse_force_one(&next, &te, &reason)) {
6525			printk(KERN_WARNING "ata: failed to parse force "
6526			       "parameter \"%s\" (%s)\n",
6527			       cur, reason);
6528			continue;
6529		}
6530
6531		if (te.port == -1) {
6532			te.port = last_port;
6533			te.device = last_device;
6534		}
6535
6536		ata_force_tbl[idx++] = te;
6537
6538		last_port = te.port;
6539		last_device = te.device;
6540	}
6541
6542	ata_force_tbl_size = idx;
6543}
6544
6545static void ata_free_force_param(void)
6546{
6547	kfree(ata_force_tbl);
6548}
6549#else
6550static inline void ata_parse_force_param(void) { }
6551static inline void ata_free_force_param(void) { }
6552#endif
6553
6554static int __init ata_init(void)
6555{
6556	int rc;
6557
6558	ata_parse_force_param();
6559
6560	rc = ata_sff_init();
6561	if (rc) {
6562		ata_free_force_param();
6563		return rc;
6564	}
6565
6566	libata_transport_init();
6567	ata_scsi_transport_template = ata_attach_transport();
6568	if (!ata_scsi_transport_template) {
6569		ata_sff_exit();
6570		rc = -ENOMEM;
6571		goto err_out;
6572	}
6573
6574	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6575	return 0;
6576
6577err_out:
6578	return rc;
6579}
6580
6581static void __exit ata_exit(void)
6582{
6583	ata_release_transport(ata_scsi_transport_template);
6584	libata_transport_exit();
6585	ata_sff_exit();
6586	ata_free_force_param();
6587}
6588
6589subsys_initcall(ata_init);
6590module_exit(ata_exit);
6591
6592static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6593
6594int ata_ratelimit(void)
6595{
6596	return __ratelimit(&ratelimit);
6597}
6598EXPORT_SYMBOL_GPL(ata_ratelimit);
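
/*
 * Note: DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1) above allows a burst
 * of one message per 200ms interval, so ata_ratelimit() returns true at
 * most about five times per second.
 */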
6599
6600/**
6601 *	ata_msleep - ATA EH owner aware msleep
6602 *	@ap: ATA port to attribute the sleep to
6603 *	@msecs: duration to sleep in milliseconds
6604 *
6605 *	Sleeps @msecs.  If the current task is the owner of @ap's EH, the
6606 *	ownership is released before going to sleep and reacquired
6607 *	after the sleep is complete.  IOW, other ports sharing
6608 *	@ap->host will be allowed to own the EH while this task is
6609 *	sleeping.
6610 *
6611 *	LOCKING:
6612 *	Might sleep.
6613 */
6614void ata_msleep(struct ata_port *ap, unsigned int msecs)
6615{
6616	bool owns_eh = ap && ap->host->eh_owner == current;
6617
6618	if (owns_eh)
6619		ata_eh_release(ap);
6620
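	/*
	 * For short delays usleep_range() is used: msleep() below roughly
	 * 20ms may sleep considerably longer than requested due to
	 * jiffies-based timer granularity.
	 */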
6621	if (msecs < 20) {
6622		unsigned long usecs = msecs * USEC_PER_MSEC;
6623		usleep_range(usecs, usecs + 50);
6624	} else {
6625		msleep(msecs);
6626	}
6627
6628	if (owns_eh)
6629		ata_eh_acquire(ap);
6630}
6631EXPORT_SYMBOL_GPL(ata_msleep);
6632
6633/**
6634 *	ata_wait_register - wait until register value changes
6635 *	@ap: ATA port to wait register for, can be NULL
6636 *	@reg: IO-mapped register
6637 *	@mask: Mask to apply to read register value
6638 *	@val: Wait condition
6639 *	@interval: polling interval in milliseconds
6640 *	@timeout: timeout in milliseconds
6641 *
6642 *	Waiting for some bits of a register to change is a common
6643 *	operation for ATA controllers.  This function reads the 32bit LE
6644 *	IO-mapped register @reg and tests for the following condition:
6645 *
6646 *	(*@reg & mask) != val
6647 *
6648 *	If the condition is met, it returns; otherwise, the read is
6649 *	repeated every @interval milliseconds until @timeout expires.
6650 *
6651 *	LOCKING:
6652 *	Kernel thread context (may sleep)
6653 *
6654 *	RETURNS:
6655 *	The final register value.
6656 */
6657u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6658		      unsigned int interval, unsigned int timeout)
6659{
6660	unsigned long deadline;
6661	u32 tmp;
6662
6663	tmp = ioread32(reg);
6664
6665	/* Calculate timeout _after_ the first read to make sure
6666	 * preceding writes reach the controller before starting to
6667	 * eat away the timeout.
6668	 */
6669	deadline = ata_deadline(jiffies, timeout);
6670
6671	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6672		ata_msleep(ap, interval);
6673		tmp = ioread32(reg);
6674	}
6675
6676	return tmp;
6677}
6678EXPORT_SYMBOL_GPL(ata_wait_register);
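
/*
 * Illustrative sketch (not part of this file): a typical use is polling a
 * memory-mapped status register until a busy bit drops.  With a hypothetical
 * MY_PORT_CMD register offset and MY_CMD_BUSY bit:
 *
 *	u32 status = ata_wait_register(ap, mmio + MY_PORT_CMD,
 *				       MY_CMD_BUSY, MY_CMD_BUSY,
 *				       10, 1000);
 *	if (status & MY_CMD_BUSY)
 *		return -EBUSY;
 *
 * The call returns as soon as (ioread32(reg) & MY_CMD_BUSY) != MY_CMD_BUSY,
 * polling every 10ms for at most 1000ms.
 */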
6679
6680/*
6681 * Dummy port_ops
6682 */
6683static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6684{
6685	return AC_ERR_SYSTEM;
6686}
6687
6688static void ata_dummy_error_handler(struct ata_port *ap)
6689{
6690	/* truly dummy */
6691}
6692
6693struct ata_port_operations ata_dummy_port_ops = {
6694	.qc_prep		= ata_noop_qc_prep,
6695	.qc_issue		= ata_dummy_qc_issue,
6696	.error_handler		= ata_dummy_error_handler,
6697	.sched_eh		= ata_std_sched_eh,
6698	.end_eh			= ata_std_end_eh,
6699};
6700EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6701
6702const struct ata_port_info ata_dummy_port_info = {
6703	.port_ops		= &ata_dummy_port_ops,
6704};
6705EXPORT_SYMBOL_GPL(ata_dummy_port_info);
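
/*
 * ata_dummy_port_info is meant for host drivers that must register a port
 * slot which exists in the controller's numbering but should not be driven:
 * every command queued to such a port fails immediately with AC_ERR_SYSTEM
 * and its error handler does nothing.
 */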
6706
6707void ata_print_version(const struct device *dev, const char *version)
6708{
6709	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6710}
6711EXPORT_SYMBOL(ata_print_version);
6712
6713EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6714EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6715EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6716EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6717EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);