   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  SATA specific part of ATA helper library
   4 *
   5 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   6 *  Copyright 2003-2004 Jeff Garzik
   7 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/module.h>
  12#include <scsi/scsi_cmnd.h>
  13#include <scsi/scsi_device.h>
  14#include <linux/libata.h>
  15
  16#include "libata.h"
  17#include "libata-transport.h"
  18
  19/* debounce timing parameters in msecs { interval, duration, timeout } */
  20const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
  21EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
  22const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
  23EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
  24const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
  25EXPORT_SYMBOL_GPL(sata_deb_timing_long);
  26
  27/**
  28 *	sata_scr_valid - test whether SCRs are accessible
  29 *	@link: ATA link to test SCR accessibility for
  30 *
  31 *	Test whether SCRs are accessible for @link.
  32 *
  33 *	LOCKING:
  34 *	None.
  35 *
  36 *	RETURNS:
  37 *	1 if SCRs are accessible, 0 otherwise.
  38 */
  39int sata_scr_valid(struct ata_link *link)
  40{
  41	struct ata_port *ap = link->ap;
  42
  43	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
  44}
  45EXPORT_SYMBOL_GPL(sata_scr_valid);
  46
  47/**
  48 *	sata_scr_read - read SCR register of the specified port
  49 *	@link: ATA link to read SCR for
  50 *	@reg: SCR to read
  51 *	@val: Place to store read value
  52 *
  53 *	Read SCR register @reg of @link into *@val.  This function is
  54 *	guaranteed to succeed if @link is ap->link, the cable type of
  55 *	the port is SATA and the port implements ->scr_read.
  56 *
  57 *	LOCKING:
  58 *	None if @link is ap->link.  Kernel thread context otherwise.
  59 *
  60 *	RETURNS:
  61 *	0 on success, negative errno on failure.
  62 */
  63int sata_scr_read(struct ata_link *link, int reg, u32 *val)
  64{
  65	if (ata_is_host_link(link)) {
  66		if (sata_scr_valid(link))
  67			return link->ap->ops->scr_read(link, reg, val);
  68		return -EOPNOTSUPP;
  69	}
  70
  71	return sata_pmp_scr_read(link, reg, val);
  72}
  73EXPORT_SYMBOL_GPL(sata_scr_read);
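
/*
 * Usage sketch (hypothetical caller; the foo_* name is illustrative): read
 * SStatus through sata_scr_read() and decode the DET field to see whether a
 * device is attached with PHY communication established (DET == 0x3), which
 * is roughly what libata's own online checks do.
 */
static bool foo_link_has_device(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return false;		/* SCRs not accessible */

	return (sstatus & 0xf) == 0x3;	/* DET: device present, PHY online */
}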
  74
  75/**
  76 *	sata_scr_write - write SCR register of the specified port
  77 *	@link: ATA link to write SCR for
  78 *	@reg: SCR to write
  79 *	@val: value to write
  80 *
  81 *	Write @val to SCR register @reg of @link.  This function is
  82 *	guaranteed to succeed if @link is ap->link, the cable type of
  83 *	the port is SATA and the port implements ->scr_read.
  84 *
  85 *	LOCKING:
  86 *	None if @link is ap->link.  Kernel thread context otherwise.
  87 *
  88 *	RETURNS:
  89 *	0 on success, negative errno on failure.
  90 */
  91int sata_scr_write(struct ata_link *link, int reg, u32 val)
  92{
  93	if (ata_is_host_link(link)) {
  94		if (sata_scr_valid(link))
  95			return link->ap->ops->scr_write(link, reg, val);
  96		return -EOPNOTSUPP;
  97	}
  98
  99	return sata_pmp_scr_write(link, reg, val);
 100}
 101EXPORT_SYMBOL_GPL(sata_scr_write);
 102
 103/**
 104 *	sata_scr_write_flush - write SCR register of the specified port and flush
 105 *	@link: ATA link to write SCR for
 106 *	@reg: SCR to write
 107 *	@val: value to write
 108 *
 109 *	This function is identical to sata_scr_write() except that this
 110 *	function performs flush after writing to the register.
 111 *
 112 *	LOCKING:
 113 *	None if @link is ap->link.  Kernel thread context otherwise.
 114 *
 115 *	RETURNS:
 116 *	0 on success, negative errno on failure.
 117 */
 118int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
 119{
 120	if (ata_is_host_link(link)) {
 121		int rc;
 122
 123		if (sata_scr_valid(link)) {
 124			rc = link->ap->ops->scr_write(link, reg, val);
 125			if (rc == 0)
 126				rc = link->ap->ops->scr_read(link, reg, &val);
 127			return rc;
 128		}
 129		return -EOPNOTSUPP;
 130	}
 131
 132	return sata_pmp_scr_write(link, reg, val);
 133}
 134EXPORT_SYMBOL_GPL(sata_scr_write_flush);
 135
 136/**
 137 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 138 *	@tf: Taskfile to convert
 139 *	@pmp: Port multiplier port
 140 *	@is_cmd: This FIS is for command
  141 *	@fis: Buffer into which data will be output
 142 *
 143 *	Converts a standard ATA taskfile to a Serial ATA
 144 *	FIS structure (Register - Host to Device).
 145 *
 146 *	LOCKING:
 147 *	Inherited from caller.
 148 */
 149void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
 150{
 151	fis[0] = 0x27;			/* Register - Host to Device FIS */
 152	fis[1] = pmp & 0xf;		/* Port multiplier number*/
 153	if (is_cmd)
 154		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
 155
 156	fis[2] = tf->command;
 157	fis[3] = tf->feature;
 158
 159	fis[4] = tf->lbal;
 160	fis[5] = tf->lbam;
 161	fis[6] = tf->lbah;
 162	fis[7] = tf->device;
 163
 164	fis[8] = tf->hob_lbal;
 165	fis[9] = tf->hob_lbam;
 166	fis[10] = tf->hob_lbah;
 167	fis[11] = tf->hob_feature;
 168
 169	fis[12] = tf->nsect;
 170	fis[13] = tf->hob_nsect;
 171	fis[14] = 0;
 172	fis[15] = tf->ctl;
 173
 174	fis[16] = tf->auxiliary & 0xff;
 175	fis[17] = (tf->auxiliary >> 8) & 0xff;
 176	fis[18] = (tf->auxiliary >> 16) & 0xff;
 177	fis[19] = (tf->auxiliary >> 24) & 0xff;
 178}
 179EXPORT_SYMBOL_GPL(ata_tf_to_fis);
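
/*
 * Usage sketch (hypothetical LLD; the foo_* name is illustrative): build the
 * 20-byte Register - Host to Device FIS for a queued command directly into
 * the controller's command table.  The opposite direction, parsing a
 * received D2H FIS, is handled by ata_tf_from_fis() below.
 */
static void foo_build_cmd_fis(struct ata_queued_cmd *qc, u8 *cmd_tbl)
{
	/* cmd_tbl must point at a DMA-able area of at least 20 bytes */
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
}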
 180
 181/**
 182 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 183 *	@fis: Buffer from which data will be input
 184 *	@tf: Taskfile to output
 185 *
 186 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 187 *
 188 *	LOCKING:
 189 *	Inherited from caller.
 190 */
 191
 192void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
 193{
 194	tf->status	= fis[2];
 195	tf->error	= fis[3];
 196
 197	tf->lbal	= fis[4];
 198	tf->lbam	= fis[5];
 199	tf->lbah	= fis[6];
 200	tf->device	= fis[7];
 201
 202	tf->hob_lbal	= fis[8];
 203	tf->hob_lbam	= fis[9];
 204	tf->hob_lbah	= fis[10];
 205
 206	tf->nsect	= fis[12];
 207	tf->hob_nsect	= fis[13];
 208}
 209EXPORT_SYMBOL_GPL(ata_tf_from_fis);
 210
 211/**
 212 *	sata_link_debounce - debounce SATA phy status
 213 *	@link: ATA link to debounce SATA phy status for
 214 *	@params: timing parameters { interval, duration, timeout } in msec
 215 *	@deadline: deadline jiffies for the operation
 216 *
 217 *	Make sure SStatus of @link reaches stable state, determined by
 218 *	holding the same value where DET is not 1 for @duration polled
  219 *	every @interval, before @timeout.  Timeout constrains the
  220 *	beginning of the stable state.  Because DET gets stuck at 1 on
  221 *	some controllers after hot unplugging, this function waits
 222 *	until timeout then returns 0 if DET is stable at 1.
 223 *
 224 *	@timeout is further limited by @deadline.  The sooner of the
 225 *	two is used.
 226 *
 227 *	LOCKING:
 228 *	Kernel thread context (may sleep)
 229 *
 230 *	RETURNS:
 231 *	0 on success, -errno on failure.
 232 */
 233int sata_link_debounce(struct ata_link *link, const unsigned long *params,
 234		       unsigned long deadline)
 235{
 236	unsigned long interval = params[0];
 237	unsigned long duration = params[1];
 238	unsigned long last_jiffies, t;
 239	u32 last, cur;
 240	int rc;
 241
 242	t = ata_deadline(jiffies, params[2]);
 243	if (time_before(t, deadline))
 244		deadline = t;
 245
 246	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
 247		return rc;
 248	cur &= 0xf;
 249
 250	last = cur;
 251	last_jiffies = jiffies;
 252
 253	while (1) {
 254		ata_msleep(link->ap, interval);
 255		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
 256			return rc;
 257		cur &= 0xf;
 258
 259		/* DET stable? */
 260		if (cur == last) {
 261			if (cur == 1 && time_before(jiffies, deadline))
 262				continue;
 263			if (time_after(jiffies,
 264				       ata_deadline(last_jiffies, duration)))
 265				return 0;
 266			continue;
 267		}
 268
 269		/* unstable, start over */
 270		last = cur;
 271		last_jiffies = jiffies;
 272
 273		/* Check deadline.  If debouncing failed, return
 274		 * -EPIPE to tell upper layer to lower link speed.
 275		 */
 276		if (time_after(jiffies, deadline))
 277			return -EPIPE;
 278	}
 279}
 280EXPORT_SYMBOL_GPL(sata_link_debounce);
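
/*
 * Usage sketch (hypothetical reset helper): debounce the PHY with the
 * normal timing table and an assumed 5 second deadline before trusting
 * SStatus readings.
 */
static int foo_wait_link_stable(struct ata_link *link)
{
	unsigned long deadline = ata_deadline(jiffies, 5000);

	return sata_link_debounce(link, sata_deb_timing_normal, deadline);
}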
 281
 282/**
 283 *	sata_link_resume - resume SATA link
 284 *	@link: ATA link to resume SATA
 285 *	@params: timing parameters { interval, duration, timeout } in msec
 286 *	@deadline: deadline jiffies for the operation
 287 *
 288 *	Resume SATA phy @link and debounce it.
 289 *
 290 *	LOCKING:
 291 *	Kernel thread context (may sleep)
 292 *
 293 *	RETURNS:
 294 *	0 on success, -errno on failure.
 295 */
 296int sata_link_resume(struct ata_link *link, const unsigned long *params,
 297		     unsigned long deadline)
 298{
 299	int tries = ATA_LINK_RESUME_TRIES;
 300	u32 scontrol, serror;
 301	int rc;
 302
 303	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 304		return rc;
 305
 306	/*
 307	 * Writes to SControl sometimes get ignored under certain
 308	 * controllers (ata_piix SIDPR).  Make sure DET actually is
 309	 * cleared.
 310	 */
 311	do {
 312		scontrol = (scontrol & 0x0f0) | 0x300;
 313		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
 314			return rc;
 315		/*
 316		 * Some PHYs react badly if SStatus is pounded
 317		 * immediately after resuming.  Delay 200ms before
 318		 * debouncing.
 319		 */
 320		if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
 321			ata_msleep(link->ap, 200);
 322
 323		/* is SControl restored correctly? */
 324		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 325			return rc;
 326	} while ((scontrol & 0xf0f) != 0x300 && --tries);
 327
 328	if ((scontrol & 0xf0f) != 0x300) {
 329		ata_link_warn(link, "failed to resume link (SControl %X)\n",
 330			     scontrol);
 331		return 0;
 332	}
 333
 334	if (tries < ATA_LINK_RESUME_TRIES)
 335		ata_link_warn(link, "link resume succeeded after %d retries\n",
 336			      ATA_LINK_RESUME_TRIES - tries);
 337
 338	if ((rc = sata_link_debounce(link, params, deadline)))
 339		return rc;
 340
 341	/* clear SError, some PHYs require this even for SRST to work */
 342	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
 343		rc = sata_scr_write(link, SCR_ERROR, serror);
 344
 345	return rc != -EINVAL ? rc : 0;
 346}
 347EXPORT_SYMBOL_GPL(sata_link_resume);
 348
 349/**
 350 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 351 *	@link: ATA link to manipulate SControl for
 352 *	@policy: LPM policy to configure
 353 *	@spm_wakeup: initiate LPM transition to active state
 354 *
 355 *	Manipulate the IPM field of the SControl register of @link
 356 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 357 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
 358 *	the link.  This function also clears PHYRDY_CHG before
 359 *	returning.
 360 *
 361 *	LOCKING:
 362 *	EH context.
 363 *
 364 *	RETURNS:
 365 *	0 on success, -errno otherwise.
 366 */
 367int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 368		      bool spm_wakeup)
 369{
 370	struct ata_eh_context *ehc = &link->eh_context;
 371	bool woken_up = false;
 372	u32 scontrol;
 373	int rc;
 374
 375	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
 376	if (rc)
 377		return rc;
 378
 379	switch (policy) {
 380	case ATA_LPM_MAX_POWER:
 381		/* disable all LPM transitions */
 382		scontrol |= (0x7 << 8);
 383		/* initiate transition to active state */
 384		if (spm_wakeup) {
 385			scontrol |= (0x4 << 12);
 386			woken_up = true;
 387		}
 388		break;
 389	case ATA_LPM_MED_POWER:
 390		/* allow LPM to PARTIAL */
 391		scontrol &= ~(0x1 << 8);
 392		scontrol |= (0x6 << 8);
 393		break;
 394	case ATA_LPM_MED_POWER_WITH_DIPM:
 395	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
 396	case ATA_LPM_MIN_POWER:
 397		if (ata_link_nr_enabled(link) > 0)
 398			/* no restrictions on LPM transitions */
 399			scontrol &= ~(0x7 << 8);
 400		else {
 401			/* empty port, power off */
 402			scontrol &= ~0xf;
 403			scontrol |= (0x1 << 2);
 404		}
 405		break;
 406	default:
 407		WARN_ON(1);
 408	}
 409
 410	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
 411	if (rc)
 412		return rc;
 413
 414	/* give the link time to transit out of LPM state */
 415	if (woken_up)
 416		msleep(10);
 417
 418	/* clear PHYRDY_CHG from SError */
 419	ehc->i.serror &= ~SERR_PHYRDY_CHG;
 420	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
 421}
 422EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
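
/*
 * Usage sketch (hypothetical ->set_lpm hook for a controller whose LPM
 * behaviour is driven purely by SControl; foo_* is illustrative): request a
 * PHY wakeup only when returning to ATA_LPM_MAX_POWER, since @spm_wakeup is
 * meaningless for the other policies.
 */
static int foo_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		       unsigned hints)
{
	return sata_link_scr_lpm(link, policy, policy == ATA_LPM_MAX_POWER);
}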
 423
 424static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
 425{
 426	struct ata_link *host_link = &link->ap->link;
 427	u32 limit, target, spd;
 428
 429	limit = link->sata_spd_limit;
 430
 431	/* Don't configure downstream link faster than upstream link.
 432	 * It doesn't speed up anything and some PMPs choke on such
 433	 * configuration.
 434	 */
 435	if (!ata_is_host_link(link) && host_link->sata_spd)
 436		limit &= (1 << host_link->sata_spd) - 1;
 437
 438	if (limit == UINT_MAX)
 439		target = 0;
 440	else
 441		target = fls(limit);
 442
 443	spd = (*scontrol >> 4) & 0xf;
 444	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
 445
 446	return spd != target;
 447}
 448
 449/**
 450 *	sata_set_spd_needed - is SATA spd configuration needed
 451 *	@link: Link in question
 452 *
 453 *	Test whether the spd limit in SControl matches
 454 *	@link->sata_spd_limit.  This function is used to determine
 455 *	whether hardreset is necessary to apply SATA spd
 456 *	configuration.
 457 *
 458 *	LOCKING:
 459 *	Inherited from caller.
 460 *
 461 *	RETURNS:
 462 *	1 if SATA spd configuration is needed, 0 otherwise.
 463 */
 464static int sata_set_spd_needed(struct ata_link *link)
 465{
 466	u32 scontrol;
 467
 468	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
 469		return 1;
 470
 471	return __sata_set_spd_needed(link, &scontrol);
 472}
 473
 474/**
 475 *	sata_set_spd - set SATA spd according to spd limit
 476 *	@link: Link to set SATA spd for
 477 *
 478 *	Set SATA spd of @link according to sata_spd_limit.
 479 *
 480 *	LOCKING:
 481 *	Inherited from caller.
 482 *
 483 *	RETURNS:
 484 *	0 if spd doesn't need to be changed, 1 if spd has been
 485 *	changed.  Negative errno if SCR registers are inaccessible.
 486 */
 487int sata_set_spd(struct ata_link *link)
 488{
 489	u32 scontrol;
 490	int rc;
 491
 492	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 493		return rc;
 494
 495	if (!__sata_set_spd_needed(link, &scontrol))
 496		return 0;
 497
 498	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
 499		return rc;
 500
 501	return 1;
 502}
 503EXPORT_SYMBOL_GPL(sata_set_spd);
 504
 505/**
 506 *	sata_link_hardreset - reset link via SATA phy reset
 507 *	@link: link to reset
 508 *	@timing: timing parameters { interval, duration, timeout } in msec
 509 *	@deadline: deadline jiffies for the operation
 510 *	@online: optional out parameter indicating link onlineness
 511 *	@check_ready: optional callback to check link readiness
 512 *
 513 *	SATA phy-reset @link using DET bits of SControl register.
 514 *	After hardreset, link readiness is waited upon using
 515 *	ata_wait_ready() if @check_ready is specified.  LLDs are
  516 *	allowed to not specify @check_ready and wait themselves after this
 517 *	function returns.  Device classification is LLD's
 518 *	responsibility.
 519 *
 520 *	*@online is set to one iff reset succeeded and @link is online
 521 *	after reset.
 522 *
 523 *	LOCKING:
 524 *	Kernel thread context (may sleep)
 525 *
 526 *	RETURNS:
 527 *	0 on success, -errno otherwise.
 528 */
 529int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
 530			unsigned long deadline,
 531			bool *online, int (*check_ready)(struct ata_link *))
 532{
 533	u32 scontrol;
 534	int rc;
 535
 536	if (online)
 537		*online = false;
 538
 539	if (sata_set_spd_needed(link)) {
 540		/* SATA spec says nothing about how to reconfigure
 541		 * spd.  To be on the safe side, turn off phy during
 542		 * reconfiguration.  This works for at least ICH7 AHCI
 543		 * and Sil3124.
 544		 */
 545		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 546			goto out;
 547
 548		scontrol = (scontrol & 0x0f0) | 0x304;
 549
 550		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
 551			goto out;
 552
 553		sata_set_spd(link);
 554	}
 555
 556	/* issue phy wake/reset */
 557	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 558		goto out;
 559
 560	scontrol = (scontrol & 0x0f0) | 0x301;
 561
 562	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
 563		goto out;
 564
 565	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
 566	 * 10.4.2 says at least 1 ms.
 567	 */
 568	ata_msleep(link->ap, 1);
 569
 570	/* bring link back */
 571	rc = sata_link_resume(link, timing, deadline);
 572	if (rc)
 573		goto out;
 574	/* if link is offline nothing more to do */
 575	if (ata_phys_link_offline(link))
 576		goto out;
 577
 578	/* Link is online.  From this point, -ENODEV too is an error. */
 579	if (online)
 580		*online = true;
 581
 582	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
 583		/* If PMP is supported, we have to do follow-up SRST.
 584		 * Some PMPs don't send D2H Reg FIS after hardreset if
 585		 * the first port is empty.  Wait only for
 586		 * ATA_TMOUT_PMP_SRST_WAIT.
 587		 */
 588		if (check_ready) {
 589			unsigned long pmp_deadline;
 590
 591			pmp_deadline = ata_deadline(jiffies,
 592						    ATA_TMOUT_PMP_SRST_WAIT);
 593			if (time_after(pmp_deadline, deadline))
 594				pmp_deadline = deadline;
 595			ata_wait_ready(link, pmp_deadline, check_ready);
 596		}
 597		rc = -EAGAIN;
 598		goto out;
 599	}
 600
 601	rc = 0;
 602	if (check_ready)
 603		rc = ata_wait_ready(link, deadline, check_ready);
 604 out:
 605	if (rc && rc != -EAGAIN) {
 606		/* online is set iff link is online && reset succeeded */
 607		if (online)
 608			*online = false;
 609		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
 610	}
 611	return rc;
 612}
 613EXPORT_SYMBOL_GPL(sata_link_hardreset);
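
/*
 * Usage sketch (hypothetical ->hardreset method; foo_* is illustrative):
 * hardreset via sata_link_hardreset() without a @check_ready callback, so
 * the driver waits for readiness itself later.  Returning -EAGAIN when the
 * link came up online follows the usual libata convention of asking EH to
 * continue with device classification.
 */
static int foo_hardreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	bool online;
	int rc;

	rc = sata_link_hardreset(link, sata_deb_timing_normal, deadline,
				 &online, NULL);
	return online ? -EAGAIN : rc;
}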
 614
 615/**
 616 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 617 *	@ap: port in question
 618 *	@qc_active: new qc_active mask
 619 *
  620 *	Complete in-flight commands.  This function is meant to be
  621 *	called from a low-level driver's interrupt routine to complete
  622 *	requests normally.  ap->qc_active and @qc_active are compared
 623 *	and commands are completed accordingly.
 624 *
 625 *	Always use this function when completing multiple NCQ commands
 626 *	from IRQ handlers instead of calling ata_qc_complete()
 627 *	multiple times to keep IRQ expect status properly in sync.
 628 *
 629 *	LOCKING:
 630 *	spin_lock_irqsave(host lock)
 631 *
 632 *	RETURNS:
 633 *	Number of completed commands on success, -errno otherwise.
 634 */
 635int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
 636{
 637	u64 done_mask, ap_qc_active = ap->qc_active;
 638	int nr_done = 0;
 639
 640	/*
 641	 * If the internal tag is set on ap->qc_active, then we care about
 642	 * bit0 on the passed in qc_active mask. Move that bit up to match
 643	 * the internal tag.
 644	 */
 645	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
 646		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
 647		qc_active ^= qc_active & 0x01;
 648	}
 649
 650	done_mask = ap_qc_active ^ qc_active;
 651
 652	if (unlikely(done_mask & qc_active)) {
 653		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
 654			     ap->qc_active, qc_active);
 655		return -EINVAL;
 656	}
 657
 658	while (done_mask) {
 659		struct ata_queued_cmd *qc;
 660		unsigned int tag = __ffs64(done_mask);
 661
 662		qc = ata_qc_from_tag(ap, tag);
 663		if (qc) {
 664			ata_qc_complete(qc);
 665			nr_done++;
 666		}
 667		done_mask &= ~(1ULL << tag);
 668	}
 669
 670	return nr_done;
 671}
 672EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
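
/*
 * Usage sketch (hypothetical interrupt handler): read the controller's
 * "commands still outstanding" register and hand that mask to
 * ata_qc_complete_multiple(), which completes every tag that dropped out of
 * ap->qc_active.  foo_port_mmio() and FOO_PORT_ACTIVE are assumed,
 * driver-specific names.
 */
static void foo_port_intr(struct ata_port *ap)
{
	void __iomem *mmio = foo_port_mmio(ap);			/* assumed helper */
	u32 still_active = readl(mmio + FOO_PORT_ACTIVE);	/* assumed register */

	ata_qc_complete_multiple(ap, still_active);
}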
 673
 674/**
 675 *	ata_slave_link_init - initialize slave link
 676 *	@ap: port to initialize slave link for
 677 *
 678 *	Create and initialize slave link for @ap.  This enables slave
 679 *	link handling on the port.
 680 *
 681 *	In libata, a port contains links and a link contains devices.
  682 *	There is a single host link but if a PMP is attached to it,
 683 *	there can be multiple fan-out links.  On SATA, there's usually
 684 *	a single device connected to a link but PATA and SATA
 685 *	controllers emulating TF based interface can have two - master
 686 *	and slave.
 687 *
 688 *	However, there are a few controllers which don't fit into this
 689 *	abstraction too well - SATA controllers which emulate TF
 690 *	interface with both master and slave devices but also have
 691 *	separate SCR register sets for each device.  These controllers
 692 *	need separate links for physical link handling
 693 *	(e.g. onlineness, link speed) but should be treated like a
 694 *	traditional M/S controller for everything else (e.g. command
 695 *	issue, softreset).
 696 *
 697 *	slave_link is libata's way of handling this class of
 698 *	controllers without impacting core layer too much.  For
 699 *	anything other than physical link handling, the default host
 700 *	link is used for both master and slave.  For physical link
 701 *	handling, separate @ap->slave_link is used.  All dirty details
 702 *	are implemented inside libata core layer.  From LLD's POV, the
 703 *	only difference is that prereset, hardreset and postreset are
 704 *	called once more for the slave link, so the reset sequence
 705 *	looks like the following.
 706 *
 707 *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
 708 *	softreset(M) -> postreset(M) -> postreset(S)
 709 *
 710 *	Note that softreset is called only for the master.  Softreset
 711 *	resets both M/S by definition, so SRST on master should handle
 712 *	both (the standard method will work just fine).
 713 *
 714 *	LOCKING:
 715 *	Should be called before host is registered.
 716 *
 717 *	RETURNS:
 718 *	0 on success, -errno on failure.
 719 */
 720int ata_slave_link_init(struct ata_port *ap)
 721{
 722	struct ata_link *link;
 723
 724	WARN_ON(ap->slave_link);
 725	WARN_ON(ap->flags & ATA_FLAG_PMP);
 726
 727	link = kzalloc(sizeof(*link), GFP_KERNEL);
 728	if (!link)
 729		return -ENOMEM;
 730
 731	ata_link_init(ap, link, 1);
 732	ap->slave_link = link;
 733	return 0;
 734}
 735EXPORT_SYMBOL_GPL(ata_slave_link_init);
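
/*
 * Usage sketch (hypothetical port setup for a TF-emulating SATA controller
 * with per-device SCRs): enable slave link handling while the ports are
 * being set up, i.e. before ata_host_register()/ata_host_activate().
 */
static int foo_setup_port(struct ata_port *ap)
{
	int rc = ata_slave_link_init(ap);

	if (rc)
		return rc;

	/* remaining driver-specific port setup would go here */
	return 0;
}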
 736
 737/**
 738 *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
 739 *	@link: Link receiving the event
 740 *
 741 *	Test whether the received PHY event has to be ignored or not.
 742 *
 743 *	LOCKING:
  744 *	None.
 745 *
 746 *	RETURNS:
 747 *	True if the event has to be ignored.
 748 */
 749bool sata_lpm_ignore_phy_events(struct ata_link *link)
 750{
 751	unsigned long lpm_timeout = link->last_lpm_change +
 752				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
 753
 754	/* if LPM is enabled, PHYRDY doesn't mean anything */
 755	if (link->lpm_policy > ATA_LPM_MAX_POWER)
 756		return true;
 757
 758	/* ignore the first PHY event after the LPM policy changed
  759	 * as it might be spurious
 760	 */
 761	if ((link->flags & ATA_LFLAG_CHANGED) &&
 762	    time_before(jiffies, lpm_timeout))
 763		return true;
 764
 765	return false;
 766}
 767EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
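
/*
 * Usage sketch (hypothetical interrupt path): before treating a PHYRDY
 * change in SError as a hotplug event, ask libata whether it should be
 * ignored because of a recent LPM policy change.
 */
static void foo_handle_serror(struct ata_port *ap, u32 serror)
{
	if ((serror & SERR_PHYRDY_CHG) &&
	    sata_lpm_ignore_phy_events(&ap->link))
		return;				/* spurious, swallow it */

	ata_port_freeze(ap);			/* genuine event, let EH handle it */
}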
 768
 769static const char *ata_lpm_policy_names[] = {
 770	[ATA_LPM_UNKNOWN]		= "max_performance",
 771	[ATA_LPM_MAX_POWER]		= "max_performance",
 772	[ATA_LPM_MED_POWER]		= "medium_power",
 773	[ATA_LPM_MED_POWER_WITH_DIPM]	= "med_power_with_dipm",
 774	[ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
 775	[ATA_LPM_MIN_POWER]		= "min_power",
 776};
 777
 778static ssize_t ata_scsi_lpm_store(struct device *device,
 779				  struct device_attribute *attr,
 780				  const char *buf, size_t count)
 781{
 782	struct Scsi_Host *shost = class_to_shost(device);
 783	struct ata_port *ap = ata_shost_to_port(shost);
 784	struct ata_link *link;
 785	struct ata_device *dev;
 786	enum ata_lpm_policy policy;
 787	unsigned long flags;
 788
 789	/* UNKNOWN is internal state, iterate from MAX_POWER */
 790	for (policy = ATA_LPM_MAX_POWER;
 791	     policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
 792		const char *name = ata_lpm_policy_names[policy];
 793
 794		if (strncmp(name, buf, strlen(name)) == 0)
 795			break;
 796	}
 797	if (policy == ARRAY_SIZE(ata_lpm_policy_names))
 798		return -EINVAL;
 799
 800	spin_lock_irqsave(ap->lock, flags);
 801
 802	ata_for_each_link(link, ap, EDGE) {
 803		ata_for_each_dev(dev, &ap->link, ENABLED) {
 804			if (dev->horkage & ATA_HORKAGE_NOLPM) {
 805				count = -EOPNOTSUPP;
 806				goto out_unlock;
 807			}
 808		}
 809	}
 810
 811	ap->target_lpm_policy = policy;
 812	ata_port_schedule_eh(ap);
 813out_unlock:
 814	spin_unlock_irqrestore(ap->lock, flags);
 815	return count;
 816}
 817
 818static ssize_t ata_scsi_lpm_show(struct device *dev,
 819				 struct device_attribute *attr, char *buf)
 820{
 821	struct Scsi_Host *shost = class_to_shost(dev);
 822	struct ata_port *ap = ata_shost_to_port(shost);
 823
 824	if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
 825		return -EINVAL;
 826
 827	return sysfs_emit(buf, "%s\n",
 828			ata_lpm_policy_names[ap->target_lpm_policy]);
 829}
 830DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
 831	    ata_scsi_lpm_show, ata_scsi_lpm_store);
 832EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
 833
 834static ssize_t ata_ncq_prio_supported_show(struct device *device,
 835					   struct device_attribute *attr,
 836					   char *buf)
 837{
 838	struct scsi_device *sdev = to_scsi_device(device);
 839	struct ata_port *ap = ata_shost_to_port(sdev->host);
 840	struct ata_device *dev;
 841	bool ncq_prio_supported;
 842	int rc = 0;
 843
 844	spin_lock_irq(ap->lock);
 845	dev = ata_scsi_find_dev(ap, sdev);
 846	if (!dev)
 847		rc = -ENODEV;
 848	else
 849		ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
 850	spin_unlock_irq(ap->lock);
 851
 852	return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
 853}
 854
 855DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
 856EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
 857
 858static ssize_t ata_ncq_prio_enable_show(struct device *device,
 859					struct device_attribute *attr,
 860					char *buf)
 861{
 862	struct scsi_device *sdev = to_scsi_device(device);
 863	struct ata_port *ap = ata_shost_to_port(sdev->host);
 864	struct ata_device *dev;
 865	bool ncq_prio_enable;
 866	int rc = 0;
 867
 868	spin_lock_irq(ap->lock);
 869	dev = ata_scsi_find_dev(ap, sdev);
 870	if (!dev)
 871		rc = -ENODEV;
 872	else
 873		ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
 874	spin_unlock_irq(ap->lock);
 875
 876	return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
 877}
 878
 879static ssize_t ata_ncq_prio_enable_store(struct device *device,
 880					 struct device_attribute *attr,
 881					 const char *buf, size_t len)
 882{
 883	struct scsi_device *sdev = to_scsi_device(device);
 884	struct ata_port *ap;
 885	struct ata_device *dev;
 886	long int input;
 887	int rc = 0;
 888
 889	rc = kstrtol(buf, 10, &input);
 890	if (rc)
 891		return rc;
 892	if ((input < 0) || (input > 1))
 893		return -EINVAL;
 894
 895	ap = ata_shost_to_port(sdev->host);
 896	dev = ata_scsi_find_dev(ap, sdev);
 897	if (unlikely(!dev))
 898		return  -ENODEV;
 899
 900	spin_lock_irq(ap->lock);
 901
 902	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
 903		rc = -EINVAL;
 904		goto unlock;
 905	}
 906
 907	if (input)
 908		dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLED;
 909	else
 910		dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
 911
 912unlock:
 913	spin_unlock_irq(ap->lock);
 914
 915	return rc ? rc : len;
 916}
 917
 918DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
 919	    ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
 920EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
 921
 922static struct attribute *ata_ncq_sdev_attrs[] = {
 923	&dev_attr_unload_heads.attr,
 924	&dev_attr_ncq_prio_enable.attr,
 925	&dev_attr_ncq_prio_supported.attr,
 926	NULL
 927};
 928
 929static const struct attribute_group ata_ncq_sdev_attr_group = {
 930	.attrs = ata_ncq_sdev_attrs
 931};
 932
 933const struct attribute_group *ata_ncq_sdev_groups[] = {
 934	&ata_ncq_sdev_attr_group,
 935	NULL
 936};
 937EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups);
 938
 939static ssize_t
 940ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
 941			  const char *buf, size_t count)
 942{
 943	struct Scsi_Host *shost = class_to_shost(dev);
 944	struct ata_port *ap = ata_shost_to_port(shost);
 945	if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
 946		return ap->ops->em_store(ap, buf, count);
 947	return -EINVAL;
 948}
 949
 950static ssize_t
 951ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
 952			 char *buf)
 953{
 954	struct Scsi_Host *shost = class_to_shost(dev);
 955	struct ata_port *ap = ata_shost_to_port(shost);
 956
 957	if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
 958		return ap->ops->em_show(ap, buf);
 959	return -EINVAL;
 960}
 961DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
 962		ata_scsi_em_message_show, ata_scsi_em_message_store);
 963EXPORT_SYMBOL_GPL(dev_attr_em_message);
 964
 965static ssize_t
 966ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
 967			      char *buf)
 968{
 969	struct Scsi_Host *shost = class_to_shost(dev);
 970	struct ata_port *ap = ata_shost_to_port(shost);
 971
 972	return sysfs_emit(buf, "%d\n", ap->em_message_type);
 973}
 974DEVICE_ATTR(em_message_type, S_IRUGO,
 975		  ata_scsi_em_message_type_show, NULL);
 976EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
 977
 978static ssize_t
 979ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
 980		char *buf)
 981{
 982	struct scsi_device *sdev = to_scsi_device(dev);
 983	struct ata_port *ap = ata_shost_to_port(sdev->host);
 984	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
 985
 986	if (atadev && ap->ops->sw_activity_show &&
 987	    (ap->flags & ATA_FLAG_SW_ACTIVITY))
 988		return ap->ops->sw_activity_show(atadev, buf);
 989	return -EINVAL;
 990}
 991
 992static ssize_t
 993ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
 994	const char *buf, size_t count)
 995{
 996	struct scsi_device *sdev = to_scsi_device(dev);
 997	struct ata_port *ap = ata_shost_to_port(sdev->host);
 998	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
 999	enum sw_activity val;
1000	int rc;
1001
1002	if (atadev && ap->ops->sw_activity_store &&
1003	    (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
1004		val = simple_strtoul(buf, NULL, 0);
1005		switch (val) {
1006		case OFF: case BLINK_ON: case BLINK_OFF:
1007			rc = ap->ops->sw_activity_store(atadev, val);
1008			if (!rc)
1009				return count;
1010			else
1011				return rc;
1012		}
1013	}
1014	return -EINVAL;
1015}
1016DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
1017			ata_scsi_activity_store);
1018EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
1019
1020/**
1021 *	ata_change_queue_depth - Set a device maximum queue depth
1022 *	@ap: ATA port of the target device
1023 *	@dev: target ATA device
1024 *	@sdev: SCSI device to configure queue depth for
1025 *	@queue_depth: new queue depth
1026 *
1027 *	Helper to set a device maximum queue depth, usable with both libsas
1028 *	and libata.
1029 *
1030 */
1031int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
1032			   struct scsi_device *sdev, int queue_depth)
1033{
1034	unsigned long flags;
1035
1036	if (!dev || !ata_dev_enabled(dev))
1037		return sdev->queue_depth;
1038
1039	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
1040		return sdev->queue_depth;
1041
1042	/* NCQ enabled? */
1043	spin_lock_irqsave(ap->lock, flags);
1044	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
1045	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
1046		dev->flags |= ATA_DFLAG_NCQ_OFF;
1047		queue_depth = 1;
1048	}
1049	spin_unlock_irqrestore(ap->lock, flags);
1050
1051	/* limit and apply queue depth */
1052	queue_depth = min(queue_depth, sdev->host->can_queue);
1053	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
1054	queue_depth = min(queue_depth, ATA_MAX_QUEUE);
1055
1056	if (sdev->queue_depth == queue_depth)
1057		return -EINVAL;
1058
1059	return scsi_change_queue_depth(sdev, queue_depth);
1060}
1061EXPORT_SYMBOL_GPL(ata_change_queue_depth);
1062
1063/**
1064 *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
1065 *	@sdev: SCSI device to configure queue depth for
1066 *	@queue_depth: new queue depth
1067 *
1068 *	This is libata standard hostt->change_queue_depth callback.
1069 *	SCSI will call into this callback when user tries to set queue
1070 *	depth via sysfs.
1071 *
1072 *	LOCKING:
1073 *	SCSI layer (we don't care)
1074 *
1075 *	RETURNS:
1076 *	Newly configured queue depth.
1077 */
1078int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
1079{
1080	struct ata_port *ap = ata_shost_to_port(sdev->host);
1081
1082	return ata_change_queue_depth(ap, ata_scsi_find_dev(ap, sdev),
1083				      sdev, queue_depth);
1084}
1085EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
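
/*
 * Usage sketch (hypothetical host template; drivers normally pick these
 * hooks up via libata's ATA_NCQ_SHT macro rather than spelling them out):
 * wire the sysfs queue depth knob and the NCQ device attributes above into
 * the SCSI midlayer.
 */
static struct scsi_host_template foo_sht = {
	.module			= THIS_MODULE,
	.name			= "foo",
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.sdev_groups		= ata_ncq_sdev_groups,
};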
1086
1087/**
1088 *	ata_sas_port_alloc - Allocate port for a SAS attached SATA device
1089 *	@host: ATA host container for all SAS ports
1090 *	@port_info: Information from low-level host driver
1091 *	@shost: SCSI host that the scsi device is attached to
1092 *
1093 *	LOCKING:
1094 *	PCI/etc. bus probe sem.
1095 *
1096 *	RETURNS:
1097 *	ata_port pointer on success / NULL on failure.
1098 */
1099
1100struct ata_port *ata_sas_port_alloc(struct ata_host *host,
1101				    struct ata_port_info *port_info,
1102				    struct Scsi_Host *shost)
1103{
1104	struct ata_port *ap;
1105
1106	ap = ata_port_alloc(host);
1107	if (!ap)
1108		return NULL;
1109
1110	ap->port_no = 0;
1111	ap->lock = &host->lock;
1112	ap->pio_mask = port_info->pio_mask;
1113	ap->mwdma_mask = port_info->mwdma_mask;
1114	ap->udma_mask = port_info->udma_mask;
1115	ap->flags |= port_info->flags;
1116	ap->ops = port_info->port_ops;
1117	ap->cbl = ATA_CBL_SATA;
1118
1119	return ap;
1120}
1121EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
1122
1123/**
1124 *	ata_sas_port_start - Set port up for dma.
1125 *	@ap: Port to initialize
1126 *
1127 *	Called just after data structures for each port are
1128 *	initialized.
1129 *
1130 *	May be used as the port_start() entry in ata_port_operations.
1131 *
1132 *	LOCKING:
1133 *	Inherited from caller.
1134 */
1135int ata_sas_port_start(struct ata_port *ap)
1136{
1137	/*
1138	 * the port is marked as frozen at allocation time, but if we don't
1139	 * have new eh, we won't thaw it
1140	 */
1141	if (!ap->ops->error_handler)
1142		ap->pflags &= ~ATA_PFLAG_FROZEN;
1143	return 0;
1144}
1145EXPORT_SYMBOL_GPL(ata_sas_port_start);
1146
1147/**
1148 *	ata_sas_port_stop - Undo ata_sas_port_start()
1149 *	@ap: Port to shut down
1150 *
1151 *	May be used as the port_stop() entry in ata_port_operations.
1152 *
1153 *	LOCKING:
1154 *	Inherited from caller.
1155 */
1156
1157void ata_sas_port_stop(struct ata_port *ap)
1158{
1159}
1160EXPORT_SYMBOL_GPL(ata_sas_port_stop);
1161
1162/**
1163 * ata_sas_async_probe - simply schedule probing and return
1164 * @ap: Port to probe
1165 *
1166 * For batch scheduling of probe for sas attached ata devices, assumes
1167 * the port has already been through ata_sas_port_init()
1168 */
1169void ata_sas_async_probe(struct ata_port *ap)
1170{
1171	__ata_port_probe(ap);
1172}
1173EXPORT_SYMBOL_GPL(ata_sas_async_probe);
1174
1175int ata_sas_sync_probe(struct ata_port *ap)
1176{
1177	return ata_port_probe(ap);
1178}
1179EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
1180
1181
1182/**
1183 *	ata_sas_port_init - Initialize a SATA device
1184 *	@ap: SATA port to initialize
1185 *
1186 *	LOCKING:
1187 *	PCI/etc. bus probe sem.
1188 *
1189 *	RETURNS:
1190 *	Zero on success, non-zero on error.
1191 */
1192
1193int ata_sas_port_init(struct ata_port *ap)
1194{
1195	int rc = ap->ops->port_start(ap);
1196
1197	if (rc)
1198		return rc;
1199	ap->print_id = atomic_inc_return(&ata_print_id);
1200	return 0;
1201}
1202EXPORT_SYMBOL_GPL(ata_sas_port_init);
1203
1204int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
1205{
1206	return ata_tport_add(parent, ap);
1207}
1208EXPORT_SYMBOL_GPL(ata_sas_tport_add);
1209
1210void ata_sas_tport_delete(struct ata_port *ap)
1211{
1212	ata_tport_delete(ap);
1213}
1214EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
1215
1216/**
1217 *	ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
1218 *	@ap: SATA port to destroy
1219 *
1220 */
1221
1222void ata_sas_port_destroy(struct ata_port *ap)
1223{
1224	if (ap->ops->port_stop)
1225		ap->ops->port_stop(ap);
1226	kfree(ap);
1227}
1228EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
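
/*
 * Usage sketch (hypothetical SAS LLDD glue; the exact ordering used by real
 * libsas drivers may differ): allocate the port, run its port_start hook
 * via ata_sas_port_init(), register the transport object, then schedule
 * probing.  On failure the port is destroyed again.
 */
static struct ata_port *foo_sata_phy_up(struct ata_host *host,
					struct ata_port_info *pi,
					struct Scsi_Host *shost,
					struct device *parent)
{
	struct ata_port *ap;

	ap = ata_sas_port_alloc(host, pi, shost);
	if (!ap)
		return NULL;

	if (ata_sas_port_init(ap) || ata_sas_tport_add(parent, ap)) {
		ata_sas_port_destroy(ap);
		return NULL;
	}

	ata_sas_async_probe(ap);
	return ap;
}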
1229
1230/**
1231 *	ata_sas_slave_configure - Default slave_config routine for libata devices
1232 *	@sdev: SCSI device to configure
1233 *	@ap: ATA port to which SCSI device is attached
1234 *
1235 *	RETURNS:
1236 *	Zero.
1237 */
1238
1239int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
1240{
1241	ata_scsi_sdev_config(sdev);
1242	ata_scsi_dev_config(sdev, ap->link.device);
1243	return 0;
1244}
1245EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
1246
1247/**
1248 *	ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
1249 *	@cmd: SCSI command to be sent
1250 *	@ap:	ATA port to which the command is being sent
1251 *
1252 *	RETURNS:
1253 *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
1254 *	0 otherwise.
1255 */
1256
1257int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
1258{
1259	int rc = 0;
1260
1261	if (likely(ata_dev_enabled(ap->link.device)))
1262		rc = __ata_scsi_queuecmd(cmd, ap->link.device);
1263	else {
1264		cmd->result = (DID_BAD_TARGET << 16);
1265		scsi_done(cmd);
1266	}
1267	return rc;
1268}
1269EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
1270
1271/**
1272 *	sata_async_notification - SATA async notification handler
1273 *	@ap: ATA port where async notification is received
1274 *
1275 *	Handler to be called when async notification via SDB FIS is
1276 *	received.  This function schedules EH if necessary.
1277 *
1278 *	LOCKING:
1279 *	spin_lock_irqsave(host lock)
1280 *
1281 *	RETURNS:
1282 *	1 if EH is scheduled, 0 otherwise.
1283 */
1284int sata_async_notification(struct ata_port *ap)
1285{
1286	u32 sntf;
1287	int rc;
1288
1289	if (!(ap->flags & ATA_FLAG_AN))
1290		return 0;
1291
1292	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1293	if (rc == 0)
1294		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1295
1296	if (!sata_pmp_attached(ap) || rc) {
1297		/* PMP is not attached or SNTF is not available */
1298		if (!sata_pmp_attached(ap)) {
1299			/* PMP is not attached.  Check whether ATAPI
1300			 * AN is configured.  If so, notify media
1301			 * change.
1302			 */
1303			struct ata_device *dev = ap->link.device;
1304
1305			if ((dev->class == ATA_DEV_ATAPI) &&
1306			    (dev->flags & ATA_DFLAG_AN))
1307				ata_scsi_media_change_notify(dev);
1308			return 0;
1309		} else {
1310			/* PMP is attached but SNTF is not available.
1311			 * ATAPI async media change notification is
1312			 * not used.  The PMP must be reporting PHY
1313			 * status change, schedule EH.
1314			 */
1315			ata_port_schedule_eh(ap);
1316			return 1;
1317		}
1318	} else {
1319		/* PMP is attached and SNTF is available */
1320		struct ata_link *link;
1321
1322		/* check and notify ATAPI AN */
1323		ata_for_each_link(link, ap, EDGE) {
1324			if (!(sntf & (1 << link->pmp)))
1325				continue;
1326
1327			if ((link->device->class == ATA_DEV_ATAPI) &&
1328			    (link->device->flags & ATA_DFLAG_AN))
1329				ata_scsi_media_change_notify(link->device);
1330		}
1331
1332		/* If PMP is reporting that PHY status of some
1333		 * downstream ports has changed, schedule EH.
1334		 */
1335		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1336			ata_port_schedule_eh(ap);
1337			return 1;
1338		}
1339
1340		return 0;
1341	}
1342}
1343EXPORT_SYMBOL_GPL(sata_async_notification);
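
/*
 * Usage sketch (hypothetical interrupt handler): when the controller flags
 * reception of a Set Device Bits FIS with the Notification bit set, let
 * libata work out whether it is an ATAPI media change or a PMP event.
 * FOO_IRQ_SDB_NOTIFY is an assumed, driver-specific status bit.
 */
static void foo_handle_sdb_notify(struct ata_port *ap, u32 irq_stat)
{
	if (irq_stat & FOO_IRQ_SDB_NOTIFY)
		sata_async_notification(ap);
}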
1344
1345/**
1346 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1347 *	@dev: Device to read log page 10h from
1348 *	@tag: Resulting tag of the failed command
1349 *	@tf: Resulting taskfile registers of the failed command
1350 *
1351 *	Read log page 10h to obtain NCQ error details and clear error
1352 *	condition.
1353 *
1354 *	LOCKING:
1355 *	Kernel thread context (may sleep).
1356 *
1357 *	RETURNS:
1358 *	0 on success, -errno otherwise.
1359 */
1360static int ata_eh_read_log_10h(struct ata_device *dev,
1361			       int *tag, struct ata_taskfile *tf)
1362{
1363	u8 *buf = dev->link->ap->sector_buf;
1364	unsigned int err_mask;
1365	u8 csum;
1366	int i;
1367
1368	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1369	if (err_mask)
1370		return -EIO;
1371
1372	csum = 0;
1373	for (i = 0; i < ATA_SECT_SIZE; i++)
1374		csum += buf[i];
1375	if (csum)
1376		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1377			     csum);
1378
1379	if (buf[0] & 0x80)
1380		return -ENOENT;
1381
1382	*tag = buf[0] & 0x1f;
1383
1384	tf->status = buf[2];
1385	tf->error = buf[3];
1386	tf->lbal = buf[4];
1387	tf->lbam = buf[5];
1388	tf->lbah = buf[6];
1389	tf->device = buf[7];
1390	tf->hob_lbal = buf[8];
1391	tf->hob_lbam = buf[9];
1392	tf->hob_lbah = buf[10];
1393	tf->nsect = buf[12];
1394	tf->hob_nsect = buf[13];
1395	if (ata_id_has_ncq_autosense(dev->id) && (tf->status & ATA_SENSE))
1396		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1397
1398	return 0;
1399}
1400
1401/**
1402 *	ata_eh_analyze_ncq_error - analyze NCQ error
1403 *	@link: ATA link to analyze NCQ error for
1404 *
1405 *	Read log page 10h, determine the offending qc and acquire
1406 *	error status TF.  For NCQ device errors, all LLDDs have to do
 1407 *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
1408 *	care of the rest.
1409 *
1410 *	LOCKING:
1411 *	Kernel thread context (may sleep).
1412 */
1413void ata_eh_analyze_ncq_error(struct ata_link *link)
1414{
1415	struct ata_port *ap = link->ap;
1416	struct ata_eh_context *ehc = &link->eh_context;
1417	struct ata_device *dev = link->device;
1418	struct ata_queued_cmd *qc;
1419	struct ata_taskfile tf;
1420	int tag, rc;
1421
1422	/* if frozen, we can't do much */
1423	if (ata_port_is_frozen(ap))
1424		return;
1425
1426	/* is it NCQ device error? */
1427	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1428		return;
1429
1430	/* has LLDD analyzed already? */
1431	ata_qc_for_each_raw(ap, qc, tag) {
1432		if (!(qc->flags & ATA_QCFLAG_FAILED))
1433			continue;
1434
1435		if (qc->err_mask)
1436			return;
1437	}
1438
1439	/* okay, this error is ours */
1440	memset(&tf, 0, sizeof(tf));
1441	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1442	if (rc) {
1443		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1444			     rc);
1445		return;
1446	}
1447
1448	if (!(link->sactive & (1 << tag))) {
1449		ata_link_err(link, "log page 10h reported inactive tag %d\n",
1450			     tag);
1451		return;
1452	}
1453
1454	/* we've got the perpetrator, condemn it */
1455	qc = __ata_qc_from_tag(ap, tag);
1456	memcpy(&qc->result_tf, &tf, sizeof(tf));
1457	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1458	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1459
1460	/*
1461	 * If the device supports NCQ autosense, ata_eh_read_log_10h() will have
1462	 * stored the sense data in qc->result_tf.auxiliary.
1463	 */
1464	if (qc->result_tf.auxiliary) {
1465		char sense_key, asc, ascq;
1466
1467		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1468		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
1469		ascq = qc->result_tf.auxiliary & 0xff;
1470		if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
1471			ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
1472					   ascq);
1473			ata_scsi_set_sense_information(dev, qc->scsicmd,
1474						       &qc->result_tf);
1475			qc->flags |= ATA_QCFLAG_SENSE_VALID;
1476		}
1477	}
1478
1479	ata_qc_for_each_raw(ap, qc, tag) {
1480		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
1481		    ata_dev_phys_link(qc->dev) != link)
1482			continue;
1483
1484		/* Skip the single QC which caused the NCQ error. */
1485		if (qc->err_mask)
1486			continue;
1487
1488		/*
1489		 * For SATA, the STATUS and ERROR fields are shared for all NCQ
1490		 * commands that were completed with the same SDB FIS.
1491		 * Therefore, we have to clear the ATA_ERR bit for all QCs
1492		 * except the one that caused the NCQ error.
1493		 */
1494		qc->result_tf.status &= ~ATA_ERR;
1495		qc->result_tf.error = 0;
1496
1497		/*
 1498		 * If we get an NCQ error, that means that a single command was
 1499		 * aborted. All other failed commands for our link should be
 1500		 * retried and have no business going through further scrutiny
1501		 * by ata_eh_link_autopsy().
1502		 */
1503		qc->flags |= ATA_QCFLAG_RETRY;
1504	}
1505
1506	ehc->i.err_mask &= ~AC_ERR_DEV;
1507}
1508EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
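
/*
 * Usage sketch (hypothetical interrupt path, per the kernel-doc above): on
 * an NCQ device error the LLD only flags AC_ERR_DEV and kicks error
 * handling; ata_eh_analyze_ncq_error() is then run from libata's EH autopsy
 * to read log page 10h and pin the failure on the right tag.
 */
static void foo_ncq_device_error(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ehi->err_mask |= AC_ERR_DEV;
	ata_port_abort(ap);
}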