   1/*
   2 * This file is provided under a dual BSD/GPLv2 license.  When using or
   3 * redistributing this file, you may do so under either license.
   4 *
   5 * GPL LICENSE SUMMARY
   6 *
   7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21 * The full GNU General Public License is included in this distribution
  22 * in the file called LICENSE.GPL.
  23 *
  24 * BSD LICENSE
  25 *
  26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27 * All rights reserved.
  28 *
  29 * Redistribution and use in source and binary forms, with or without
  30 * modification, are permitted provided that the following conditions
  31 * are met:
  32 *
  33 *   * Redistributions of source code must retain the above copyright
  34 *     notice, this list of conditions and the following disclaimer.
  35 *   * Redistributions in binary form must reproduce the above copyright
  36 *     notice, this list of conditions and the following disclaimer in
  37 *     the documentation and/or other materials provided with the
  38 *     distribution.
  39 *   * Neither the name of Intel Corporation nor the names of its
  40 *     contributors may be used to endorse or promote products derived
  41 *     from this software without specific prior written permission.
  42 *
  43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54 */
  55#include <scsi/sas.h>
  56#include <linux/bitops.h>
  57#include "isci.h"
  58#include "port.h"
  59#include "remote_device.h"
  60#include "request.h"
  61#include "remote_node_context.h"
  62#include "scu_event_codes.h"
  63#include "task.h"
  64
  65#undef C
  66#define C(a) (#a)
  67const char *dev_state_name(enum sci_remote_device_states state)
  68{
  69	static const char * const strings[] = REMOTE_DEV_STATES;
  70
  71	return strings[state];
  72}
  73#undef C
  74
  75enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
  76					  enum sci_remote_node_suspension_reasons reason)
  77{
  78	return sci_remote_node_context_suspend(&idev->rnc, reason,
  79					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
  80}
  81
  82/**
  83 * isci_remote_device_ready() - This function is called by the ihost when the
  84 *    remote device is ready. We mark the isci device as ready and signal the
   85 *    waiting process.
  86 * @ihost: our valid isci_host
  87 * @idev: remote device
  88 *
  89 */
  90static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
  91{
  92	dev_dbg(&ihost->pdev->dev,
  93		"%s: idev = %p\n", __func__, idev);
  94
  95	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
  96	set_bit(IDEV_IO_READY, &idev->flags);
  97	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
  98		wake_up(&ihost->eventq);
  99}
 100
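/**
 * sci_remote_device_terminate_req() - conditionally terminate one request
 * @ihost: host owning the request
 * @idev: device the request must belong to
 * @check_abort: if non-zero, only terminate requests marked IREQ_PENDING_ABORT
 * @ireq: the request to terminate
 *
 * Returns SCI_SUCCESS without doing anything when the request is inactive,
 * targets a different device, or fails the pending-abort check; otherwise
 * marks the request's abort path active and asks the controller to
 * terminate it.
 */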
 101static enum sci_status sci_remote_device_terminate_req(
 102	struct isci_host *ihost,
 103	struct isci_remote_device *idev,
 104	int check_abort,
 105	struct isci_request *ireq)
 106{
 107	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
 108	    (ireq->target_device != idev) ||
 109	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
 110		return SCI_SUCCESS;
 111
 112	dev_dbg(&ihost->pdev->dev,
 113		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
 114		__func__, idev, idev->flags, ireq, ireq->target_device);
 115
 116	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
 117
 118	return sci_controller_terminate_request(ihost, idev, ireq);
 119}
 120
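/* Terminate every active request on this device (optionally only those
 * pending abort), returning the last non-success status seen, if any.
 */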
 121static enum sci_status sci_remote_device_terminate_reqs_checkabort(
 122	struct isci_remote_device *idev,
 123	int chk)
 124{
 125	struct isci_host *ihost = idev->owning_port->owning_controller;
 126	enum sci_status status  = SCI_SUCCESS;
 127	u32 i;
 128
 129	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
 130		struct isci_request *ireq = ihost->reqs[i];
 131		enum sci_status s;
 132
 133		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
 134		if (s != SCI_SUCCESS)
 135			status = s;
 136	}
 137	return status;
 138}
 139
 140static bool isci_compare_suspendcount(
 141	struct isci_remote_device *idev,
 142	u32 localcount)
 143{
 144	smp_rmb();
 145
 146	/* Check for a change in the suspend count, or the RNC
 147	 * being destroyed.
 148	 */
 149	return (localcount != idev->rnc.suspend_count)
 150	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
 151}
 152
 153static bool isci_check_reqterm(
 154	struct isci_host *ihost,
 155	struct isci_remote_device *idev,
 156	struct isci_request *ireq,
 157	u32 localcount)
 158{
 159	unsigned long flags;
 160	bool res;
 161
 162	spin_lock_irqsave(&ihost->scic_lock, flags);
 163	res = isci_compare_suspendcount(idev, localcount)
 164		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
 165	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 166
 167	return res;
 168}
 169
 170static bool isci_check_devempty(
 171	struct isci_host *ihost,
 172	struct isci_remote_device *idev,
 173	u32 localcount)
 174{
 175	unsigned long flags;
 176	bool res;
 177
 178	spin_lock_irqsave(&ihost->scic_lock, flags);
 179	res = isci_compare_suspendcount(idev, localcount)
 180		&& idev->started_request_count == 0;
 181	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 182
 183	return res;
 184}
 185
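/**
 * isci_remote_device_terminate_requests() - terminate requests and wait
 * @ihost: host owning the device
 * @idev: remote device whose request(s) should be terminated
 * @ireq: a specific request to terminate, or NULL to terminate all of the
 *    device's started requests
 *
 * Takes a reference on the device, issues the termination(s), and then waits
 * up to MAX_SUSPEND_MSECS for the RNC suspension and the termination(s) to
 * complete.  Returns SCI_FAILURE if the device reference could not be taken.
 */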
 186enum sci_status isci_remote_device_terminate_requests(
 187	struct isci_host *ihost,
 188	struct isci_remote_device *idev,
 189	struct isci_request *ireq)
 190{
 191	enum sci_status status = SCI_SUCCESS;
 192	unsigned long flags;
 193	u32 rnc_suspend_count;
 194
 195	spin_lock_irqsave(&ihost->scic_lock, flags);
 196
 197	if (isci_get_device(idev) == NULL) {
 198		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
 199			__func__, idev);
 200		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 201		status = SCI_FAILURE;
 202	} else {
 203		/* If already suspended, don't wait for another suspension. */
 204		smp_rmb();
 205		rnc_suspend_count
 206			= sci_remote_node_context_is_suspended(&idev->rnc)
 207				? 0 : idev->rnc.suspend_count;
 208
 209		dev_dbg(&ihost->pdev->dev,
 210			"%s: idev=%p, ireq=%p; started_request_count=%d, "
  211				"rnc_suspend_count=%d, rnc.suspend_count=%d; "
 212				"about to wait\n",
 213			__func__, idev, ireq, idev->started_request_count,
 214			rnc_suspend_count, idev->rnc.suspend_count);
 215
 216		#define MAX_SUSPEND_MSECS 10000
 217		if (ireq) {
 218			/* Terminate a specific TC. */
 219			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
 220			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
 221			spin_unlock_irqrestore(&ihost->scic_lock, flags);
 222			if (!wait_event_timeout(ihost->eventq,
 223						isci_check_reqterm(ihost, idev, ireq,
 224								   rnc_suspend_count),
 225						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
 226
 227				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
 228					 __func__, ihost->id);
 229				dev_dbg(&ihost->pdev->dev,
 230					 "%s: ******* Timeout waiting for "
 231					 "suspend; idev=%p, current state %s; "
 232					 "started_request_count=%d, flags=%lx\n\t"
 233					 "rnc_suspend_count=%d, rnc.suspend_count=%d "
 234					 "RNC: current state %s, current "
 235					 "suspend_type %x dest state %d;\n"
 236					 "ireq=%p, ireq->flags = %lx\n",
 237					 __func__, idev,
 238					 dev_state_name(idev->sm.current_state_id),
 239					 idev->started_request_count, idev->flags,
 240					 rnc_suspend_count, idev->rnc.suspend_count,
 241					 rnc_state_name(idev->rnc.sm.current_state_id),
 242					 idev->rnc.suspend_type,
 243					 idev->rnc.destination_state,
 244					 ireq, ireq->flags);
 245			}
 246			spin_lock_irqsave(&ihost->scic_lock, flags);
 247			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
 248			if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
 249				isci_free_tag(ihost, ireq->io_tag);
 250			spin_unlock_irqrestore(&ihost->scic_lock, flags);
 251		} else {
 252			/* Terminate all TCs. */
 253			sci_remote_device_terminate_requests(idev);
 254			spin_unlock_irqrestore(&ihost->scic_lock, flags);
 255			if (!wait_event_timeout(ihost->eventq,
 256						isci_check_devempty(ihost, idev,
 257								    rnc_suspend_count),
 258						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
 259
 260				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
 261					 __func__, ihost->id);
 262				dev_dbg(&ihost->pdev->dev,
 263					"%s: ******* Timeout waiting for "
 264					"suspend; idev=%p, current state %s; "
 265					"started_request_count=%d, flags=%lx\n\t"
 266					"rnc_suspend_count=%d, "
 267					"RNC: current state %s, "
 268					"rnc.suspend_count=%d, current "
 269					"suspend_type %x dest state %d\n",
 270					__func__, idev,
 271					dev_state_name(idev->sm.current_state_id),
 272					idev->started_request_count, idev->flags,
 273					rnc_suspend_count,
 274					rnc_state_name(idev->rnc.sm.current_state_id),
 275					idev->rnc.suspend_count,
 276					idev->rnc.suspend_type,
 277					idev->rnc.destination_state);
 278			}
 279		}
 280		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
 281			__func__, idev);
 282		isci_put_device(idev);
 283	}
 284	return status;
 285}
 286
 287/**
  288 * isci_remote_device_not_ready() - This function is called by the ihost when
  289 *    the remote device is not ready. We mark the isci device as not ready for
  290 *    I/O; on an NCQ error the device is also suspended and its requests terminated.
  291 * @ihost: This parameter specifies the isci host object.
  292 * @idev: This parameter specifies the remote device.
  293 * @reason: The reason the device is not ready.
  294 *
  295 * scic_lock is held on entrance to this function.
  296 */
 297static void isci_remote_device_not_ready(struct isci_host *ihost,
 298					 struct isci_remote_device *idev,
 299					 u32 reason)
 300{
 301	dev_dbg(&ihost->pdev->dev,
 302		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
 303
 304	switch (reason) {
 305	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
 306		set_bit(IDEV_IO_NCQERROR, &idev->flags);
 307
 308		/* Suspend the remote device so the I/O can be terminated. */
 309		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
 310
 311		/* Kill all outstanding requests for the device. */
 312		sci_remote_device_terminate_requests(idev);
 313
 314		fallthrough;	/* into the default case */
 315	default:
 316		clear_bit(IDEV_IO_READY, &idev->flags);
 317		break;
 318	}
 319}
 320
 321/* called once the remote node context is ready to be freed.
  322 * The remote device can now report that its stop operation is complete.
 323 */
 324static void rnc_destruct_done(void *_dev)
 325{
 326	struct isci_remote_device *idev = _dev;
 327
 328	BUG_ON(idev->started_request_count != 0);
 329	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
 330}
 331
 332enum sci_status sci_remote_device_terminate_requests(
 333	struct isci_remote_device *idev)
 334{
 335	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
 336}
 337
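/**
 * sci_remote_device_stop() - request that the remote device be stopped
 * @idev: the device to stop
 * @timeout: the number of milliseconds allowed for the stop operation
 *
 * Depending on the current state this either returns immediately (already
 * stopped), destructs the remote node context, or terminates outstanding
 * requests and transitions the device to SCI_DEV_STOPPING.
 */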
 338enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
 339					u32 timeout)
 340{
 341	struct sci_base_state_machine *sm = &idev->sm;
 342	enum sci_remote_device_states state = sm->current_state_id;
 343
 344	switch (state) {
 345	case SCI_DEV_INITIAL:
 346	case SCI_DEV_FAILED:
 347	case SCI_DEV_FINAL:
 348	default:
 349		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 350			 __func__, dev_state_name(state));
 351		return SCI_FAILURE_INVALID_STATE;
 352	case SCI_DEV_STOPPED:
 353		return SCI_SUCCESS;
 354	case SCI_DEV_STARTING:
 355		/* device not started so there had better be no requests */
 356		BUG_ON(idev->started_request_count != 0);
 357		sci_remote_node_context_destruct(&idev->rnc,
 358						      rnc_destruct_done, idev);
 359		/* Transition to the stopping state and wait for the
 360		 * remote node to complete being posted and invalidated.
 361		 */
 362		sci_change_state(sm, SCI_DEV_STOPPING);
 363		return SCI_SUCCESS;
 364	case SCI_DEV_READY:
 365	case SCI_STP_DEV_IDLE:
 366	case SCI_STP_DEV_CMD:
 367	case SCI_STP_DEV_NCQ:
 368	case SCI_STP_DEV_NCQ_ERROR:
 369	case SCI_STP_DEV_AWAIT_RESET:
 370	case SCI_SMP_DEV_IDLE:
 371	case SCI_SMP_DEV_CMD:
 372		sci_change_state(sm, SCI_DEV_STOPPING);
 373		if (idev->started_request_count == 0)
 374			sci_remote_node_context_destruct(&idev->rnc,
 375							 rnc_destruct_done,
 376							 idev);
 377		else {
 378			sci_remote_device_suspend(
 379				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
 380			sci_remote_device_terminate_requests(idev);
 381		}
 382		return SCI_SUCCESS;
 383	case SCI_DEV_STOPPING:
 384		/* All requests should have been terminated, but if there is an
 385		 * attempt to stop a device already in the stopping state, then
 386		 * try again to terminate.
 387		 */
 388		return sci_remote_device_terminate_requests(idev);
 389	case SCI_DEV_RESETTING:
 390		sci_change_state(sm, SCI_DEV_STOPPING);
 391		return SCI_SUCCESS;
 392	}
 393}
 394
 395enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
 396{
 397	struct sci_base_state_machine *sm = &idev->sm;
 398	enum sci_remote_device_states state = sm->current_state_id;
 399
 400	switch (state) {
 401	case SCI_DEV_INITIAL:
 402	case SCI_DEV_STOPPED:
 403	case SCI_DEV_STARTING:
 404	case SCI_SMP_DEV_IDLE:
 405	case SCI_SMP_DEV_CMD:
 406	case SCI_DEV_STOPPING:
 407	case SCI_DEV_FAILED:
 408	case SCI_DEV_RESETTING:
 409	case SCI_DEV_FINAL:
 410	default:
 411		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 412			 __func__, dev_state_name(state));
 413		return SCI_FAILURE_INVALID_STATE;
 414	case SCI_DEV_READY:
 415	case SCI_STP_DEV_IDLE:
 416	case SCI_STP_DEV_CMD:
 417	case SCI_STP_DEV_NCQ:
 418	case SCI_STP_DEV_NCQ_ERROR:
 419	case SCI_STP_DEV_AWAIT_RESET:
 420		sci_change_state(sm, SCI_DEV_RESETTING);
 421		return SCI_SUCCESS;
 422	}
 423}
 424
 425enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
 426{
 427	struct sci_base_state_machine *sm = &idev->sm;
 428	enum sci_remote_device_states state = sm->current_state_id;
 429
 430	if (state != SCI_DEV_RESETTING) {
 431		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 432			 __func__, dev_state_name(state));
 433		return SCI_FAILURE_INVALID_STATE;
 434	}
 435
 436	sci_change_state(sm, SCI_DEV_READY);
 437	return SCI_SUCCESS;
 438}
 439
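/**
 * sci_remote_device_frame_handler() - dispatch an unsolicited frame
 * @idev: device the frame was received for
 * @frame_index: index of the frame in the controller's unsolicited frame control
 *
 * In states that cannot accept frames the frame is returned to the
 * controller.  SSP frames are matched to their I/O request by tag, NCQ error
 * FISes move the device to the NCQ error substate, and in the command
 * substates the frame is forwarded to the working request.
 */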
 440enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
 441						     u32 frame_index)
 442{
 443	struct sci_base_state_machine *sm = &idev->sm;
 444	enum sci_remote_device_states state = sm->current_state_id;
 445	struct isci_host *ihost = idev->owning_port->owning_controller;
 446	enum sci_status status;
 447
 448	switch (state) {
 449	case SCI_DEV_INITIAL:
 450	case SCI_DEV_STOPPED:
 451	case SCI_DEV_STARTING:
 452	case SCI_STP_DEV_IDLE:
 453	case SCI_SMP_DEV_IDLE:
 454	case SCI_DEV_FINAL:
 455	default:
 456		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 457			 __func__, dev_state_name(state));
 458		/* Return the frame back to the controller */
 459		sci_controller_release_frame(ihost, frame_index);
 460		return SCI_FAILURE_INVALID_STATE;
 461	case SCI_DEV_READY:
 462	case SCI_STP_DEV_NCQ_ERROR:
 463	case SCI_STP_DEV_AWAIT_RESET:
 464	case SCI_DEV_STOPPING:
 465	case SCI_DEV_FAILED:
 466	case SCI_DEV_RESETTING: {
 467		struct isci_request *ireq;
 468		struct ssp_frame_hdr hdr;
 469		void *frame_header;
 470		ssize_t word_cnt;
 471
 472		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
 473								       frame_index,
 474								       &frame_header);
 475		if (status != SCI_SUCCESS)
 476			return status;
 477
 478		word_cnt = sizeof(hdr) / sizeof(u32);
 479		sci_swab32_cpy(&hdr, frame_header, word_cnt);
 480
 481		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
 482		if (ireq && ireq->target_device == idev) {
 483			/* The IO request is now in charge of releasing the frame */
 484			status = sci_io_request_frame_handler(ireq, frame_index);
 485		} else {
 486			/* We could not map this tag to a valid IO
  487			 * request.  Just toss the frame and continue.
 488			 */
 489			sci_controller_release_frame(ihost, frame_index);
 490		}
 491		break;
 492	}
 493	case SCI_STP_DEV_NCQ: {
 494		struct dev_to_host_fis *hdr;
 495
 496		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
 497								       frame_index,
 498								       (void **)&hdr);
 499		if (status != SCI_SUCCESS)
 500			return status;
 501
 502		if (hdr->fis_type == FIS_SETDEVBITS &&
 503		    (hdr->status & ATA_ERR)) {
 504			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
 505
 506			/* TODO Check sactive and complete associated IO if any. */
 507			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
 508		} else if (hdr->fis_type == FIS_REGD2H &&
 509			   (hdr->status & ATA_ERR)) {
 510			/*
 511			 * Some devices return D2H FIS when an NCQ error is detected.
  512			 * Treat it the same as an SDB error FIS not-ready reason.
 513			 */
 514			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
 515			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
 516		} else
 517			status = SCI_FAILURE;
 518
 519		sci_controller_release_frame(ihost, frame_index);
 520		break;
 521	}
 522	case SCI_STP_DEV_CMD:
 523	case SCI_SMP_DEV_CMD:
 524		/* The device does not process any UF received from the hardware while
 525		 * in this state.  All unsolicited frames are forwarded to the io request
 526		 * object.
 527		 */
 528		status = sci_io_request_frame_handler(idev->working_request, frame_index);
 529		break;
 530	}
 531
 532	return status;
 533}
 534
 535static bool is_remote_device_ready(struct isci_remote_device *idev)
 536{
 537
 538	struct sci_base_state_machine *sm = &idev->sm;
 539	enum sci_remote_device_states state = sm->current_state_id;
 540
 541	switch (state) {
 542	case SCI_DEV_READY:
 543	case SCI_STP_DEV_IDLE:
 544	case SCI_STP_DEV_CMD:
 545	case SCI_STP_DEV_NCQ:
 546	case SCI_STP_DEV_NCQ_ERROR:
 547	case SCI_STP_DEV_AWAIT_RESET:
 548	case SCI_SMP_DEV_IDLE:
 549	case SCI_SMP_DEV_CMD:
 550		return true;
 551	default:
 552		return false;
 553	}
 554}
 555
 556/*
  557 * called once the remote node context has transitioned to a ready
 558 * state (after suspending RX and/or TX due to early D2H fis)
 559 */
 560static void atapi_remote_device_resume_done(void *_dev)
 561{
 562	struct isci_remote_device *idev = _dev;
 563	struct isci_request *ireq = idev->working_request;
 564
 565	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 566}
 567
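/**
 * sci_remote_device_event_handler() - handle an SCU event for this device
 * @idev: device the event was reported against
 * @event_code: the SCU event code
 *
 * RNC suspension/operation events are passed to the remote node context;
 * an I_T nexus timeout suspends the device.  After handling, suspension
 * events seen in the STP idle or ATAPI error substates cause the RNC to be
 * resumed immediately.
 */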
 568enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
 569						     u32 event_code)
 570{
 571	enum sci_status status;
 572	struct sci_base_state_machine *sm = &idev->sm;
 573	enum sci_remote_device_states state = sm->current_state_id;
 574
 575	switch (scu_get_event_type(event_code)) {
 576	case SCU_EVENT_TYPE_RNC_OPS_MISC:
 577	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
 578	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
 579		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
 580		break;
 581	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
 582		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
 583			status = SCI_SUCCESS;
 584
 585			/* Suspend the associated RNC */
 586			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
 587
 588			dev_dbg(scirdev_to_dev(idev),
 589				"%s: device: %p event code: %x: %s\n",
 590				__func__, idev, event_code,
 591				is_remote_device_ready(idev)
 592				? "I_T_Nexus_Timeout event"
 593				: "I_T_Nexus_Timeout event in wrong state");
 594
 595			break;
 596		}
 597		fallthrough;	/* and treat as unhandled */
 598	default:
 599		dev_dbg(scirdev_to_dev(idev),
 600			"%s: device: %p event code: %x: %s\n",
 601			__func__, idev, event_code,
 602			is_remote_device_ready(idev)
 603			? "unexpected event"
 604			: "unexpected event in wrong state");
 605		status = SCI_FAILURE_INVALID_STATE;
 606		break;
 607	}
 608
 609	if (status != SCI_SUCCESS)
 610		return status;
 611
 612	/* Decode device-specific states that may require an RNC resume during
 613	 * normal operation.  When the abort path is active, these resumes are
 614	 * managed when the abort path exits.
 615	 */
 616	if (state == SCI_STP_DEV_ATAPI_ERROR) {
 617		/* For ATAPI error state resume the RNC right away. */
 618		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
 619		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
 620			return sci_remote_node_context_resume(&idev->rnc,
 621							      atapi_remote_device_resume_done,
 622							      idev);
 623		}
 624	}
 625
 626	if (state == SCI_STP_DEV_IDLE) {
 627
  628		/* Suspension events are handled specially in this state:
  629		 * resume the RNC right away.
  630		 */
 631		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
 632		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
 633			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
 634	}
 635
 636	return status;
 637}
 638
 639static void sci_remote_device_start_request(struct isci_remote_device *idev,
 640						 struct isci_request *ireq,
 641						 enum sci_status status)
 642{
 643	struct isci_port *iport = idev->owning_port;
 644
 645	/* cleanup requests that failed after starting on the port */
 646	if (status != SCI_SUCCESS)
 647		sci_port_complete_io(iport, idev, ireq);
 648	else {
 649		kref_get(&idev->kref);
 650		idev->started_request_count++;
 651	}
 652}
 653
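/**
 * sci_remote_device_start_io() - start an I/O request on this device
 * @ihost: host owning the device
 * @idev: device the I/O is directed to
 * @ireq: the I/O request to start
 *
 * Starts the request on the port, the remote node context, and the request
 * itself, moving STP devices to the NCQ or CMD substate and SMP devices to
 * the CMD substate as appropriate.  On success a device reference is taken
 * and the started request count is incremented; on failure the port-level
 * start is undone.
 */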
 654enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
 655						struct isci_remote_device *idev,
 656						struct isci_request *ireq)
 657{
 658	struct sci_base_state_machine *sm = &idev->sm;
 659	enum sci_remote_device_states state = sm->current_state_id;
 660	struct isci_port *iport = idev->owning_port;
 661	enum sci_status status;
 662
 663	switch (state) {
 664	case SCI_DEV_INITIAL:
 665	case SCI_DEV_STOPPED:
 666	case SCI_DEV_STARTING:
 667	case SCI_STP_DEV_NCQ_ERROR:
 668	case SCI_DEV_STOPPING:
 669	case SCI_DEV_FAILED:
 670	case SCI_DEV_RESETTING:
 671	case SCI_DEV_FINAL:
 672	default:
 673		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 674			 __func__, dev_state_name(state));
 675		return SCI_FAILURE_INVALID_STATE;
 676	case SCI_DEV_READY:
  677		/* Attempt to start an io request for this device object. The remote
  678		 * device object will issue the start request for the io and, if
  679		 * successful, it will start the request for the port object and then
  680		 * increment its own request count.
 681		 */
 682		status = sci_port_start_io(iport, idev, ireq);
 683		if (status != SCI_SUCCESS)
 684			return status;
 685
 686		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 687		if (status != SCI_SUCCESS)
 688			break;
 689
 690		status = sci_request_start(ireq);
 691		break;
 692	case SCI_STP_DEV_IDLE: {
  693		/* Handle the start io operation for a sata device that is in
  694		 * the command idle state: evaluate the type of IO request to
  695		 * be started; if it's an NCQ request change to the NCQ substate,
  696		 * if it's any other command change to the CMD substate.
 697		 *
 698		 * If this is a softreset we may want to have a different
 699		 * substate.
 700		 */
 701		enum sci_remote_device_states new_state;
 702		struct sas_task *task = isci_request_access_task(ireq);
 703
 704		status = sci_port_start_io(iport, idev, ireq);
 705		if (status != SCI_SUCCESS)
 706			return status;
 707
 708		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 709		if (status != SCI_SUCCESS)
 710			break;
 711
 712		status = sci_request_start(ireq);
 713		if (status != SCI_SUCCESS)
 714			break;
 715
 716		if (task->ata_task.use_ncq)
 717			new_state = SCI_STP_DEV_NCQ;
 718		else {
 719			idev->working_request = ireq;
 720			new_state = SCI_STP_DEV_CMD;
 721		}
 722		sci_change_state(sm, new_state);
 723		break;
 724	}
 725	case SCI_STP_DEV_NCQ: {
 726		struct sas_task *task = isci_request_access_task(ireq);
 727
 728		if (task->ata_task.use_ncq) {
 729			status = sci_port_start_io(iport, idev, ireq);
 730			if (status != SCI_SUCCESS)
 731				return status;
 732
 733			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 734			if (status != SCI_SUCCESS)
 735				break;
 736
 737			status = sci_request_start(ireq);
 738		} else
 739			return SCI_FAILURE_INVALID_STATE;
 740		break;
 741	}
 742	case SCI_STP_DEV_AWAIT_RESET:
 743		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
 744	case SCI_SMP_DEV_IDLE:
 745		status = sci_port_start_io(iport, idev, ireq);
 746		if (status != SCI_SUCCESS)
 747			return status;
 748
 749		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 750		if (status != SCI_SUCCESS)
 751			break;
 752
 753		status = sci_request_start(ireq);
 754		if (status != SCI_SUCCESS)
 755			break;
 756
 757		idev->working_request = ireq;
 758		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
 759		break;
 760	case SCI_STP_DEV_CMD:
 761	case SCI_SMP_DEV_CMD:
  762		/* The device is already handling a command; it cannot accept new
  763		 * commands until this one is complete.
 764		 */
 765		return SCI_FAILURE_INVALID_STATE;
 766	}
 767
 768	sci_remote_device_start_request(idev, ireq, status);
 769	return status;
 770}
 771
 772static enum sci_status common_complete_io(struct isci_port *iport,
 773					  struct isci_remote_device *idev,
 774					  struct isci_request *ireq)
 775{
 776	enum sci_status status;
 777
 778	status = sci_request_complete(ireq);
 779	if (status != SCI_SUCCESS)
 780		return status;
 781
 782	status = sci_port_complete_io(iport, idev, ireq);
 783	if (status != SCI_SUCCESS)
 784		return status;
 785
 786	sci_remote_device_decrement_request_count(idev);
 787	return status;
 788}
 789
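/**
 * sci_remote_device_complete_io() - complete an I/O request on this device
 * @ihost: host owning the device
 * @idev: device the I/O was directed to
 * @ireq: the request being completed
 *
 * Completes the request at the request and port layers and decrements the
 * device's started request count, then drives any needed substate change:
 * back to idle, to AWAIT_RESET when a device reset is required, or RNC
 * destruction when the device is stopping and no requests remain.
 */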
 790enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
 791						   struct isci_remote_device *idev,
 792						   struct isci_request *ireq)
 793{
 794	struct sci_base_state_machine *sm = &idev->sm;
 795	enum sci_remote_device_states state = sm->current_state_id;
 796	struct isci_port *iport = idev->owning_port;
 797	enum sci_status status;
 798
 799	switch (state) {
 800	case SCI_DEV_INITIAL:
 801	case SCI_DEV_STOPPED:
 802	case SCI_DEV_STARTING:
 803	case SCI_STP_DEV_IDLE:
 804	case SCI_SMP_DEV_IDLE:
 805	case SCI_DEV_FAILED:
 806	case SCI_DEV_FINAL:
 807	default:
 808		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 809			 __func__, dev_state_name(state));
 810		return SCI_FAILURE_INVALID_STATE;
 811	case SCI_DEV_READY:
 812	case SCI_STP_DEV_AWAIT_RESET:
 813	case SCI_DEV_RESETTING:
 814		status = common_complete_io(iport, idev, ireq);
 815		break;
 816	case SCI_STP_DEV_CMD:
 817	case SCI_STP_DEV_NCQ:
 818	case SCI_STP_DEV_NCQ_ERROR:
 819	case SCI_STP_DEV_ATAPI_ERROR:
 820		status = common_complete_io(iport, idev, ireq);
 821		if (status != SCI_SUCCESS)
 822			break;
 823
 824		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
  825			/* This request caused a hardware error; the device needs a LUN reset.
  826			 * Force the state machine to the AWAIT_RESET state so the remaining
  827			 * IOs reach the RNC state handler and are completed by the RNC with
  828			 * a status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
  829			 */
 830			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
 831		} else if (idev->started_request_count == 0)
 832			sci_change_state(sm, SCI_STP_DEV_IDLE);
 833		break;
 834	case SCI_SMP_DEV_CMD:
 835		status = common_complete_io(iport, idev, ireq);
 836		if (status != SCI_SUCCESS)
 837			break;
 838		sci_change_state(sm, SCI_SMP_DEV_IDLE);
 839		break;
 840	case SCI_DEV_STOPPING:
 841		status = common_complete_io(iport, idev, ireq);
 842		if (status != SCI_SUCCESS)
 843			break;
 844
 845		if (idev->started_request_count == 0)
 846			sci_remote_node_context_destruct(&idev->rnc,
 847							 rnc_destruct_done,
 848							 idev);
 849		break;
 850	}
 851
 852	if (status != SCI_SUCCESS)
 853		dev_err(scirdev_to_dev(idev),
 854			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
 855			"could not complete\n", __func__, iport,
 856			idev, ireq, status);
 857	else
 858		isci_put_device(idev);
 859
 860	return status;
 861}
 862
 863static void sci_remote_device_continue_request(void *dev)
 864{
 865	struct isci_remote_device *idev = dev;
 866
 867	/* we need to check if this request is still valid to continue. */
 868	if (idev->working_request)
 869		sci_controller_continue_io(idev->working_request);
 870}
 871
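/**
 * sci_remote_device_start_task() - start a task management request
 * @ihost: host owning the device
 * @idev: device the task is directed to
 * @ireq: the task management request
 *
 * For STP devices the RNC is suspended and the task context post is deferred
 * until the RNC resumes, so SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS is
 * returned to the controller's start-request handler.  For SSP devices in
 * the ready state the task is started directly.
 */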
 872enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
 873						  struct isci_remote_device *idev,
 874						  struct isci_request *ireq)
 875{
 876	struct sci_base_state_machine *sm = &idev->sm;
 877	enum sci_remote_device_states state = sm->current_state_id;
 878	struct isci_port *iport = idev->owning_port;
 879	enum sci_status status;
 880
 881	switch (state) {
 882	case SCI_DEV_INITIAL:
 883	case SCI_DEV_STOPPED:
 884	case SCI_DEV_STARTING:
 885	case SCI_SMP_DEV_IDLE:
 886	case SCI_SMP_DEV_CMD:
 887	case SCI_DEV_STOPPING:
 888	case SCI_DEV_FAILED:
 889	case SCI_DEV_RESETTING:
 890	case SCI_DEV_FINAL:
 891	default:
 892		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 893			 __func__, dev_state_name(state));
 894		return SCI_FAILURE_INVALID_STATE;
 895	case SCI_STP_DEV_IDLE:
 896	case SCI_STP_DEV_CMD:
 897	case SCI_STP_DEV_NCQ:
 898	case SCI_STP_DEV_NCQ_ERROR:
 899	case SCI_STP_DEV_AWAIT_RESET:
 900		status = sci_port_start_io(iport, idev, ireq);
 901		if (status != SCI_SUCCESS)
 902			return status;
 903
 904		status = sci_request_start(ireq);
 905		if (status != SCI_SUCCESS)
 906			goto out;
 907
 908		/* Note: If the remote device state is not IDLE this will
 909		 * replace the request that probably resulted in the task
 910		 * management request.
 911		 */
 912		idev->working_request = ireq;
 913		sci_change_state(sm, SCI_STP_DEV_CMD);
 914
 915		/* The remote node context must cleanup the TCi to NCQ mapping
 916		 * table.  The only way to do this correctly is to either write
 917		 * to the TLCR register or to invalidate and repost the RNC. In
 918		 * either case the remote node context state machine will take
 919		 * the correct action when the remote node context is suspended
 920		 * and later resumed.
 921		 */
 922		sci_remote_device_suspend(idev,
 923					  SCI_SW_SUSPEND_LINKHANG_DETECT);
 924
 925		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
 926				sci_remote_device_continue_request, idev);
 927
 928	out:
 929		sci_remote_device_start_request(idev, ireq, status);
 930		/* We need to let the controller start request handler know that
 931		 * it can't post TC yet. We will provide a callback function to
 932		 * post TC when RNC gets resumed.
 933		 */
 934		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
 935	case SCI_DEV_READY:
 936		status = sci_port_start_io(iport, idev, ireq);
 937		if (status != SCI_SUCCESS)
 938			return status;
 939
 940		/* Resume the RNC as needed: */
 941		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
 942							    NULL, NULL);
 943		if (status != SCI_SUCCESS)
 944			break;
 945
 946		status = sci_request_start(ireq);
 947		break;
 948	}
 949	sci_remote_device_start_request(idev, ireq, status);
 950
 951	return status;
 952}
 953
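/* Build the post context from the request type, the protocol engine group,
 * the port index, and the remote node index, then post it to the controller.
 */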
 954void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
 955{
 956	struct isci_port *iport = idev->owning_port;
 957	u32 context;
 958
 959	context = request |
 960		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
 961		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
 962		  idev->rnc.remote_node_index;
 963
 964	sci_controller_post_request(iport->owning_controller, context);
 965}
 966
  967/* called once the remote node context has transitioned to a
 968 * ready state.  This is the indication that the remote device object can also
 969 * transition to ready.
 970 */
 971static void remote_device_resume_done(void *_dev)
 972{
 973	struct isci_remote_device *idev = _dev;
 974
 975	if (is_remote_device_ready(idev))
 976		return;
 977
 978	/* go 'ready' if we are not already in a ready state */
 979	sci_change_state(&idev->sm, SCI_DEV_READY);
 980}
 981
 982static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
 983{
 984	struct isci_remote_device *idev = _dev;
 985	struct isci_host *ihost = idev->owning_port->owning_controller;
 986
  987	/* For NCQ operation we do not issue an isci_remote_device_not_ready().
 988	 * As a result, avoid sending the ready notification.
 989	 */
 990	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
 991		isci_remote_device_ready(ihost, idev);
 992}
 993
 994static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
 995{
 996	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
 997
 998	/* Initial state is a transitional state to the stopped state */
 999	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
1000}
1001
1002/**
1003 * sci_remote_device_destruct() - free remote node context and destruct
1004 * @idev: This parameter specifies the remote device to be destructed.
1005 *
1006 * Remote device objects are a limited resource.  As such, they must be
1007 * protected.  Thus calls to construct and destruct are mutually exclusive and
 1008 * non-reentrant.  The return value indicates whether the device was
 1009 * successfully destructed or whether some failure occurred.
 1010 * SCI_SUCCESS is returned if the device is successfully destructed.
 1011 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied
 1012 * device isn't valid (e.g. it has already been destroyed, the handle isn't
 1013 * valid, etc.).
1014 */
1015static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
1016{
1017	struct sci_base_state_machine *sm = &idev->sm;
1018	enum sci_remote_device_states state = sm->current_state_id;
1019	struct isci_host *ihost;
1020
1021	if (state != SCI_DEV_STOPPED) {
1022		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1023			 __func__, dev_state_name(state));
1024		return SCI_FAILURE_INVALID_STATE;
1025	}
1026
1027	ihost = idev->owning_port->owning_controller;
1028	sci_controller_free_remote_node_context(ihost, idev,
1029						     idev->rnc.remote_node_index);
1030	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
1031	sci_change_state(sm, SCI_DEV_FINAL);
1032
1033	return SCI_SUCCESS;
1034}
1035
1036/**
1037 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
1038 * @ihost: This parameter specifies the isci host object.
1039 * @idev: This parameter specifies the remote device to be freed.
1040 *
1041 */
1042static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
1043{
1044	dev_dbg(&ihost->pdev->dev,
1045		"%s: isci_device = %p\n", __func__, idev);
1046
 1047	/* There should not be any outstanding I/Os. All paths to
1048	 * here should go through isci_remote_device_nuke_requests.
1049	 * If we hit this condition, we will need a way to complete
1050	 * io requests in process */
1051	BUG_ON(idev->started_request_count > 0);
1052
1053	sci_remote_device_destruct(idev);
1054	list_del_init(&idev->node);
1055	isci_put_device(idev);
1056}
1057
1058static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
1059{
1060	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1061	struct isci_host *ihost = idev->owning_port->owning_controller;
1062	u32 prev_state;
1063
1064	/* If we are entering from the stopping state let the SCI User know that
1065	 * the stop operation has completed.
1066	 */
1067	prev_state = idev->sm.previous_state_id;
1068	if (prev_state == SCI_DEV_STOPPING)
1069		isci_remote_device_deconstruct(ihost, idev);
1070
1071	sci_controller_remote_device_stopped(ihost, idev);
1072}
1073
1074static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
1075{
1076	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1077	struct isci_host *ihost = idev->owning_port->owning_controller;
1078
1079	isci_remote_device_not_ready(ihost, idev,
1080				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
1081}
1082
1083static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
1084{
1085	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1086	struct isci_host *ihost = idev->owning_port->owning_controller;
1087	struct domain_device *dev = idev->domain_dev;
1088
1089	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
1090		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
1091	} else if (dev_is_expander(dev->dev_type)) {
1092		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
1093	} else
1094		isci_remote_device_ready(ihost, idev);
1095}
1096
1097static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
1098{
1099	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1100	struct domain_device *dev = idev->domain_dev;
1101
1102	if (dev->dev_type == SAS_END_DEVICE) {
1103		struct isci_host *ihost = idev->owning_port->owning_controller;
1104
1105		isci_remote_device_not_ready(ihost, idev,
1106					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
1107	}
1108}
1109
1110static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
1111{
1112	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1113	struct isci_host *ihost = idev->owning_port->owning_controller;
1114
1115	dev_dbg(&ihost->pdev->dev,
1116		"%s: isci_device = %p\n", __func__, idev);
1117
1118	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1119}
1120
1121static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
1122{
1123	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1124	struct isci_host *ihost = idev->owning_port->owning_controller;
1125
1126	dev_dbg(&ihost->pdev->dev,
1127		"%s: isci_device = %p\n", __func__, idev);
1128
1129	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
1130}
1131
1132static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1133{
1134	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1135
1136	idev->working_request = NULL;
1137	if (sci_remote_node_context_is_ready(&idev->rnc)) {
1138		/*
1139		 * Since the RNC is ready, it's alright to finish completion
1140		 * processing (e.g. signal the remote device is ready). */
1141		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
1142	} else {
1143		sci_remote_node_context_resume(&idev->rnc,
1144			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
1145			idev);
1146	}
1147}
1148
1149static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1150{
1151	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1152	struct isci_host *ihost = idev->owning_port->owning_controller;
1153
1154	BUG_ON(idev->working_request == NULL);
1155
1156	isci_remote_device_not_ready(ihost, idev,
1157				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
1158}
1159
1160static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
1161{
1162	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1163	struct isci_host *ihost = idev->owning_port->owning_controller;
1164
1165	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
1166		isci_remote_device_not_ready(ihost, idev,
1167					     idev->not_ready_reason);
1168}
1169
1170static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1171{
1172	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1173	struct isci_host *ihost = idev->owning_port->owning_controller;
1174
1175	isci_remote_device_ready(ihost, idev);
1176}
1177
1178static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1179{
1180	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1181	struct isci_host *ihost = idev->owning_port->owning_controller;
1182
1183	BUG_ON(idev->working_request == NULL);
1184
1185	isci_remote_device_not_ready(ihost, idev,
1186				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
1187}
1188
1189static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
1190{
1191	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1192
1193	idev->working_request = NULL;
1194}
1195
1196static const struct sci_base_state sci_remote_device_state_table[] = {
1197	[SCI_DEV_INITIAL] = {
1198		.enter_state = sci_remote_device_initial_state_enter,
1199	},
1200	[SCI_DEV_STOPPED] = {
1201		.enter_state = sci_remote_device_stopped_state_enter,
1202	},
1203	[SCI_DEV_STARTING] = {
1204		.enter_state = sci_remote_device_starting_state_enter,
1205	},
1206	[SCI_DEV_READY] = {
1207		.enter_state = sci_remote_device_ready_state_enter,
1208		.exit_state  = sci_remote_device_ready_state_exit
1209	},
1210	[SCI_STP_DEV_IDLE] = {
1211		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
1212	},
1213	[SCI_STP_DEV_CMD] = {
1214		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
1215	},
1216	[SCI_STP_DEV_NCQ] = { },
1217	[SCI_STP_DEV_NCQ_ERROR] = {
1218		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
1219	},
1220	[SCI_STP_DEV_ATAPI_ERROR] = { },
1221	[SCI_STP_DEV_AWAIT_RESET] = { },
1222	[SCI_SMP_DEV_IDLE] = {
1223		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
1224	},
1225	[SCI_SMP_DEV_CMD] = {
1226		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
1227		.exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
1228	},
1229	[SCI_DEV_STOPPING] = { },
1230	[SCI_DEV_FAILED] = { },
1231	[SCI_DEV_RESETTING] = {
1232		.enter_state = sci_remote_device_resetting_state_enter,
1233		.exit_state  = sci_remote_device_resetting_state_exit
1234	},
1235	[SCI_DEV_FINAL] = { },
1236};
1237
1238/**
1239 * sci_remote_device_construct() - common construction
1240 * @iport: SAS/SATA port through which this device is accessed.
1241 * @idev: remote device to construct
1242 *
1243 * This routine just performs benign initialization and does not
1244 * allocate the remote_node_context which is left to
1245 * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
1246 * frees the remote_node_context(s) for the device.
1247 */
1248static void sci_remote_device_construct(struct isci_port *iport,
1249				  struct isci_remote_device *idev)
1250{
1251	idev->owning_port = iport;
1252	idev->started_request_count = 0;
1253
1254	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1255
1256	sci_remote_node_context_construct(&idev->rnc,
1257					       SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
1258}
1259
1260/*
1261 * sci_remote_device_da_construct() - construct direct attached device.
1262 *
1263 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
1264 * the device is known to the SCI Core since it is contained in the
1265 * sci_phy object.  Remote node context(s) is/are a global resource
1266 * allocated by this routine, freed by sci_remote_device_destruct().
1267 *
1268 * Returns:
1269 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1270 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1271 * sata-only controller instance.
1272 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1273 */
1274static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1275						       struct isci_remote_device *idev)
1276{
1277	enum sci_status status;
1278	struct sci_port_properties properties;
1279
1280	sci_remote_device_construct(iport, idev);
1281
1282	sci_port_get_properties(iport, &properties);
1283	/* Get accurate port width from port's phy mask for a DA device. */
1284	idev->device_port_width = hweight32(properties.phy_mask);
1285
1286	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1287							     idev,
1288							     &idev->rnc.remote_node_index);
1289
1290	if (status != SCI_SUCCESS)
1291		return status;
1292
1293	idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1294
1295	return SCI_SUCCESS;
1296}
1297
1298/*
1299 * sci_remote_device_ea_construct() - construct expander attached device
1300 *
1301 * Remote node context(s) is/are a global resource allocated by this
1302 * routine, freed by sci_remote_device_destruct().
1303 *
1304 * Returns:
1305 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1306 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1307 * sata-only controller instance.
1308 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1309 */
1310static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1311						       struct isci_remote_device *idev)
1312{
1313	struct domain_device *dev = idev->domain_dev;
1314	enum sci_status status;
1315
1316	sci_remote_device_construct(iport, idev);
1317
1318	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1319								  idev,
1320								  &idev->rnc.remote_node_index);
1321	if (status != SCI_SUCCESS)
1322		return status;
1323
1324	/* For SAS-2 the physical link rate is actually a logical link
1325	 * rate that incorporates multiplexing.  The SCU doesn't
1326	 * incorporate multiplexing and for the purposes of the
 1327	 * connection the logical link rate is the same as the
1328	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
1329	 * one another, so this code works for both situations.
1330	 */
1331	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1332					 dev->linkrate);
1333
 1334	/* @todo Should the port width be assigned by reading all of the phys on the port? */
1335	idev->device_port_width = 1;
1336
1337	return SCI_SUCCESS;
1338}
1339
1340enum sci_status sci_remote_device_resume(
1341	struct isci_remote_device *idev,
1342	scics_sds_remote_node_context_callback cb_fn,
1343	void *cb_p)
1344{
1345	enum sci_status status;
1346
1347	status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
1348	if (status != SCI_SUCCESS)
1349		dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
1350			__func__, status);
1351	return status;
1352}
1353
1354static void isci_remote_device_resume_from_abort_complete(void *cbparam)
1355{
1356	struct isci_remote_device *idev = cbparam;
1357	struct isci_host *ihost = idev->owning_port->owning_controller;
1358	scics_sds_remote_node_context_callback abort_resume_cb =
1359		idev->abort_resume_cb;
1360
1361	dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
1362		__func__, abort_resume_cb);
1363
1364	if (abort_resume_cb != NULL) {
1365		idev->abort_resume_cb = NULL;
1366		abort_resume_cb(idev->abort_resume_cbparam);
1367	}
1368	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1369	wake_up(&ihost->eventq);
1370}
1371
1372static bool isci_remote_device_test_resume_done(
1373	struct isci_host *ihost,
1374	struct isci_remote_device *idev)
1375{
1376	unsigned long flags;
1377	bool done;
1378
1379	spin_lock_irqsave(&ihost->scic_lock, flags);
1380	done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
1381		|| test_bit(IDEV_STOP_PENDING, &idev->flags)
1382		|| sci_remote_node_context_is_being_destroyed(&idev->rnc);
1383	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1384
1385	return done;
1386}
1387
1388static void isci_remote_device_wait_for_resume_from_abort(
1389	struct isci_host *ihost,
1390	struct isci_remote_device *idev)
1391{
1392	dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
1393		 __func__, idev);
1394
1395	#define MAX_RESUME_MSECS 10000
1396	if (!wait_event_timeout(ihost->eventq,
1397				isci_remote_device_test_resume_done(ihost, idev),
1398				msecs_to_jiffies(MAX_RESUME_MSECS))) {
1399
1400		dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
1401			 "resume: %p\n", __func__, idev);
1402	}
1403	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1404
1405	dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
1406		 __func__, idev);
1407}
1408
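/**
 * isci_remote_device_resume_from_abort() - resume the RNC as the abort path exits
 * @ihost: host owning the device
 * @idev: device whose remote node context should be resumed
 *
 * Preserves any resume callback already registered on the RNC, clears the
 * abort-path-active flag, resumes the RNC (unless it is being destroyed),
 * and waits for the resume to complete.
 */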
1409enum sci_status isci_remote_device_resume_from_abort(
1410	struct isci_host *ihost,
1411	struct isci_remote_device *idev)
1412{
1413	unsigned long flags;
1414	enum sci_status status = SCI_SUCCESS;
1415	int destroyed;
1416
1417	spin_lock_irqsave(&ihost->scic_lock, flags);
1418	/* Preserve any current resume callbacks, for instance from other
1419	 * resumptions.
1420	 */
1421	idev->abort_resume_cb = idev->rnc.user_callback;
1422	idev->abort_resume_cbparam = idev->rnc.user_cookie;
1423	set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1424	clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1425	destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
1426	if (!destroyed)
1427		status = sci_remote_device_resume(
1428			idev, isci_remote_device_resume_from_abort_complete,
1429			idev);
1430	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1431	if (!destroyed && (status == SCI_SUCCESS))
1432		isci_remote_device_wait_for_resume_from_abort(ihost, idev);
1433	else
1434		clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1435
1436	return status;
1437}
1438
1439/**
1440 * sci_remote_device_start() - This method will start the supplied remote
1441 *    device.  This method enables normal IO requests to flow through to the
1442 *    remote device.
1443 * @idev: This parameter specifies the device to be started.
1444 * @timeout: This parameter specifies the number of milliseconds in which the
1445 *    start operation should complete.
1446 *
1447 * An indication of whether the device was successfully started. SCI_SUCCESS
1448 * This value is returned if the device was successfully started.
1449 * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
1450 * the device when there have been no phys added to it.
1451 */
1452static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1453					       u32 timeout)
1454{
1455	struct sci_base_state_machine *sm = &idev->sm;
1456	enum sci_remote_device_states state = sm->current_state_id;
1457	enum sci_status status;
1458
1459	if (state != SCI_DEV_STOPPED) {
1460		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1461			 __func__, dev_state_name(state));
1462		return SCI_FAILURE_INVALID_STATE;
1463	}
1464
1465	status = sci_remote_device_resume(idev, remote_device_resume_done,
1466					  idev);
1467	if (status != SCI_SUCCESS)
1468		return status;
1469
1470	sci_change_state(sm, SCI_DEV_STARTING);
1471
1472	return SCI_SUCCESS;
1473}
1474
1475static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1476						    struct isci_remote_device *idev)
1477{
1478	struct isci_host *ihost = iport->isci_host;
1479	struct domain_device *dev = idev->domain_dev;
1480	enum sci_status status;
1481
1482	if (dev->parent && dev_is_expander(dev->parent->dev_type))
1483		status = sci_remote_device_ea_construct(iport, idev);
1484	else
1485		status = sci_remote_device_da_construct(iport, idev);
1486
1487	if (status != SCI_SUCCESS) {
1488		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1489			__func__, status);
1490
1491		return status;
1492	}
1493
1494	/* start the device. */
1495	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1496
1497	if (status != SCI_SUCCESS)
1498		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1499			 status);
1500
1501	return status;
1502}
1503
1504/**
 1505 * isci_remote_device_alloc() - This function builds the isci_remote_device
 1506 *    when a libsas dev_found message is received.
1508 * @ihost: This parameter specifies the isci host object.
1509 * @iport: This parameter specifies the isci_port connected to this device.
1510 *
 1511 * Return: pointer to the new isci_remote_device, or NULL if none is available.
1512 */
1513static struct isci_remote_device *
1514isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1515{
1516	struct isci_remote_device *idev;
1517	int i;
1518
1519	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1520		idev = &ihost->devices[i];
1521		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1522			break;
1523	}
1524
1525	if (i >= SCI_MAX_REMOTE_DEVICES) {
1526		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1527		return NULL;
1528	}
1529	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1530		return NULL;
1531
1532	return idev;
1533}
1534
1535void isci_remote_device_release(struct kref *kref)
1536{
1537	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1538	struct isci_host *ihost = idev->isci_port->isci_host;
1539
1540	idev->domain_dev = NULL;
1541	idev->isci_port = NULL;
1542	clear_bit(IDEV_START_PENDING, &idev->flags);
1543	clear_bit(IDEV_STOP_PENDING, &idev->flags);
1544	clear_bit(IDEV_IO_READY, &idev->flags);
1545	clear_bit(IDEV_GONE, &idev->flags);
1546	smp_mb__before_atomic();
1547	clear_bit(IDEV_ALLOCATED, &idev->flags);
1548	wake_up(&ihost->eventq);
1549}
1550
1551/**
1552 * isci_remote_device_stop() - This function is called internally to stop the
1553 *    remote device.
1554 * @ihost: This parameter specifies the isci host object.
1555 * @idev: This parameter specifies the remote device.
1556 *
1557 * The status of the ihost request to stop.
1558 */
1559enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
1560{
1561	enum sci_status status;
1562	unsigned long flags;
1563
1564	dev_dbg(&ihost->pdev->dev,
1565		"%s: isci_device = %p\n", __func__, idev);
1566
1567	spin_lock_irqsave(&ihost->scic_lock, flags);
1568	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1569	set_bit(IDEV_GONE, &idev->flags);
1570
1571	set_bit(IDEV_STOP_PENDING, &idev->flags);
1572	status = sci_remote_device_stop(idev, 50);
1573	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1574
1575	/* Wait for the stop complete callback. */
1576	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1577		/* nothing to wait for */;
1578	else
1579		wait_for_device_stop(ihost, idev);
1580
1581	dev_dbg(&ihost->pdev->dev,
1582		"%s: isci_device = %p, waiting done.\n", __func__, idev);
1583
1584	return status;
1585}
1586
1587/**
1588 * isci_remote_device_gone() - This function is called by libsas when a domain
1589 *    device is removed.
 1590 * @dev: This parameter specifies the libsas domain device.
 1591 */
1592void isci_remote_device_gone(struct domain_device *dev)
1593{
1594	struct isci_host *ihost = dev_to_ihost(dev);
1595	struct isci_remote_device *idev = dev->lldd_dev;
1596
1597	dev_dbg(&ihost->pdev->dev,
1598		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1599		__func__, dev, idev, idev->isci_port);
1600
1601	isci_remote_device_stop(ihost, idev);
1602}
1603
1604
1605/**
1606 * isci_remote_device_found() - This function is called by libsas when a remote
 1607 *    device is discovered. A remote device object is created and started. The
1608 *    function then sleeps until the sci core device started message is
1609 *    received.
1610 * @dev: This parameter specifies the libsas domain device.
1611 *
 1612 * Return: status; zero indicates success.
1613 */
1614int isci_remote_device_found(struct domain_device *dev)
1615{
1616	struct isci_host *isci_host = dev_to_ihost(dev);
1617	struct isci_port *isci_port = dev->port->lldd_port;
1618	struct isci_remote_device *isci_device;
1619	enum sci_status status;
1620
1621	dev_dbg(&isci_host->pdev->dev,
1622		"%s: domain_device = %p\n", __func__, dev);
1623
1624	if (!isci_port)
1625		return -ENODEV;
1626
1627	isci_device = isci_remote_device_alloc(isci_host, isci_port);
1628	if (!isci_device)
1629		return -ENODEV;
1630
1631	kref_init(&isci_device->kref);
1632	INIT_LIST_HEAD(&isci_device->node);
1633
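    	/* Under scic_lock: link the new device to libsas and its port, then
    	 * construct and start it.
    	 */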
1634	spin_lock_irq(&isci_host->scic_lock);
1635	isci_device->domain_dev = dev;
1636	isci_device->isci_port = isci_port;
1637	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1638
1639	set_bit(IDEV_START_PENDING, &isci_device->flags);
1640	status = isci_remote_device_construct(isci_port, isci_device);
1641
1642	dev_dbg(&isci_host->pdev->dev,
1643		"%s: isci_device = %p\n",
1644		__func__, isci_device);
1645
1646	if (status == SCI_SUCCESS) {
1647		/* device came up, advertise it to the world */
1648		dev->lldd_dev = isci_device;
1649	} else
1650		isci_put_device(isci_device);
1651	spin_unlock_irq(&isci_host->scic_lock);
1652
1653	/* wait for the device ready callback. */
1654	wait_for_device_start(isci_host, isci_device);
1655
1656	return status == SCI_SUCCESS ? 0 : -ENODEV;
1657}
1658
1659enum sci_status isci_remote_device_suspend_terminate(
1660	struct isci_host *ihost,
1661	struct isci_remote_device *idev,
1662	struct isci_request *ireq)
1663{
1664	unsigned long flags;
1665	enum sci_status status;
1666
1667	/* Put the device into suspension. */
1668	spin_lock_irqsave(&ihost->scic_lock, flags);
1669	set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1670	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1671	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1672
1673	/* Terminate and wait for the completions. */
1674	status = isci_remote_device_terminate_requests(ihost, idev, ireq);
1675	if (status != SCI_SUCCESS)
1676		dev_dbg(&ihost->pdev->dev,
1677			"%s: isci_remote_device_terminate_requests(%p) "
1678				"returned %d!\n",
1679			__func__, idev, status);
1680
1681	/* NOTE: RNC resumption is left to the caller! */
1682	return status;
1683}
1684
1685int isci_remote_device_is_safe_to_abort(
1686	struct isci_remote_device *idev)
1687{
1688	return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
1689}
1690
1691enum sci_status sci_remote_device_abort_requests_pending_abort(
1692	struct isci_remote_device *idev)
1693{
1694	return sci_remote_device_terminate_reqs_checkabort(idev, 1);
1695}
1696
1697enum sci_status isci_remote_device_reset_complete(
1698	struct isci_host *ihost,
1699	struct isci_remote_device *idev)
1700{
1701	unsigned long flags;
1702	enum sci_status status;
1703
1704	spin_lock_irqsave(&ihost->scic_lock, flags);
1705	status = sci_remote_device_reset_complete(idev);
1706	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1707
1708	return status;
1709}
1710
1711void isci_dev_set_hang_detection_timeout(
1712	struct isci_remote_device *idev,
1713	u32 timeout)
1714{
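    	/* Link-layer hang detection only applies to SATA devices; the
    	 * IDEV_RNC_LLHANG_ENABLED bit tracks the current setting so the port
    	 * timeout is only reprogrammed on an actual change.
    	 */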
1715	if (dev_is_sata(idev->domain_dev)) {
1716		if (timeout) {
1717			if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
1718					     &idev->flags))
1719				return;  /* Already enabled. */
1720		} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
1721					       &idev->flags))
1722			return;  /* Not enabled. */
1723
1724		sci_port_set_hang_detection_timeout(idev->owning_port,
1725						    timeout);
1726	}
1727}
v5.4
   1/*
   2 * This file is provided under a dual BSD/GPLv2 license.  When using or
   3 * redistributing this file, you may do so under either license.
   4 *
   5 * GPL LICENSE SUMMARY
   6 *
   7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21 * The full GNU General Public License is included in this distribution
  22 * in the file called LICENSE.GPL.
  23 *
  24 * BSD LICENSE
  25 *
  26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27 * All rights reserved.
  28 *
  29 * Redistribution and use in source and binary forms, with or without
  30 * modification, are permitted provided that the following conditions
  31 * are met:
  32 *
  33 *   * Redistributions of source code must retain the above copyright
  34 *     notice, this list of conditions and the following disclaimer.
  35 *   * Redistributions in binary form must reproduce the above copyright
  36 *     notice, this list of conditions and the following disclaimer in
  37 *     the documentation and/or other materials provided with the
  38 *     distribution.
  39 *   * Neither the name of Intel Corporation nor the names of its
  40 *     contributors may be used to endorse or promote products derived
  41 *     from this software without specific prior written permission.
  42 *
  43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54 */
  55#include <scsi/sas.h>
  56#include <linux/bitops.h>
  57#include "isci.h"
  58#include "port.h"
  59#include "remote_device.h"
  60#include "request.h"
  61#include "remote_node_context.h"
  62#include "scu_event_codes.h"
  63#include "task.h"
  64
  65#undef C
  66#define C(a) (#a)
  67const char *dev_state_name(enum sci_remote_device_states state)
  68{
  69	static const char * const strings[] = REMOTE_DEV_STATES;
  70
  71	return strings[state];
  72}
  73#undef C
  74
  75enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
  76					  enum sci_remote_node_suspension_reasons reason)
  77{
  78	return sci_remote_node_context_suspend(&idev->rnc, reason,
  79					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
  80}
  81
  82/**
  83 * isci_remote_device_ready() - This function is called by the ihost when the
  84 *    remote device is ready. We mark the isci device as ready and signal the
  85 *    waiting proccess.
  86 * @ihost: our valid isci_host
  87 * @idev: remote device
  88 *
  89 */
  90static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
  91{
  92	dev_dbg(&ihost->pdev->dev,
  93		"%s: idev = %p\n", __func__, idev);
  94
  95	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
  96	set_bit(IDEV_IO_READY, &idev->flags);
  97	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
  98		wake_up(&ihost->eventq);
  99}
 100
 101static enum sci_status sci_remote_device_terminate_req(
 102	struct isci_host *ihost,
 103	struct isci_remote_device *idev,
 104	int check_abort,
 105	struct isci_request *ireq)
 106{
 107	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
 108	    (ireq->target_device != idev) ||
 109	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
 110		return SCI_SUCCESS;
 111
 112	dev_dbg(&ihost->pdev->dev,
 113		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
 114		__func__, idev, idev->flags, ireq, ireq->target_device);
 115
 116	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
 117
 118	return sci_controller_terminate_request(ihost, idev, ireq);
 119}
 120
 121static enum sci_status sci_remote_device_terminate_reqs_checkabort(
 122	struct isci_remote_device *idev,
 123	int chk)
 124{
 125	struct isci_host *ihost = idev->owning_port->owning_controller;
 126	enum sci_status status  = SCI_SUCCESS;
 127	u32 i;
 128
 129	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
 130		struct isci_request *ireq = ihost->reqs[i];
 131		enum sci_status s;
 132
 133		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
 134		if (s != SCI_SUCCESS)
 135			status = s;
 136	}
 137	return status;
 138}
 139
 140static bool isci_compare_suspendcount(
 141	struct isci_remote_device *idev,
 142	u32 localcount)
 143{
 144	smp_rmb();
 145
 146	/* Check for a change in the suspend count, or the RNC
 147	 * being destroyed.
 148	 */
 149	return (localcount != idev->rnc.suspend_count)
 150	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
 151}
 152
 153static bool isci_check_reqterm(
 154	struct isci_host *ihost,
 155	struct isci_remote_device *idev,
 156	struct isci_request *ireq,
 157	u32 localcount)
 158{
 159	unsigned long flags;
 160	bool res;
 161
 162	spin_lock_irqsave(&ihost->scic_lock, flags);
 163	res = isci_compare_suspendcount(idev, localcount)
 164		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
 165	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 166
 167	return res;
 168}
 169
 170static bool isci_check_devempty(
 171	struct isci_host *ihost,
 172	struct isci_remote_device *idev,
 173	u32 localcount)
 174{
 175	unsigned long flags;
 176	bool res;
 177
 178	spin_lock_irqsave(&ihost->scic_lock, flags);
 179	res = isci_compare_suspendcount(idev, localcount)
 180		&& idev->started_request_count == 0;
 181	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 182
 183	return res;
 184}
 185
 186enum sci_status isci_remote_device_terminate_requests(
 187	struct isci_host *ihost,
 188	struct isci_remote_device *idev,
 189	struct isci_request *ireq)
 190{
 191	enum sci_status status = SCI_SUCCESS;
 192	unsigned long flags;
 193	u32 rnc_suspend_count;
 194
 195	spin_lock_irqsave(&ihost->scic_lock, flags);
 196
 197	if (isci_get_device(idev) == NULL) {
 198		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
 199			__func__, idev);
 200		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 201		status = SCI_FAILURE;
 202	} else {
 203		/* If already suspended, don't wait for another suspension. */
 204		smp_rmb();
 205		rnc_suspend_count
 206			= sci_remote_node_context_is_suspended(&idev->rnc)
 207				? 0 : idev->rnc.suspend_count;
 208
 209		dev_dbg(&ihost->pdev->dev,
 210			"%s: idev=%p, ireq=%p; started_request_count=%d, "
 211				"rnc_suspend_count=%d, rnc.suspend_count=%d"
 212				"about to wait\n",
 213			__func__, idev, ireq, idev->started_request_count,
 214			rnc_suspend_count, idev->rnc.suspend_count);
 215
 216		#define MAX_SUSPEND_MSECS 10000
 217		if (ireq) {
 218			/* Terminate a specific TC. */
 219			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
 220			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
 221			spin_unlock_irqrestore(&ihost->scic_lock, flags);
 222			if (!wait_event_timeout(ihost->eventq,
 223						isci_check_reqterm(ihost, idev, ireq,
 224								   rnc_suspend_count),
 225						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
 226
 227				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
 228					 __func__, ihost->id);
 229				dev_dbg(&ihost->pdev->dev,
 230					 "%s: ******* Timeout waiting for "
 231					 "suspend; idev=%p, current state %s; "
 232					 "started_request_count=%d, flags=%lx\n\t"
 233					 "rnc_suspend_count=%d, rnc.suspend_count=%d "
 234					 "RNC: current state %s, current "
 235					 "suspend_type %x dest state %d;\n"
 236					 "ireq=%p, ireq->flags = %lx\n",
 237					 __func__, idev,
 238					 dev_state_name(idev->sm.current_state_id),
 239					 idev->started_request_count, idev->flags,
 240					 rnc_suspend_count, idev->rnc.suspend_count,
 241					 rnc_state_name(idev->rnc.sm.current_state_id),
 242					 idev->rnc.suspend_type,
 243					 idev->rnc.destination_state,
 244					 ireq, ireq->flags);
 245			}
 246			spin_lock_irqsave(&ihost->scic_lock, flags);
 247			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
 248			if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
 249				isci_free_tag(ihost, ireq->io_tag);
 250			spin_unlock_irqrestore(&ihost->scic_lock, flags);
 251		} else {
 252			/* Terminate all TCs. */
 253			sci_remote_device_terminate_requests(idev);
 254			spin_unlock_irqrestore(&ihost->scic_lock, flags);
 255			if (!wait_event_timeout(ihost->eventq,
 256						isci_check_devempty(ihost, idev,
 257								    rnc_suspend_count),
 258						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
 259
 260				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
 261					 __func__, ihost->id);
 262				dev_dbg(&ihost->pdev->dev,
 263					"%s: ******* Timeout waiting for "
 264					"suspend; idev=%p, current state %s; "
 265					"started_request_count=%d, flags=%lx\n\t"
 266					"rnc_suspend_count=%d, "
 267					"RNC: current state %s, "
 268					"rnc.suspend_count=%d, current "
 269					"suspend_type %x dest state %d\n",
 270					__func__, idev,
 271					dev_state_name(idev->sm.current_state_id),
 272					idev->started_request_count, idev->flags,
 273					rnc_suspend_count,
 274					rnc_state_name(idev->rnc.sm.current_state_id),
 275					idev->rnc.suspend_count,
 276					idev->rnc.suspend_type,
 277					idev->rnc.destination_state);
 278			}
 279		}
 280		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
 281			__func__, idev);
 282		isci_put_device(idev);
 283	}
 284	return status;
 285}
 286
 287/**
 288* isci_remote_device_not_ready() - This function is called by the ihost when
 289*    the remote device is not ready. We mark the isci device as ready (not
 290*    "ready_for_io") and signal the waiting proccess.
 291* @isci_host: This parameter specifies the isci host object.
 292* @isci_device: This parameter specifies the remote device
 
 293*
 294* sci_lock is held on entrance to this function.
 295*/
 296static void isci_remote_device_not_ready(struct isci_host *ihost,
 297					 struct isci_remote_device *idev,
 298					 u32 reason)
 299{
 300	dev_dbg(&ihost->pdev->dev,
 301		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
 302
 303	switch (reason) {
 304	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
 305		set_bit(IDEV_IO_NCQERROR, &idev->flags);
 306
 307		/* Suspend the remote device so the I/O can be terminated. */
 308		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
 309
 310		/* Kill all outstanding requests for the device. */
 311		sci_remote_device_terminate_requests(idev);
 312
 313		/* Fall through - into the default case... */
 314	default:
 315		clear_bit(IDEV_IO_READY, &idev->flags);
 316		break;
 317	}
 318}
 319
 320/* called once the remote node context is ready to be freed.
 321 * The remote device can now report that its stop operation is complete. none
 322 */
 323static void rnc_destruct_done(void *_dev)
 324{
 325	struct isci_remote_device *idev = _dev;
 326
 327	BUG_ON(idev->started_request_count != 0);
 328	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
 329}
 330
 331enum sci_status sci_remote_device_terminate_requests(
 332	struct isci_remote_device *idev)
 333{
 334	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
 335}
 336
 337enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
 338					u32 timeout)
 339{
 340	struct sci_base_state_machine *sm = &idev->sm;
 341	enum sci_remote_device_states state = sm->current_state_id;
 342
 343	switch (state) {
 344	case SCI_DEV_INITIAL:
 345	case SCI_DEV_FAILED:
 346	case SCI_DEV_FINAL:
 347	default:
 348		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 349			 __func__, dev_state_name(state));
 350		return SCI_FAILURE_INVALID_STATE;
 351	case SCI_DEV_STOPPED:
 352		return SCI_SUCCESS;
 353	case SCI_DEV_STARTING:
 354		/* device not started so there had better be no requests */
 355		BUG_ON(idev->started_request_count != 0);
 356		sci_remote_node_context_destruct(&idev->rnc,
 357						      rnc_destruct_done, idev);
 358		/* Transition to the stopping state and wait for the
 359		 * remote node to complete being posted and invalidated.
 360		 */
 361		sci_change_state(sm, SCI_DEV_STOPPING);
 362		return SCI_SUCCESS;
 363	case SCI_DEV_READY:
 364	case SCI_STP_DEV_IDLE:
 365	case SCI_STP_DEV_CMD:
 366	case SCI_STP_DEV_NCQ:
 367	case SCI_STP_DEV_NCQ_ERROR:
 368	case SCI_STP_DEV_AWAIT_RESET:
 369	case SCI_SMP_DEV_IDLE:
 370	case SCI_SMP_DEV_CMD:
 371		sci_change_state(sm, SCI_DEV_STOPPING);
 372		if (idev->started_request_count == 0)
 373			sci_remote_node_context_destruct(&idev->rnc,
 374							 rnc_destruct_done,
 375							 idev);
 376		else {
 377			sci_remote_device_suspend(
 378				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
 379			sci_remote_device_terminate_requests(idev);
 380		}
 381		return SCI_SUCCESS;
 382	case SCI_DEV_STOPPING:
 383		/* All requests should have been terminated, but if there is an
 384		 * attempt to stop a device already in the stopping state, then
 385		 * try again to terminate.
 386		 */
 387		return sci_remote_device_terminate_requests(idev);
 388	case SCI_DEV_RESETTING:
 389		sci_change_state(sm, SCI_DEV_STOPPING);
 390		return SCI_SUCCESS;
 391	}
 392}
 393
 394enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
 395{
 396	struct sci_base_state_machine *sm = &idev->sm;
 397	enum sci_remote_device_states state = sm->current_state_id;
 398
 399	switch (state) {
 400	case SCI_DEV_INITIAL:
 401	case SCI_DEV_STOPPED:
 402	case SCI_DEV_STARTING:
 403	case SCI_SMP_DEV_IDLE:
 404	case SCI_SMP_DEV_CMD:
 405	case SCI_DEV_STOPPING:
 406	case SCI_DEV_FAILED:
 407	case SCI_DEV_RESETTING:
 408	case SCI_DEV_FINAL:
 409	default:
 410		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 411			 __func__, dev_state_name(state));
 412		return SCI_FAILURE_INVALID_STATE;
 413	case SCI_DEV_READY:
 414	case SCI_STP_DEV_IDLE:
 415	case SCI_STP_DEV_CMD:
 416	case SCI_STP_DEV_NCQ:
 417	case SCI_STP_DEV_NCQ_ERROR:
 418	case SCI_STP_DEV_AWAIT_RESET:
 419		sci_change_state(sm, SCI_DEV_RESETTING);
 420		return SCI_SUCCESS;
 421	}
 422}
 423
 424enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
 425{
 426	struct sci_base_state_machine *sm = &idev->sm;
 427	enum sci_remote_device_states state = sm->current_state_id;
 428
 429	if (state != SCI_DEV_RESETTING) {
 430		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 431			 __func__, dev_state_name(state));
 432		return SCI_FAILURE_INVALID_STATE;
 433	}
 434
 435	sci_change_state(sm, SCI_DEV_READY);
 436	return SCI_SUCCESS;
 437}
 438
 439enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
 440						     u32 frame_index)
 441{
 442	struct sci_base_state_machine *sm = &idev->sm;
 443	enum sci_remote_device_states state = sm->current_state_id;
 444	struct isci_host *ihost = idev->owning_port->owning_controller;
 445	enum sci_status status;
 446
 447	switch (state) {
 448	case SCI_DEV_INITIAL:
 449	case SCI_DEV_STOPPED:
 450	case SCI_DEV_STARTING:
 451	case SCI_STP_DEV_IDLE:
 452	case SCI_SMP_DEV_IDLE:
 453	case SCI_DEV_FINAL:
 454	default:
 455		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 456			 __func__, dev_state_name(state));
 457		/* Return the frame back to the controller */
 458		sci_controller_release_frame(ihost, frame_index);
 459		return SCI_FAILURE_INVALID_STATE;
 460	case SCI_DEV_READY:
 461	case SCI_STP_DEV_NCQ_ERROR:
 462	case SCI_STP_DEV_AWAIT_RESET:
 463	case SCI_DEV_STOPPING:
 464	case SCI_DEV_FAILED:
 465	case SCI_DEV_RESETTING: {
 466		struct isci_request *ireq;
 467		struct ssp_frame_hdr hdr;
 468		void *frame_header;
 469		ssize_t word_cnt;
 470
 471		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
 472								       frame_index,
 473								       &frame_header);
 474		if (status != SCI_SUCCESS)
 475			return status;
 476
 477		word_cnt = sizeof(hdr) / sizeof(u32);
 478		sci_swab32_cpy(&hdr, frame_header, word_cnt);
 479
 480		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
 481		if (ireq && ireq->target_device == idev) {
 482			/* The IO request is now in charge of releasing the frame */
 483			status = sci_io_request_frame_handler(ireq, frame_index);
 484		} else {
 485			/* We could not map this tag to a valid IO
 486			 * request Just toss the frame and continue
 487			 */
 488			sci_controller_release_frame(ihost, frame_index);
 489		}
 490		break;
 491	}
 492	case SCI_STP_DEV_NCQ: {
 493		struct dev_to_host_fis *hdr;
 494
 495		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
 496								       frame_index,
 497								       (void **)&hdr);
 498		if (status != SCI_SUCCESS)
 499			return status;
 500
 501		if (hdr->fis_type == FIS_SETDEVBITS &&
 502		    (hdr->status & ATA_ERR)) {
 503			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
 504
 505			/* TODO Check sactive and complete associated IO if any. */
 506			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
 507		} else if (hdr->fis_type == FIS_REGD2H &&
 508			   (hdr->status & ATA_ERR)) {
 509			/*
 510			 * Some devices return D2H FIS when an NCQ error is detected.
 511			 * Treat this like an SDB error FIS ready reason.
 512			 */
 513			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
 514			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
 515		} else
 516			status = SCI_FAILURE;
 517
 518		sci_controller_release_frame(ihost, frame_index);
 519		break;
 520	}
 521	case SCI_STP_DEV_CMD:
 522	case SCI_SMP_DEV_CMD:
 523		/* The device does not process any UF received from the hardware while
 524		 * in this state.  All unsolicited frames are forwarded to the io request
 525		 * object.
 526		 */
 527		status = sci_io_request_frame_handler(idev->working_request, frame_index);
 528		break;
 529	}
 530
 531	return status;
 532}
 533
 534static bool is_remote_device_ready(struct isci_remote_device *idev)
 535{
 536
 537	struct sci_base_state_machine *sm = &idev->sm;
 538	enum sci_remote_device_states state = sm->current_state_id;
 539
 540	switch (state) {
 541	case SCI_DEV_READY:
 542	case SCI_STP_DEV_IDLE:
 543	case SCI_STP_DEV_CMD:
 544	case SCI_STP_DEV_NCQ:
 545	case SCI_STP_DEV_NCQ_ERROR:
 546	case SCI_STP_DEV_AWAIT_RESET:
 547	case SCI_SMP_DEV_IDLE:
 548	case SCI_SMP_DEV_CMD:
 549		return true;
 550	default:
 551		return false;
 552	}
 553}
 554
 555/*
 556 * called once the remote node context has transisitioned to a ready
 557 * state (after suspending RX and/or TX due to early D2H fis)
 558 */
 559static void atapi_remote_device_resume_done(void *_dev)
 560{
 561	struct isci_remote_device *idev = _dev;
 562	struct isci_request *ireq = idev->working_request;
 563
 564	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 565}
 566
 567enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
 568						     u32 event_code)
 569{
 570	enum sci_status status;
 571	struct sci_base_state_machine *sm = &idev->sm;
 572	enum sci_remote_device_states state = sm->current_state_id;
 573
 574	switch (scu_get_event_type(event_code)) {
 575	case SCU_EVENT_TYPE_RNC_OPS_MISC:
 576	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
 577	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
 578		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
 579		break;
 580	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
 581		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
 582			status = SCI_SUCCESS;
 583
 584			/* Suspend the associated RNC */
 585			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
 586
 587			dev_dbg(scirdev_to_dev(idev),
 588				"%s: device: %p event code: %x: %s\n",
 589				__func__, idev, event_code,
 590				is_remote_device_ready(idev)
 591				? "I_T_Nexus_Timeout event"
 592				: "I_T_Nexus_Timeout event in wrong state");
 593
 594			break;
 595		}
 596		/* fall through - and treat as unhandled... */
 597	default:
 598		dev_dbg(scirdev_to_dev(idev),
 599			"%s: device: %p event code: %x: %s\n",
 600			__func__, idev, event_code,
 601			is_remote_device_ready(idev)
 602			? "unexpected event"
 603			: "unexpected event in wrong state");
 604		status = SCI_FAILURE_INVALID_STATE;
 605		break;
 606	}
 607
 608	if (status != SCI_SUCCESS)
 609		return status;
 610
 611	/* Decode device-specific states that may require an RNC resume during
 612	 * normal operation.  When the abort path is active, these resumes are
 613	 * managed when the abort path exits.
 614	 */
 615	if (state == SCI_STP_DEV_ATAPI_ERROR) {
 616		/* For ATAPI error state resume the RNC right away. */
 617		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
 618		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
 619			return sci_remote_node_context_resume(&idev->rnc,
 620							      atapi_remote_device_resume_done,
 621							      idev);
 622		}
 623	}
 624
 625	if (state == SCI_STP_DEV_IDLE) {
 626
 627		/* We pick up suspension events to handle specifically to this
 628		 * state. We resume the RNC right away.
 629		 */
 630		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
 631		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
 632			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
 633	}
 634
 635	return status;
 636}
 637
 638static void sci_remote_device_start_request(struct isci_remote_device *idev,
 639						 struct isci_request *ireq,
 640						 enum sci_status status)
 641{
 642	struct isci_port *iport = idev->owning_port;
 643
 644	/* cleanup requests that failed after starting on the port */
 645	if (status != SCI_SUCCESS)
 646		sci_port_complete_io(iport, idev, ireq);
 647	else {
 648		kref_get(&idev->kref);
 649		idev->started_request_count++;
 650	}
 651}
 652
 653enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
 654						struct isci_remote_device *idev,
 655						struct isci_request *ireq)
 656{
 657	struct sci_base_state_machine *sm = &idev->sm;
 658	enum sci_remote_device_states state = sm->current_state_id;
 659	struct isci_port *iport = idev->owning_port;
 660	enum sci_status status;
 661
 662	switch (state) {
 663	case SCI_DEV_INITIAL:
 664	case SCI_DEV_STOPPED:
 665	case SCI_DEV_STARTING:
 666	case SCI_STP_DEV_NCQ_ERROR:
 667	case SCI_DEV_STOPPING:
 668	case SCI_DEV_FAILED:
 669	case SCI_DEV_RESETTING:
 670	case SCI_DEV_FINAL:
 671	default:
 672		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 673			 __func__, dev_state_name(state));
 674		return SCI_FAILURE_INVALID_STATE;
 675	case SCI_DEV_READY:
 676		/* attempt to start an io request for this device object. The remote
 677		 * device object will issue the start request for the io and if
 678		 * successful it will start the request for the port object then
 679		 * increment its own request count.
 680		 */
 681		status = sci_port_start_io(iport, idev, ireq);
 682		if (status != SCI_SUCCESS)
 683			return status;
 684
 685		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 686		if (status != SCI_SUCCESS)
 687			break;
 688
 689		status = sci_request_start(ireq);
 690		break;
 691	case SCI_STP_DEV_IDLE: {
 692		/* handle the start io operation for a sata device that is in
 693		 * the command idle state. - Evalute the type of IO request to
 694		 * be started - If its an NCQ request change to NCQ substate -
 695		 * If its any other command change to the CMD substate
 696		 *
 697		 * If this is a softreset we may want to have a different
 698		 * substate.
 699		 */
 700		enum sci_remote_device_states new_state;
 701		struct sas_task *task = isci_request_access_task(ireq);
 702
 703		status = sci_port_start_io(iport, idev, ireq);
 704		if (status != SCI_SUCCESS)
 705			return status;
 706
 707		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 708		if (status != SCI_SUCCESS)
 709			break;
 710
 711		status = sci_request_start(ireq);
 712		if (status != SCI_SUCCESS)
 713			break;
 714
 715		if (task->ata_task.use_ncq)
 716			new_state = SCI_STP_DEV_NCQ;
 717		else {
 718			idev->working_request = ireq;
 719			new_state = SCI_STP_DEV_CMD;
 720		}
 721		sci_change_state(sm, new_state);
 722		break;
 723	}
 724	case SCI_STP_DEV_NCQ: {
 725		struct sas_task *task = isci_request_access_task(ireq);
 726
 727		if (task->ata_task.use_ncq) {
 728			status = sci_port_start_io(iport, idev, ireq);
 729			if (status != SCI_SUCCESS)
 730				return status;
 731
 732			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 733			if (status != SCI_SUCCESS)
 734				break;
 735
 736			status = sci_request_start(ireq);
 737		} else
 738			return SCI_FAILURE_INVALID_STATE;
 739		break;
 740	}
 741	case SCI_STP_DEV_AWAIT_RESET:
 742		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
 743	case SCI_SMP_DEV_IDLE:
 744		status = sci_port_start_io(iport, idev, ireq);
 745		if (status != SCI_SUCCESS)
 746			return status;
 747
 748		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
 749		if (status != SCI_SUCCESS)
 750			break;
 751
 752		status = sci_request_start(ireq);
 753		if (status != SCI_SUCCESS)
 754			break;
 755
 756		idev->working_request = ireq;
 757		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
 758		break;
 759	case SCI_STP_DEV_CMD:
 760	case SCI_SMP_DEV_CMD:
 761		/* device is already handling a command it can not accept new commands
 762		 * until this one is complete.
 763		 */
 764		return SCI_FAILURE_INVALID_STATE;
 765	}
 766
 767	sci_remote_device_start_request(idev, ireq, status);
 768	return status;
 769}
 770
 771static enum sci_status common_complete_io(struct isci_port *iport,
 772					  struct isci_remote_device *idev,
 773					  struct isci_request *ireq)
 774{
 775	enum sci_status status;
 776
 777	status = sci_request_complete(ireq);
 778	if (status != SCI_SUCCESS)
 779		return status;
 780
 781	status = sci_port_complete_io(iport, idev, ireq);
 782	if (status != SCI_SUCCESS)
 783		return status;
 784
 785	sci_remote_device_decrement_request_count(idev);
 786	return status;
 787}
 788
 789enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
 790						   struct isci_remote_device *idev,
 791						   struct isci_request *ireq)
 792{
 793	struct sci_base_state_machine *sm = &idev->sm;
 794	enum sci_remote_device_states state = sm->current_state_id;
 795	struct isci_port *iport = idev->owning_port;
 796	enum sci_status status;
 797
 798	switch (state) {
 799	case SCI_DEV_INITIAL:
 800	case SCI_DEV_STOPPED:
 801	case SCI_DEV_STARTING:
 802	case SCI_STP_DEV_IDLE:
 803	case SCI_SMP_DEV_IDLE:
 804	case SCI_DEV_FAILED:
 805	case SCI_DEV_FINAL:
 806	default:
 807		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 808			 __func__, dev_state_name(state));
 809		return SCI_FAILURE_INVALID_STATE;
 810	case SCI_DEV_READY:
 811	case SCI_STP_DEV_AWAIT_RESET:
 812	case SCI_DEV_RESETTING:
 813		status = common_complete_io(iport, idev, ireq);
 814		break;
 815	case SCI_STP_DEV_CMD:
 816	case SCI_STP_DEV_NCQ:
 817	case SCI_STP_DEV_NCQ_ERROR:
 818	case SCI_STP_DEV_ATAPI_ERROR:
 819		status = common_complete_io(iport, idev, ireq);
 820		if (status != SCI_SUCCESS)
 821			break;
 822
 823		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
 824			/* This request causes hardware error, device needs to be Lun Reset.
 825			 * So here we force the state machine to IDLE state so the rest IOs
 826			 * can reach RNC state handler, these IOs will be completed by RNC with
 827			 * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
 828			 */
 829			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
 830		} else if (idev->started_request_count == 0)
 831			sci_change_state(sm, SCI_STP_DEV_IDLE);
 832		break;
 833	case SCI_SMP_DEV_CMD:
 834		status = common_complete_io(iport, idev, ireq);
 835		if (status != SCI_SUCCESS)
 836			break;
 837		sci_change_state(sm, SCI_SMP_DEV_IDLE);
 838		break;
 839	case SCI_DEV_STOPPING:
 840		status = common_complete_io(iport, idev, ireq);
 841		if (status != SCI_SUCCESS)
 842			break;
 843
 844		if (idev->started_request_count == 0)
 845			sci_remote_node_context_destruct(&idev->rnc,
 846							 rnc_destruct_done,
 847							 idev);
 848		break;
 849	}
 850
 851	if (status != SCI_SUCCESS)
 852		dev_err(scirdev_to_dev(idev),
 853			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
 854			"could not complete\n", __func__, iport,
 855			idev, ireq, status);
 856	else
 857		isci_put_device(idev);
 858
 859	return status;
 860}
 861
 862static void sci_remote_device_continue_request(void *dev)
 863{
 864	struct isci_remote_device *idev = dev;
 865
 866	/* we need to check if this request is still valid to continue. */
 867	if (idev->working_request)
 868		sci_controller_continue_io(idev->working_request);
 869}
 870
 871enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
 872						  struct isci_remote_device *idev,
 873						  struct isci_request *ireq)
 874{
 875	struct sci_base_state_machine *sm = &idev->sm;
 876	enum sci_remote_device_states state = sm->current_state_id;
 877	struct isci_port *iport = idev->owning_port;
 878	enum sci_status status;
 879
 880	switch (state) {
 881	case SCI_DEV_INITIAL:
 882	case SCI_DEV_STOPPED:
 883	case SCI_DEV_STARTING:
 884	case SCI_SMP_DEV_IDLE:
 885	case SCI_SMP_DEV_CMD:
 886	case SCI_DEV_STOPPING:
 887	case SCI_DEV_FAILED:
 888	case SCI_DEV_RESETTING:
 889	case SCI_DEV_FINAL:
 890	default:
 891		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
 892			 __func__, dev_state_name(state));
 893		return SCI_FAILURE_INVALID_STATE;
 894	case SCI_STP_DEV_IDLE:
 895	case SCI_STP_DEV_CMD:
 896	case SCI_STP_DEV_NCQ:
 897	case SCI_STP_DEV_NCQ_ERROR:
 898	case SCI_STP_DEV_AWAIT_RESET:
 899		status = sci_port_start_io(iport, idev, ireq);
 900		if (status != SCI_SUCCESS)
 901			return status;
 902
 903		status = sci_request_start(ireq);
 904		if (status != SCI_SUCCESS)
 905			goto out;
 906
 907		/* Note: If the remote device state is not IDLE this will
 908		 * replace the request that probably resulted in the task
 909		 * management request.
 910		 */
 911		idev->working_request = ireq;
 912		sci_change_state(sm, SCI_STP_DEV_CMD);
 913
 914		/* The remote node context must cleanup the TCi to NCQ mapping
 915		 * table.  The only way to do this correctly is to either write
 916		 * to the TLCR register or to invalidate and repost the RNC. In
 917		 * either case the remote node context state machine will take
 918		 * the correct action when the remote node context is suspended
 919		 * and later resumed.
 920		 */
 921		sci_remote_device_suspend(idev,
 922					  SCI_SW_SUSPEND_LINKHANG_DETECT);
 923
 924		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
 925				sci_remote_device_continue_request, idev);
 926
 927	out:
 928		sci_remote_device_start_request(idev, ireq, status);
 929		/* We need to let the controller start request handler know that
 930		 * it can't post TC yet. We will provide a callback function to
 931		 * post TC when RNC gets resumed.
 932		 */
 933		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
 934	case SCI_DEV_READY:
 935		status = sci_port_start_io(iport, idev, ireq);
 936		if (status != SCI_SUCCESS)
 937			return status;
 938
 939		/* Resume the RNC as needed: */
 940		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
 941							    NULL, NULL);
 942		if (status != SCI_SUCCESS)
 943			break;
 944
 945		status = sci_request_start(ireq);
 946		break;
 947	}
 948	sci_remote_device_start_request(idev, ireq, status);
 949
 950	return status;
 951}
 952
 953void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
 954{
 955	struct isci_port *iport = idev->owning_port;
 956	u32 context;
 957
 958	context = request |
 959		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
 960		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
 961		  idev->rnc.remote_node_index;
 962
 963	sci_controller_post_request(iport->owning_controller, context);
 964}
 965
 966/* called once the remote node context has transisitioned to a
 967 * ready state.  This is the indication that the remote device object can also
 968 * transition to ready.
 969 */
 970static void remote_device_resume_done(void *_dev)
 971{
 972	struct isci_remote_device *idev = _dev;
 973
 974	if (is_remote_device_ready(idev))
 975		return;
 976
 977	/* go 'ready' if we are not already in a ready state */
 978	sci_change_state(&idev->sm, SCI_DEV_READY);
 979}
 980
 981static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
 982{
 983	struct isci_remote_device *idev = _dev;
 984	struct isci_host *ihost = idev->owning_port->owning_controller;
 985
 986	/* For NCQ operation we do not issue a isci_remote_device_not_ready().
 987	 * As a result, avoid sending the ready notification.
 988	 */
 989	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
 990		isci_remote_device_ready(ihost, idev);
 991}
 992
 993static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
 994{
 995	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
 996
 997	/* Initial state is a transitional state to the stopped state */
 998	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
 999}
1000
1001/**
1002 * sci_remote_device_destruct() - free remote node context and destruct
1003 * @remote_device: This parameter specifies the remote device to be destructed.
1004 *
1005 * Remote device objects are a limited resource.  As such, they must be
1006 * protected.  Thus calls to construct and destruct are mutually exclusive and
1007 * non-reentrant. The return value shall indicate if the device was
1008 * successfully destructed or if some failure occurred. enum sci_status This value
1009 * is returned if the device is successfully destructed.
1010 * SCI_FAILURE_INVALID_REMOTE_DEVICE This value is returned if the supplied
1011 * device isn't valid (e.g. it's already been destoryed, the handle isn't
1012 * valid, etc.).
1013 */
1014static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
1015{
1016	struct sci_base_state_machine *sm = &idev->sm;
1017	enum sci_remote_device_states state = sm->current_state_id;
1018	struct isci_host *ihost;
1019
1020	if (state != SCI_DEV_STOPPED) {
1021		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1022			 __func__, dev_state_name(state));
1023		return SCI_FAILURE_INVALID_STATE;
1024	}
1025
1026	ihost = idev->owning_port->owning_controller;
1027	sci_controller_free_remote_node_context(ihost, idev,
1028						     idev->rnc.remote_node_index);
1029	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
1030	sci_change_state(sm, SCI_DEV_FINAL);
1031
1032	return SCI_SUCCESS;
1033}
1034
1035/**
1036 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
1037 * @ihost: This parameter specifies the isci host object.
1038 * @idev: This parameter specifies the remote device to be freed.
1039 *
1040 */
1041static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
1042{
1043	dev_dbg(&ihost->pdev->dev,
1044		"%s: isci_device = %p\n", __func__, idev);
1045
1046	/* There should not be any outstanding io's. All paths to
1047	 * here should go through isci_remote_device_nuke_requests.
1048	 * If we hit this condition, we will need a way to complete
1049	 * io requests in process */
1050	BUG_ON(idev->started_request_count > 0);
1051
1052	sci_remote_device_destruct(idev);
1053	list_del_init(&idev->node);
1054	isci_put_device(idev);
1055}
1056
1057static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
1058{
1059	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1060	struct isci_host *ihost = idev->owning_port->owning_controller;
1061	u32 prev_state;
1062
1063	/* If we are entering from the stopping state let the SCI User know that
1064	 * the stop operation has completed.
1065	 */
1066	prev_state = idev->sm.previous_state_id;
1067	if (prev_state == SCI_DEV_STOPPING)
1068		isci_remote_device_deconstruct(ihost, idev);
1069
1070	sci_controller_remote_device_stopped(ihost, idev);
1071}
1072
1073static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
1074{
1075	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1076	struct isci_host *ihost = idev->owning_port->owning_controller;
1077
1078	isci_remote_device_not_ready(ihost, idev,
1079				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
1080}
1081
1082static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
1083{
1084	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1085	struct isci_host *ihost = idev->owning_port->owning_controller;
1086	struct domain_device *dev = idev->domain_dev;
1087
1088	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
1089		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
1090	} else if (dev_is_expander(dev->dev_type)) {
1091		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
1092	} else
1093		isci_remote_device_ready(ihost, idev);
1094}
1095
1096static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
1097{
1098	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1099	struct domain_device *dev = idev->domain_dev;
1100
1101	if (dev->dev_type == SAS_END_DEVICE) {
1102		struct isci_host *ihost = idev->owning_port->owning_controller;
1103
1104		isci_remote_device_not_ready(ihost, idev,
1105					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
1106	}
1107}
1108
1109static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
1110{
1111	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1112	struct isci_host *ihost = idev->owning_port->owning_controller;
1113
1114	dev_dbg(&ihost->pdev->dev,
1115		"%s: isci_device = %p\n", __func__, idev);
1116
1117	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1118}
1119
1120static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
1121{
1122	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1123	struct isci_host *ihost = idev->owning_port->owning_controller;
1124
1125	dev_dbg(&ihost->pdev->dev,
1126		"%s: isci_device = %p\n", __func__, idev);
1127
1128	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
1129}
1130
1131static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1132{
1133	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1134
1135	idev->working_request = NULL;
1136	if (sci_remote_node_context_is_ready(&idev->rnc)) {
1137		/*
1138		 * Since the RNC is ready, it's alright to finish completion
1139		 * processing (e.g. signal the remote device is ready). */
1140		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
1141	} else {
1142		sci_remote_node_context_resume(&idev->rnc,
1143			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
1144			idev);
1145	}
1146}
1147
1148static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1149{
1150	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1151	struct isci_host *ihost = idev->owning_port->owning_controller;
1152
1153	BUG_ON(idev->working_request == NULL);
1154
1155	isci_remote_device_not_ready(ihost, idev,
1156				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
1157}
1158
1159static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
1160{
1161	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1162	struct isci_host *ihost = idev->owning_port->owning_controller;
1163
1164	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
1165		isci_remote_device_not_ready(ihost, idev,
1166					     idev->not_ready_reason);
1167}
1168
1169static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1170{
1171	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1172	struct isci_host *ihost = idev->owning_port->owning_controller;
1173
1174	isci_remote_device_ready(ihost, idev);
1175}
1176
1177static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1178{
1179	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1180	struct isci_host *ihost = idev->owning_port->owning_controller;
1181
1182	BUG_ON(idev->working_request == NULL);
1183
1184	isci_remote_device_not_ready(ihost, idev,
1185				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
1186}
1187
1188static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
1189{
1190	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1191
1192	idev->working_request = NULL;
1193}
1194
1195static const struct sci_base_state sci_remote_device_state_table[] = {
1196	[SCI_DEV_INITIAL] = {
1197		.enter_state = sci_remote_device_initial_state_enter,
1198	},
1199	[SCI_DEV_STOPPED] = {
1200		.enter_state = sci_remote_device_stopped_state_enter,
1201	},
1202	[SCI_DEV_STARTING] = {
1203		.enter_state = sci_remote_device_starting_state_enter,
1204	},
1205	[SCI_DEV_READY] = {
1206		.enter_state = sci_remote_device_ready_state_enter,
1207		.exit_state  = sci_remote_device_ready_state_exit
1208	},
1209	[SCI_STP_DEV_IDLE] = {
1210		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
1211	},
1212	[SCI_STP_DEV_CMD] = {
1213		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
1214	},
1215	[SCI_STP_DEV_NCQ] = { },
1216	[SCI_STP_DEV_NCQ_ERROR] = {
1217		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
1218	},
1219	[SCI_STP_DEV_ATAPI_ERROR] = { },
1220	[SCI_STP_DEV_AWAIT_RESET] = { },
1221	[SCI_SMP_DEV_IDLE] = {
1222		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
1223	},
1224	[SCI_SMP_DEV_CMD] = {
1225		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
1226		.exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
1227	},
1228	[SCI_DEV_STOPPING] = { },
1229	[SCI_DEV_FAILED] = { },
1230	[SCI_DEV_RESETTING] = {
1231		.enter_state = sci_remote_device_resetting_state_enter,
1232		.exit_state  = sci_remote_device_resetting_state_exit
1233	},
1234	[SCI_DEV_FINAL] = { },
1235};
1236
1237/**
1238 * sci_remote_device_construct() - common construction
1239 * @sci_port: SAS/SATA port through which this device is accessed.
1240 * @sci_dev: remote device to construct
1241 *
1242 * This routine just performs benign initialization and does not
1243 * allocate the remote_node_context which is left to
1244 * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
1245 * frees the remote_node_context(s) for the device.
1246 */
1247static void sci_remote_device_construct(struct isci_port *iport,
1248				  struct isci_remote_device *idev)
1249{
1250	idev->owning_port = iport;
1251	idev->started_request_count = 0;
1252
1253	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1254
1255	sci_remote_node_context_construct(&idev->rnc,
1256					       SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
1257}
1258
1259/**
1260 * sci_remote_device_da_construct() - construct direct attached device.
1261 *
1262 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
1263 * the device is known to the SCI Core since it is contained in the
1264 * sci_phy object.  Remote node context(s) is/are a global resource
1265 * allocated by this routine, freed by sci_remote_device_destruct().
1266 *
1267 * Returns:
1268 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1269 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1270 * sata-only controller instance.
1271 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1272 */
1273static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1274						       struct isci_remote_device *idev)
1275{
1276	enum sci_status status;
1277	struct sci_port_properties properties;
1278
1279	sci_remote_device_construct(iport, idev);
1280
1281	sci_port_get_properties(iport, &properties);
1282	/* Get accurate port width from port's phy mask for a DA device. */
1283	idev->device_port_width = hweight32(properties.phy_mask);
1284
1285	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1286							     idev,
1287							     &idev->rnc.remote_node_index);
1288
1289	if (status != SCI_SUCCESS)
1290		return status;
1291
1292	idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1293
1294	return SCI_SUCCESS;
1295}
1296
1297/**
1298 * sci_remote_device_ea_construct() - construct expander attached device
1299 *
1300 * Remote node context(s) is/are a global resource allocated by this
1301 * routine, freed by sci_remote_device_destruct().
1302 *
1303 * Returns:
1304 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1305 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1306 * sata-only controller instance.
1307 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1308 */
1309static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1310						       struct isci_remote_device *idev)
1311{
1312	struct domain_device *dev = idev->domain_dev;
1313	enum sci_status status;
1314
1315	sci_remote_device_construct(iport, idev);
1316
1317	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1318								  idev,
1319								  &idev->rnc.remote_node_index);
1320	if (status != SCI_SUCCESS)
1321		return status;
1322
1323	/* For SAS-2 the physical link rate is actually a logical link
1324	 * rate that incorporates multiplexing.  The SCU doesn't
1325	 * incorporate multiplexing and for the purposes of the
1326	 * connection the logical link rate is that same as the
1327	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
1328	 * one another, so this code works for both situations.
1329	 */
1330	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1331					 dev->linkrate);
1332
1333	/* / @todo Should I assign the port width by reading all of the phys on the port? */
1334	idev->device_port_width = 1;
1335
1336	return SCI_SUCCESS;
1337}
1338
1339enum sci_status sci_remote_device_resume(
1340	struct isci_remote_device *idev,
1341	scics_sds_remote_node_context_callback cb_fn,
1342	void *cb_p)
1343{
1344	enum sci_status status;
1345
1346	status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
1347	if (status != SCI_SUCCESS)
1348		dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
1349			__func__, status);
1350	return status;
1351}
1352
1353static void isci_remote_device_resume_from_abort_complete(void *cbparam)
1354{
1355	struct isci_remote_device *idev = cbparam;
1356	struct isci_host *ihost = idev->owning_port->owning_controller;
1357	scics_sds_remote_node_context_callback abort_resume_cb =
1358		idev->abort_resume_cb;
1359
1360	dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
1361		__func__, abort_resume_cb);
1362
1363	if (abort_resume_cb != NULL) {
1364		idev->abort_resume_cb = NULL;
1365		abort_resume_cb(idev->abort_resume_cbparam);
1366	}
1367	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1368	wake_up(&ihost->eventq);
1369}
1370
1371static bool isci_remote_device_test_resume_done(
1372	struct isci_host *ihost,
1373	struct isci_remote_device *idev)
1374{
1375	unsigned long flags;
1376	bool done;
1377
1378	spin_lock_irqsave(&ihost->scic_lock, flags);
1379	done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
1380		|| test_bit(IDEV_STOP_PENDING, &idev->flags)
1381		|| sci_remote_node_context_is_being_destroyed(&idev->rnc);
1382	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1383
1384	return done;
1385}
1386
1387void isci_remote_device_wait_for_resume_from_abort(
1388	struct isci_host *ihost,
1389	struct isci_remote_device *idev)
1390{
1391	dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
1392		 __func__, idev);
1393
1394	#define MAX_RESUME_MSECS 10000
1395	if (!wait_event_timeout(ihost->eventq,
1396				isci_remote_device_test_resume_done(ihost, idev),
1397				msecs_to_jiffies(MAX_RESUME_MSECS))) {
1398
1399		dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
1400			 "resume: %p\n", __func__, idev);
1401	}
1402	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1403
1404	dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
1405		 __func__, idev);
1406}
1407
1408enum sci_status isci_remote_device_resume_from_abort(
1409	struct isci_host *ihost,
1410	struct isci_remote_device *idev)
1411{
1412	unsigned long flags;
1413	enum sci_status status = SCI_SUCCESS;
1414	int destroyed;
1415
1416	spin_lock_irqsave(&ihost->scic_lock, flags);
1417	/* Preserve any current resume callbacks, for instance from other
1418	 * resumptions.
1419	 */
1420	idev->abort_resume_cb = idev->rnc.user_callback;
1421	idev->abort_resume_cbparam = idev->rnc.user_cookie;
1422	set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1423	clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1424	destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
1425	if (!destroyed)
1426		status = sci_remote_device_resume(
1427			idev, isci_remote_device_resume_from_abort_complete,
1428			idev);
1429	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1430	if (!destroyed && (status == SCI_SUCCESS))
1431		isci_remote_device_wait_for_resume_from_abort(ihost, idev);
1432	else
1433		clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1434
1435	return status;
1436}
1437
1438/**
1439 * sci_remote_device_start() - This method will start the supplied remote
1440 *    device.  This method enables normal IO requests to flow through to the
1441 *    remote device.
1442 * @remote_device: This parameter specifies the device to be started.
1443 * @timeout: This parameter specifies the number of milliseconds in which the
1444 *    start operation should complete.
1445 *
1446 * An indication of whether the device was successfully started. SCI_SUCCESS
1447 * This value is returned if the device was successfully started.
1448 * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
1449 * the device when there have been no phys added to it.
1450 */
1451static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1452					       u32 timeout)
1453{
1454	struct sci_base_state_machine *sm = &idev->sm;
1455	enum sci_remote_device_states state = sm->current_state_id;
1456	enum sci_status status;
1457
1458	if (state != SCI_DEV_STOPPED) {
1459		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1460			 __func__, dev_state_name(state));
1461		return SCI_FAILURE_INVALID_STATE;
1462	}
1463
1464	status = sci_remote_device_resume(idev, remote_device_resume_done,
1465					  idev);
1466	if (status != SCI_SUCCESS)
1467		return status;
1468
1469	sci_change_state(sm, SCI_DEV_STARTING);
1470
1471	return SCI_SUCCESS;
1472}
1473
1474static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1475						    struct isci_remote_device *idev)
1476{
1477	struct isci_host *ihost = iport->isci_host;
1478	struct domain_device *dev = idev->domain_dev;
1479	enum sci_status status;
1480
1481	if (dev->parent && dev_is_expander(dev->parent->dev_type))
1482		status = sci_remote_device_ea_construct(iport, idev);
1483	else
1484		status = sci_remote_device_da_construct(iport, idev);
1485
1486	if (status != SCI_SUCCESS) {
1487		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1488			__func__, status);
1489
1490		return status;
1491	}
1492
1493	/* start the device. */
1494	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1495
1496	if (status != SCI_SUCCESS)
1497		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1498			 status);
1499
1500	return status;
1501}
1502
1503/**
1504 * isci_remote_device_alloc() - This function builds the isci_remote_device
1505 *    when a libsas dev_found message is received.
1506 * @ihost: This parameter specifies the isci host object.
1507 * @iport: This parameter specifies the isci_port connected to this device.
1508 *
1509 * pointer to new isci_remote_device.
1510 */
1511static struct isci_remote_device *
1512isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1513{
1514	struct isci_remote_device *idev;
1515	int i;
1516
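	/* Claim the first free slot in the host's fixed device table;
	 * IDEV_ALLOCATED marks a slot as in use until the last kref is
	 * dropped in isci_remote_device_release().
	 */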
1517	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1518		idev = &ihost->devices[i];
1519		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1520			break;
1521	}
1522
1523	if (i >= SCI_MAX_REMOTE_DEVICES) {
1524		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1525		return NULL;
1526	}
1527	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1528		return NULL;
1529
1530	return idev;
1531}
1532
1533void isci_remote_device_release(struct kref *kref)
1534{
1535	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1536	struct isci_host *ihost = idev->isci_port->isci_host;
1537
1538	idev->domain_dev = NULL;
1539	idev->isci_port = NULL;
1540	clear_bit(IDEV_START_PENDING, &idev->flags);
1541	clear_bit(IDEV_STOP_PENDING, &idev->flags);
1542	clear_bit(IDEV_IO_READY, &idev->flags);
1543	clear_bit(IDEV_GONE, &idev->flags);
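	/* Order the flag clears above before the IDEV_ALLOCATED clear below,
	 * so the slot is never seen as free while stale flags are still
	 * visible to a newly woken allocator.
	 */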
1544	smp_mb__before_atomic();
1545	clear_bit(IDEV_ALLOCATED, &idev->flags);
1546	wake_up(&ihost->eventq);
1547}
1548
1549/**
1550 * isci_remote_device_stop() - This function is called internally to stop the
1551 *    remote device.
1552 * @ihost: This parameter specifies the isci host object.
1553 * @idev: This parameter specifies the remote device.
1554 *
1555 * The status of the request to stop the remote device.
1556 */
1557enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
1558{
1559	enum sci_status status;
1560	unsigned long flags;
1561
1562	dev_dbg(&ihost->pdev->dev,
1563		"%s: isci_device = %p\n", __func__, idev);
1564
1565	spin_lock_irqsave(&ihost->scic_lock, flags);
1566	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1567	set_bit(IDEV_GONE, &idev->flags);
1568
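	/* Mark the stop as pending before posting it; wait_for_device_stop()
	 * below waits for this to be acknowledged by the stop complete
	 * callback.
	 */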
1569	set_bit(IDEV_STOP_PENDING, &idev->flags);
1570	status = sci_remote_device_stop(idev, 50);
1571	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1572
1573	/* Wait for the stop complete callback. */
1574	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1575		/* nothing to wait for */;
1576	else
1577		wait_for_device_stop(ihost, idev);
1578
1579	dev_dbg(&ihost->pdev->dev,
1580		"%s: isci_device = %p, waiting done.\n", __func__, idev);
1581
1582	return status;
1583}
1584
1585/**
1586 * isci_remote_device_gone() - This function is called by libsas when a domain
1587 *    device is removed.
1588 * @dev: This parameter specifies the libsas domain device.
1589 *
1590 */
1591void isci_remote_device_gone(struct domain_device *dev)
1592{
1593	struct isci_host *ihost = dev_to_ihost(dev);
1594	struct isci_remote_device *idev = dev->lldd_dev;
1595
1596	dev_dbg(&ihost->pdev->dev,
1597		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1598		__func__, dev, idev, idev->isci_port);
1599
1600	isci_remote_device_stop(ihost, idev);
1601}
1602
1603
1604/**
1605 * isci_remote_device_found() - This function is called by libsas when a remote
1606 *    device is discovered. A remote device object is created and started. The
1607 *    function then sleeps until the sci core device started message is
1608 *    received.
1609 * @dev: This parameter specifies the libsas domain device.
1610 *
1611 * status, zero indicates success.
1612 */
1613int isci_remote_device_found(struct domain_device *dev)
1614{
1615	struct isci_host *isci_host = dev_to_ihost(dev);
1616	struct isci_port *isci_port = dev->port->lldd_port;
1617	struct isci_remote_device *isci_device;
1618	enum sci_status status;
1619
1620	dev_dbg(&isci_host->pdev->dev,
1621		"%s: domain_device = %p\n", __func__, dev);
1622
1623	if (!isci_port)
1624		return -ENODEV;
1625
1626	isci_device = isci_remote_device_alloc(isci_host, isci_port);
1627	if (!isci_device)
1628		return -ENODEV;
1629
1630	kref_init(&isci_device->kref);
1631	INIT_LIST_HEAD(&isci_device->node);
1632
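	/* Construct and start the device under scic_lock; it is only
	 * published to libsas (dev->lldd_dev) if construction succeeds.
	 */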
1633	spin_lock_irq(&isci_host->scic_lock);
1634	isci_device->domain_dev = dev;
1635	isci_device->isci_port = isci_port;
1636	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1637
1638	set_bit(IDEV_START_PENDING, &isci_device->flags);
1639	status = isci_remote_device_construct(isci_port, isci_device);
1640
1641	dev_dbg(&isci_host->pdev->dev,
1642		"%s: isci_device = %p\n",
1643		__func__, isci_device);
1644
1645	if (status == SCI_SUCCESS) {
1646		/* device came up, advertise it to the world */
1647		dev->lldd_dev = isci_device;
1648	} else
1649		isci_put_device(isci_device);
1650	spin_unlock_irq(&isci_host->scic_lock);
1651
1652	/* wait for the device ready callback. */
1653	wait_for_device_start(isci_host, isci_device);
1654
1655	return status == SCI_SUCCESS ? 0 : -ENODEV;
1656}
1657
1658enum sci_status isci_remote_device_suspend_terminate(
1659	struct isci_host *ihost,
1660	struct isci_remote_device *idev,
1661	struct isci_request *ireq)
1662{
1663	unsigned long flags;
1664	enum sci_status status;
1665
1666	/* Put the device into suspension. */
1667	spin_lock_irqsave(&ihost->scic_lock, flags);
1668	set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1669	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1670	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1671
1672	/* Terminate and wait for the completions. */
1673	status = isci_remote_device_terminate_requests(ihost, idev, ireq);
1674	if (status != SCI_SUCCESS)
1675		dev_dbg(&ihost->pdev->dev,
1676			"%s: isci_remote_device_terminate_requests(%p) "
1677				"returned %d!\n",
1678			__func__, idev, status);
1679
1680	/* NOTE: RNC resumption is left to the caller! */
1681	return status;
1682}
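
/*
 * Illustrative sketch only, not part of the driver: since RNC resumption is
 * deliberately left to the caller of isci_remote_device_suspend_terminate(),
 * an abort path would typically pair it with the abort-path resume helper
 * defined earlier in this file (assumed here to be named
 * isci_remote_device_resume_from_abort()).  The function below is a
 * hypothetical example and is compiled out.
 */
#if 0
static enum sci_status example_abort_path(struct isci_host *ihost,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	/* Suspend the remote node and terminate the outstanding request(s). */
	status = isci_remote_device_suspend_terminate(ihost, idev, ireq);

	/* Resume the RNC regardless of the terminate status, because the
	 * suspend/terminate helper leaves that step to its caller.
	 */
	isci_remote_device_resume_from_abort(ihost, idev);

	return status;
}
#endif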
1683
1684int isci_remote_device_is_safe_to_abort(
1685	struct isci_remote_device *idev)
1686{
1687	return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
1688}
1689
1690enum sci_status sci_remote_device_abort_requests_pending_abort(
1691	struct isci_remote_device *idev)
1692{
1693	return sci_remote_device_terminate_reqs_checkabort(idev, 1);
1694}
1695
1696enum sci_status isci_remote_device_reset_complete(
1697	struct isci_host *ihost,
1698	struct isci_remote_device *idev)
1699{
1700	unsigned long flags;
1701	enum sci_status status;
1702
1703	spin_lock_irqsave(&ihost->scic_lock, flags);
1704	status = sci_remote_device_reset_complete(idev);
1705	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1706
1707	return status;
1708}
1709
1710void isci_dev_set_hang_detection_timeout(
1711	struct isci_remote_device *idev,
1712	u32 timeout)
1713{
1714	if (dev_is_sata(idev->domain_dev)) {
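		/* IDEV_RNC_LLHANG_ENABLED records whether low-level hang
		 * detection is currently on, so the port timeout is only
		 * reprogrammed on an actual enable or disable transition.
		 */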
1715		if (timeout) {
1716			if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
1717					     &idev->flags))
1718				return;  /* Already enabled. */
1719		} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
1720					       &idev->flags))
1721			return;  /* Not enabled. */
1722
1723		sci_port_set_hang_detection_timeout(idev->owning_port,
1724						    timeout);
1725	}
1726}