Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Serial Attached SCSI (SAS) class SCSI Host glue.
   4 *
   5 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
   6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
   7 */
   8
   9#include <linux/kthread.h>
  10#include <linux/firmware.h>
  11#include <linux/export.h>
  12#include <linux/ctype.h>
  13#include <linux/kernel.h>
  14
  15#include "sas_internal.h"
  16
  17#include <scsi/scsi_host.h>
  18#include <scsi/scsi_device.h>
  19#include <scsi/scsi_tcq.h>
  20#include <scsi/scsi.h>
  21#include <scsi/scsi_eh.h>
  22#include <scsi/scsi_transport.h>
  23#include <scsi/scsi_transport_sas.h>
  24#include <scsi/sas_ata.h>
  25#include "scsi_sas_internal.h"
  26#include "scsi_transport_api.h"
  27#include "scsi_priv.h"
  28
  29#include <linux/err.h>
  30#include <linux/blkdev.h>
  31#include <linux/freezer.h>
  32#include <linux/gfp.h>
  33#include <linux/scatterlist.h>
  34#include <linux/libata.h>
  35
/*
 * sas_end_task - record the final status of a completed task and free it
 * @sc:   the SCSI command the task was carrying
 * @task: the completed sas_task
 *
 * Translates the libsas response/status pair in task->task_status into
 * the midlayer's (host_status << 16) | sam_status encoding in sc->result,
 * detaches the task from the command and frees it.
 */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	enum scsi_host_status hs = DID_OK;
	enum exec_status stat = SAS_SAM_STAT_GOOD;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			scsi_set_resid(sc, ts->residual);
			/* only an error if less than the command's stated
			 * underflow limit was actually transferred */
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			/* an LLDD must not report this for an SSP task;
			 * leave the default DID_OK/GOOD result in place */
			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
				  task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAS_SAM_STAT_CHECK_CONDITION:
			/* copy only as much sense data as the midlayer
			 * buffer can hold */
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAS_SAM_STAT_CHECK_CONDITION;
			break;
		default:
			/* pass any other SAM status straight through */
			stat = ts->stat;
			break;
		}
	}

	sc->result = (hs << 16) | stat;
	ASSIGN_SAS_TASK(sc, NULL);
	sas_free_task(task);
}
  98
/*
 * sas_scsi_task_done - libsas completion callback for SSP commands
 * @task: the completed sas_task
 *
 * Runs in the LLDD's completion context.  If error handling has frozen
 * the HA (SAS_HA_FROZEN) the task is left alone for the EH thread to
 * complete; otherwise the task is detached from the command here and
 * completed back to the SCSI midlayer.  dev->done_lock arbitrates the
 * race between this path and sas_eh_handle_sas_errors().
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;	/* eh owns the task now */
	else
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		pr_debug("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		pr_debug("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	scsi_done(sc);
}
 128
 129static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
 130					       struct domain_device *dev,
 131					       gfp_t gfp_flags)
 132{
 133	struct sas_task *task = sas_alloc_task(gfp_flags);
 134	struct scsi_lun lun;
 135
 136	if (!task)
 137		return NULL;
 138
 139	task->uldd_task = cmd;
 140	ASSIGN_SAS_TASK(cmd, task);
 141
 142	task->dev = dev;
 143	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
 144
 
 145	int_to_scsilun(cmd->device->lun, &lun);
 146	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
 147	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
 148	task->ssp_task.cmd = cmd;
 149
 150	task->scatter = scsi_sglist(cmd);
 151	task->num_scatter = scsi_sg_count(cmd);
 152	task->total_xfer_len = scsi_bufflen(cmd);
 153	task->data_dir = cmd->sc_data_direction;
 154
 155	task->task_done = sas_scsi_task_done;
 156
 157	return task;
 158}
 159
/*
 * sas_queuecommand - SCSI midlayer queuecommand entry point for libsas
 * @host: host the command arrived on
 * @cmd:  command to execute
 *
 * SATA commands are routed to libata under the ata port lock; SSP
 * commands are wrapped in a sas_task and handed to the LLDD via
 * lldd_execute_task().  Returns 0, SCSI_MLQUEUE_HOST_BUSY, or the
 * return of ata_sas_queuecmd().
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		/* libata requires its port lock around qc issue */
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	pr_debug("lldd_execute_task returned: %d\n", res);
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	scsi_done(cmd);
	return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
 202
/*
 * sas_eh_finish_cmd - complete an aborted command during error handling
 * @cmd: command whose task has been aborted
 *
 * Frees the command's sas_task and records its result, then either
 * defers the command to libata EH (for SATA devices) or moves it to
 * the host's EH done queue.
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer commands to libata so that libata EH can
		 * handle ata qcs correctly
		 */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
 229
 230static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
 231{
 232	struct scsi_cmnd *cmd, *n;
 233
 234	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 235		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
 236		    cmd->device->lun == my_cmd->device->lun)
 237			sas_eh_finish_cmd(cmd);
 238	}
 239}
 240
 241static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
 242				     struct domain_device *dev)
 243{
 244	struct scsi_cmnd *cmd, *n;
 245
 246	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 247		struct domain_device *x = cmd_to_domain_dev(cmd);
 248
 249		if (x == dev)
 250			sas_eh_finish_cmd(cmd);
 251	}
 252}
 253
 254static void sas_scsi_clear_queue_port(struct list_head *error_q,
 255				      struct asd_sas_port *port)
 256{
 257	struct scsi_cmnd *cmd, *n;
 258
 259	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 260		struct domain_device *dev = cmd_to_domain_dev(cmd);
 261		struct asd_sas_port *x = dev->port;
 262
 263		if (x == port)
 264			sas_eh_finish_cmd(cmd);
 265	}
 266}
 267
/* disposition of a task as determined by sas_scsi_find_task() */
enum task_disposition {
	TASK_IS_DONE,		/* LLDD already completed the task */
	TASK_IS_ABORTED,	/* abort TMF completed successfully */
	TASK_IS_AT_LU,		/* query says the LU still has the task */
	TASK_IS_NOT_AT_LU,	/* query says the LU does not have it */
	TASK_ABORT_FAILED,	/* neither abort nor query succeeded */
};
 275
/*
 * sas_scsi_find_task - try to abort a task and determine where it is
 * @task: task to locate/abort
 *
 * Makes up to five attempts: each attempt issues an abort TMF and, if
 * that does not complete, queries the LLDD for the task's whereabouts.
 * Returns the resulting task_disposition; TASK_ABORT_FAILED if all
 * attempts are exhausted.
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->shost->transportt);

	for (i = 0; i < 5; i++) {
		pr_notice("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		/* the LLDD may have completed the task while the abort
		 * was in flight; check under the state lock */
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			pr_debug("%s: task 0x%p is done\n", __func__, task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			pr_notice("%s: querying task 0x%p\n", __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				pr_notice("%s: task 0x%p at LU\n", __func__,
					  task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				pr_notice("%s: task 0x%p not at LU\n",
					  __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				pr_notice("%s: task 0x%p failed to abort\n",
					  __func__, task);
				return TASK_ABORT_FAILED;
			default:
				/* unknown response: retry the abort */
				pr_notice("%s: task 0x%p result code %d not handled\n",
					  __func__, task, res);
			}
		}
	}
	return TASK_ABORT_FAILED;
}
 323
 324static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
 325{
 326	int res = TMF_RESP_FUNC_FAILED;
 327	struct scsi_lun lun;
 328	struct sas_internal *i =
 329		to_sas_internal(dev->port->ha->shost->transportt);
 330
 331	int_to_scsilun(cmd->device->lun, &lun);
 332
 333	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
 334		  SAS_ADDR(dev->sas_addr),
 335		  cmd->device->lun);
 336
 337	if (i->dft->lldd_abort_task_set)
 338		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
 339
 340	if (res == TMF_RESP_FUNC_FAILED) {
 341		if (i->dft->lldd_clear_task_set)
 342			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
 343	}
 344
 345	if (res == TMF_RESP_FUNC_FAILED) {
 346		if (i->dft->lldd_lu_reset)
 347			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
 348	}
 349
 350	return res;
 351}
 352
 353static int sas_recover_I_T(struct domain_device *dev)
 354{
 355	int res = TMF_RESP_FUNC_FAILED;
 356	struct sas_internal *i =
 357		to_sas_internal(dev->port->ha->shost->transportt);
 358
 359	pr_notice("I_T nexus reset for dev %016llx\n",
 360		  SAS_ADDR(dev->sas_addr));
 361
 362	if (i->dft->lldd_I_T_nexus_reset)
 363		res = i->dft->lldd_I_T_nexus_reset(dev);
 364
 365	return res;
 366}
 367
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_phy *phy;
	unsigned long flags;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	BUG_ON(!dev->phy);

	/* phy_port_lock keeps dev->phy from being swapped while we
	 * take the device reference */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	phy = dev->phy;
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	/* caller must drop the reference with sas_put_local_phy() */
	return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
 388
/*
 * sas_queue_reset - queue a LUN or I_T nexus reset for the EH thread
 * @dev:        device to reset
 * @reset_type: SAS_DEV_LU_RESET or SAS_DEV_RESET
 * @lun:        LUN to reset (meaningful for LU resets only)
 *
 * Used when a reset is requested from outside the EH thread.  ATA
 * devices have the reset promoted to a libata-scheduled reset.  For
 * SSP the device is queued on ha->eh_dev_q and scsi EH is kicked;
 * the TMF itself is issued later by sas_eh_handle_resets().
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		return SUCCESS;
	}

	/* retry while a previously queued reset for this device is
	 * still pending */
	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->shost);
		}
		spin_unlock_irq(&ha->lock);

		if (scheduled)
			return SUCCESS;
	}

	pr_warn("%s reset of %s failed\n",
		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		dev_name(&dev->rphy->dev));

	return FAILED;
}
 424
/*
 * sas_eh_abort_handler - midlayer eh_abort_handler for libsas hosts
 * @cmd: command to abort
 *
 * Issues an abort-task TMF through the LLDD.  Aborts of SATA commands
 * are refused unless host EH is already scheduled, since SATA aborts
 * can only be handled synchronously by the EH thread.  Returns
 * SUCCESS or FAILED.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);
	unsigned long flags;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	spin_lock_irqsave(host->host_lock, flags);
	/* We cannot do async aborts for SATA devices */
	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return FAILED;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	/* the command may have completed already, leaving no task */
	if (task)
		res = i->dft->lldd_abort_task(task);
	else
		pr_notice("no task to abort\n");
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
 455
 456/* Attempt to send a LUN reset message to a device */
 457int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
 458{
 459	int res;
 460	struct scsi_lun lun;
 461	struct Scsi_Host *host = cmd->device->host;
 462	struct domain_device *dev = cmd_to_domain_dev(cmd);
 463	struct sas_internal *i = to_sas_internal(host->transportt);
 464
 465	if (current != host->ehandler)
 466		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
 467
 468	int_to_scsilun(cmd->device->lun, &lun);
 469
 470	if (!i->dft->lldd_lu_reset)
 471		return FAILED;
 472
 473	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
 474	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
 475		return SUCCESS;
 476
 477	return FAILED;
 478}
 479EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
 480
 481int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
 482{
 483	int res;
 484	struct Scsi_Host *host = cmd->device->host;
 485	struct domain_device *dev = cmd_to_domain_dev(cmd);
 486	struct sas_internal *i = to_sas_internal(host->transportt);
 487
 488	if (current != host->ehandler)
 489		return sas_queue_reset(dev, SAS_DEV_RESET, 0);
 490
 491	if (!i->dft->lldd_I_T_nexus_reset)
 492		return FAILED;
 493
 494	res = i->dft->lldd_I_T_nexus_reset(dev);
 495	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
 496	    res == -ENODEV)
 497		return SUCCESS;
 498
 499	return FAILED;
 500}
 501EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
 502
 503/* Try to reset a device */
 504static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
 505{
 506	int res;
 507	struct Scsi_Host *shost = cmd->device->host;
 508
 509	if (!shost->hostt->eh_device_reset_handler)
 510		goto try_target_reset;
 511
 512	res = shost->hostt->eh_device_reset_handler(cmd);
 513	if (res == SUCCESS)
 514		return res;
 515
 516try_target_reset:
 517	if (shost->hostt->eh_target_reset_handler)
 518		return shost->hostt->eh_target_reset_handler(cmd);
 519
 520	return FAILED;
 521}
 522
/*
 * sas_eh_handle_sas_errors - first-stage libsas error recovery
 * @shost:  host under error handling
 * @work_q: list of failed commands to recover
 *
 * Runs with SAS_HA_FROZEN set, so eh owns every command that still
 * has a sas_task attached.  Recovery escalates per task: abort ->
 * LU recovery -> I_T nexus recovery -> clear nexus (port, then HA).
 * Whenever an escalation step succeeds, every other queued command
 * covered by that step is finished too and the scan restarts from
 * the top of work_q.  Commands that completed before eh claimed them
 * are parked on a local list and spliced back at the end, along with
 * any commands deferred to libata.
 */
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(done);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

		if (!task)
			list_move_tail(&cmd->eh_entry, &done);
	}

 Again:
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (need_reset) {
			/* LLDD asked for a reset; skip straight to LU
			 * recovery */
			pr_notice("%s: task 0x%p requests reset\n",
				  __func__, task);
			goto reset;
		}

		pr_debug("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		switch (res) {
		case TASK_IS_DONE:
			pr_notice("%s: task 0x%p is done\n", __func__,
				    task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_ABORTED:
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_AT_LU:
			pr_info("task 0x%p is at LU: lu recover\n", task);
 reset:
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				pr_notice("dev %016llx LU 0x%llx is recovered\n",
					  SAS_ADDR(task->dev),
					  cmd->device->lun);
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			fallthrough;
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			pr_notice("task 0x%p is not at LU: I_T recover\n",
				  task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				pr_notice("I_T %016llx recovered\n",
					  SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
				goto Again;
			}
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				pr_debug("clearing nexus for port:%d\n",
					  port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus port:%d succeeded\n",
						  port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				pr_debug("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus ha succeeded\n");
					sas_eh_finish_cmd(cmd);
					goto clear_q;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors.  Quite
			 * possibly the HA just disappeared.
			 */
			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
			       SAS_ADDR(task->dev->sas_addr),
			       cmd->device->lun);

			sas_eh_finish_cmd(cmd);
			goto clear_q;
		}
	}
 out:
	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);
	return;

 clear_q:
	pr_debug("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
	goto out;
}
 655
/*
 * sas_eh_handle_resets - issue resets queued by sas_queue_reset()
 * @shost: host whose ha->eh_dev_q is drained
 *
 * Pops each device off ha->eh_dev_q and performs whichever resets its
 * state bits request.  ha->lock is dropped around the LLDD reset calls
 * and reacquired before clearing SAS_DEV_EH_PENDING and decrementing
 * ha->eh_active.  A device reference is held across the unlocked
 * section.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}
 688
 689
/*
 * sas_scsi_recover_host - libsas strategy handler for scsi EH
 * @shost: host to recover
 *
 * Freezes the HA so eh owns all outstanding sas_tasks, runs the libsas
 * recovery stages (task-level errors, sense handling, queued resets,
 * libata EH), and repeats until no new eh work was scheduled during
 * the run.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	pr_notice("Enter %s busy: %d failed: %d\n",
		  __func__, scsi_host_busy(shost), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
		  __func__, scsi_host_busy(shost),
		  shost->host_failed, tries);
}
 750
 751int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
 752{
 753	struct domain_device *dev = sdev_to_domain_dev(sdev);
 754
 755	if (dev_is_sata(dev))
 756		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
 757
 758	return -EINVAL;
 759}
 760EXPORT_SYMBOL_GPL(sas_ioctl);
 761
/*
 * sas_find_dev_by_rphy - look up the domain device behind a sas_rphy
 * @rphy: remote phy published by the transport class
 *
 * Walks every port's device list under ha->phy_port_lock (with the
 * per-port dev_list_lock nested inside).  Returns the matching
 * domain_device, or NULL if none is found.  No reference is taken;
 * callers that keep the device must kref_get() it themselves.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}
 790
 791int sas_target_alloc(struct scsi_target *starget)
 792{
 793	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
 794	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
 795
 796	if (!found_dev)
 797		return -ENODEV;
 798
 799	kref_get(&found_dev->kref);
 800	starget->hostdata = found_dev;
 801	return 0;
 802}
 803EXPORT_SYMBOL_GPL(sas_target_alloc);
 804
/* default queue depth for devices that support tagged queueing */
#define SAS_DEF_QD 256

/*
 * sas_slave_configure - midlayer slave_configure hook
 * @scsi_dev: device being configured
 *
 * SATA devices are handed to libata for configuration.  SSP end
 * devices get the port control mode page read and a queue depth of
 * SAS_DEF_QD (or 1 without TCQ support).
 */
int sas_slave_configure(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

	if (dev_is_sata(dev)) {
		ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
		return 0;
	}

	sas_read_port_mode_page(scsi_dev);

	if (scsi_dev->tagged_supported) {
		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
	} else {
		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
		scsi_change_queue_depth(scsi_dev, 1);
	}

	/* let the midlayer restart stalled queues after recovery */
	scsi_dev->allow_restart = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(sas_slave_configure);
 833
 834int sas_change_queue_depth(struct scsi_device *sdev, int depth)
 835{
 836	struct domain_device *dev = sdev_to_domain_dev(sdev);
 837
 838	if (dev_is_sata(dev))
 839		return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
 
 840
 841	if (!sdev->tagged_supported)
 842		depth = 1;
 843	return scsi_change_queue_depth(sdev, depth);
 844}
 845EXPORT_SYMBOL_GPL(sas_change_queue_depth);
 846
 847int sas_bios_param(struct scsi_device *scsi_dev,
 848			  struct block_device *bdev,
 849			  sector_t capacity, int *hsc)
 850{
 851	hsc[0] = 255;
 852	hsc[1] = 63;
 853	sector_div(capacity, 255*63);
 854	hsc[2] = capacity;
 855
 856	return 0;
 857}
 858EXPORT_SYMBOL_GPL(sas_bios_param);
 859
/* completion callback for internal slow tasks: cancel the watchdog
 * timer, then wake the waiter in sas_execute_tmf()/internal abort */
void sas_task_internal_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
 865
/*
 * sas_task_internal_timedout - watchdog for internal slow tasks
 *
 * Marks the task aborted unless the LLDD managed to complete it first;
 * the DONE check under task_state_lock resolves the race with
 * sas_task_internal_done().  Only completes the waiter when the task
 * genuinely timed out.
 */
void sas_task_internal_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	bool is_completed = true;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}
 883
/* watchdog timeout and retry budget for internal slow tasks */
#define TASK_TIMEOUT			(20 * HZ)
#define TASK_RETRY			3

/*
 * sas_execute_internal_abort - issue an internal abort task to the LLDD
 * @device: device the abort is addressed to
 * @type:   single-command or whole-device abort
 * @tag:    tag of the command to abort (SCSI_NO_TAG for device aborts)
 * @qid:    hardware queue the aborted command was issued on
 * @data:   opaque cookie passed to lldd_abort_timeout()
 *
 * Allocates a slow task, arms a TASK_TIMEOUT watchdog and waits for
 * completion, retrying up to TASK_RETRY times.  On timeout the LLDD's
 * lldd_abort_timeout() decides whether to keep retrying.  Returns a
 * TMF_RESP_* code or a negative errno.
 */
static int sas_execute_internal_abort(struct domain_device *device,
				      enum sas_internal_abort type, u16 tag,
				      unsigned int qid, void *data)
{
	struct sas_ha_struct *ha = device->port->ha;
	struct sas_internal *i = to_sas_internal(ha->shost->transportt);
	struct sas_task *task = NULL;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
		task->task_done = sas_task_internal_done;
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		task->abort_task.tag = tag;
		task->abort_task.type = type;
		task->abort_task.qid = qid;

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			pr_err("Executing internal abort failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;

		/* Even if the internal abort timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			bool quit = true;

			if (i->dft->lldd_abort_timeout)
				quit = i->dft->lldd_abort_timeout(task, data);
			else
				pr_err("Internal abort: timeout %016llx\n",
				       SAS_ADDR(device->sas_addr));
			res = -EIO;
			if (quit)
				break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		/* unexpected response: free this task and retry */
		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
		       SAS_ADDR(device->sas_addr), task->task_status.resp,
		       task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
	BUG_ON(retry == TASK_RETRY && task != NULL);
	sas_free_task(task);
	return res;
}
 959
/* abort a single outstanding command identified by @tag on queue @qid */
int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
				      unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
					  tag, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
 967
/* abort all outstanding commands for @device on queue @qid */
int sas_execute_internal_abort_dev(struct domain_device *device,
				   unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
					  SCSI_NO_TAG, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
 975
/*
 * sas_execute_tmf - build and execute a task management function
 * @device:       target of the TMF
 * @parameter:    protocol frame (sas_ssp_task for SSP, FIS for SATA)
 * @para_len:     length of @parameter in bytes
 * @force_phy_id: for SATA, phy to force the frame out of (< 0: none)
 * @tmf:          TMF descriptor handed to the LLDD
 *
 * Allocates a slow task, arms a TASK_TIMEOUT watchdog, waits for
 * completion and interprets the response, retrying up to TASK_RETRY
 * times on unexpected responses.  Returns a TMF_RESP_* code, a
 * residual byte count on data underrun, or a negative errno.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}

		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				/* watchdog fired and the LLDD never
				 * completed the task: give up */
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			pr_warn("TMF task open reject failed  %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		/* unexpected response: free this task and retry */
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);
	sas_free_task(task);

	return res;
}
1093
1094static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
1095			       struct sas_tmf_task *tmf)
1096{
1097	struct sas_ssp_task ssp_task;
1098
1099	if (!(device->tproto & SAS_PROTOCOL_SSP))
1100		return TMF_RESP_FUNC_ESUPP;
1101
1102	memcpy(ssp_task.LUN, lun, 8);
1103
1104	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
1105}
1106
1107int sas_abort_task_set(struct domain_device *dev, u8 *lun)
1108{
1109	struct sas_tmf_task tmf_task = {
1110		.tmf = TMF_ABORT_TASK_SET,
1111	};
1112
1113	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1114}
1115EXPORT_SYMBOL_GPL(sas_abort_task_set);
1116
1117int sas_clear_task_set(struct domain_device *dev, u8 *lun)
1118{
1119	struct sas_tmf_task tmf_task = {
1120		.tmf = TMF_CLEAR_TASK_SET,
1121	};
1122
1123	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1124}
1125EXPORT_SYMBOL_GPL(sas_clear_task_set);
1126
1127int sas_lu_reset(struct domain_device *dev, u8 *lun)
1128{
1129	struct sas_tmf_task tmf_task = {
1130		.tmf = TMF_LU_RESET,
1131	};
1132
1133	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1134}
1135EXPORT_SYMBOL_GPL(sas_lu_reset);
1136
1137int sas_query_task(struct sas_task *task, u16 tag)
1138{
1139	struct sas_tmf_task tmf_task = {
1140		.tmf = TMF_QUERY_TASK,
1141		.tag_of_task_to_be_managed = tag,
1142	};
1143	struct scsi_cmnd *cmnd = task->uldd_task;
1144	struct domain_device *dev = task->dev;
1145	struct scsi_lun lun;
1146
1147	int_to_scsilun(cmnd->device->lun, &lun);
1148
1149	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1150}
1151EXPORT_SYMBOL_GPL(sas_query_task);
1152
1153int sas_abort_task(struct sas_task *task, u16 tag)
1154{
1155	struct sas_tmf_task tmf_task = {
1156		.tmf = TMF_ABORT_TASK,
1157		.tag_of_task_to_be_managed = tag,
1158	};
1159	struct scsi_cmnd *cmnd = task->uldd_task;
1160	struct domain_device *dev = task->dev;
1161	struct scsi_lun lun;
1162
1163	int_to_scsilun(cmnd->device->lun, &lun);
1164
1165	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1166}
1167EXPORT_SYMBOL_GPL(sas_abort_task);
1168
/*
 * Tell an upper layer that it needs to initiate an abort for a given task.
 * This should only ever be called by an LLDD.
 */
void sas_task_abort(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;

	/* Escape for libsas internal commands */
	if (!sc) {
		struct sas_task_slow *slow = task->slow_task;

		if (!slow)
			return;
		/*
		 * Run the timeout handler ourselves only if we actually
		 * deactivated a pending timer.  If del_timer() returns 0 the
		 * timer has already fired (or was never armed), so the
		 * timeout path owns completion of this task.
		 */
		if (!del_timer(&slow->timer))
			return;
		slow->timer.function(&slow->timer);
		return;
	}

	/* Real SCSI command: route SATA to libata EH, SSP to the block
	 * layer's request abort, which kicks the SCSI error handler.
	 */
	if (dev_is_sata(task->dev))
		sas_ata_task_abort(task);
	else
		blk_abort_request(scsi_cmd_to_rq(sc));
}
EXPORT_SYMBOL_GPL(sas_task_abort);
1195
1196int sas_slave_alloc(struct scsi_device *sdev)
1197{
1198	if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
1199		return -ENXIO;
1200
1201	return 0;
1202}
1203EXPORT_SYMBOL_GPL(sas_slave_alloc);
1204
1205void sas_target_destroy(struct scsi_target *starget)
1206{
1207	struct domain_device *found_dev = starget->hostdata;
1208
1209	if (!found_dev)
1210		return;
1211
1212	starget->hostdata = NULL;
1213	sas_put_device(found_dev);
1214}
1215EXPORT_SYMBOL_GPL(sas_target_destroy);
1216
1217#define SAS_STRING_ADDR_SIZE	16
1218
1219int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
1220{
1221	int res;
1222	const struct firmware *fw;
1223
1224	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
1225	if (res)
1226		return res;
1227
1228	if (fw->size < SAS_STRING_ADDR_SIZE) {
1229		res = -ENODEV;
1230		goto out;
1231	}
1232
1233	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
1234	if (res)
1235		goto out;
1236
1237out:
1238	release_firmware(fw);
1239	return res;
1240}
1241EXPORT_SYMBOL_GPL(sas_request_addr);
1242
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Serial Attached SCSI (SAS) class SCSI Host glue.
   4 *
   5 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
   6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
   7 */
   8
   9#include <linux/kthread.h>
  10#include <linux/firmware.h>
  11#include <linux/export.h>
  12#include <linux/ctype.h>
  13#include <linux/kernel.h>
  14
  15#include "sas_internal.h"
  16
  17#include <scsi/scsi_host.h>
  18#include <scsi/scsi_device.h>
  19#include <scsi/scsi_tcq.h>
  20#include <scsi/scsi.h>
  21#include <scsi/scsi_eh.h>
  22#include <scsi/scsi_transport.h>
  23#include <scsi/scsi_transport_sas.h>
  24#include <scsi/sas_ata.h>
  25#include "scsi_sas_internal.h"
  26#include "scsi_transport_api.h"
  27#include "scsi_priv.h"
  28
  29#include <linux/err.h>
  30#include <linux/blkdev.h>
  31#include <linux/freezer.h>
  32#include <linux/gfp.h>
  33#include <linux/scatterlist.h>
  34#include <linux/libata.h>
  35
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	enum scsi_host_status hs = DID_OK;
	enum exec_status stat = SAS_SAM_STAT_GOOD;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			/* Underrun is only an error if less than the
			 * command's declared underflow limit arrived.
			 */
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
				  task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAS_SAM_STAT_CHECK_CONDITION:
			/* propagate the sense data gathered by the LLDD */
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAS_SAM_STAT_CHECK_CONDITION;
			break;
		default:
			/* pass any other SAM status straight through */
			stat = ts->stat;
			break;
		}
	}

	/* host byte in the upper half, SAM status in the lower */
	sc->result = (hs << 16) | stat;
	/* detach the task from the command before freeing it */
	ASSIGN_SAS_TASK(sc, NULL);
	sas_free_task(task);
}
  98
/*
 * LLDD completion callback for normal (fast path) SSP tasks.  Races with
 * the error handler: while SAS_HA_FROZEN is set, EH owns every
 * outstanding task and we must not complete it here.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	/* done_lock arbitrates the completion-vs-EH race: either EH sees
	 * the task still attached and takes it, or we detach it here and
	 * complete it ourselves.
	 */
	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;
	else
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		pr_debug("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		pr_debug("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	scsi_done(sc);
}
 128
 129static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
 130					       struct domain_device *dev,
 131					       gfp_t gfp_flags)
 132{
 133	struct sas_task *task = sas_alloc_task(gfp_flags);
 134	struct scsi_lun lun;
 135
 136	if (!task)
 137		return NULL;
 138
 139	task->uldd_task = cmd;
 140	ASSIGN_SAS_TASK(cmd, task);
 141
 142	task->dev = dev;
 143	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
 144
 145	task->ssp_task.retry_count = 1;
 146	int_to_scsilun(cmd->device->lun, &lun);
 147	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
 148	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
 149	task->ssp_task.cmd = cmd;
 150
 151	task->scatter = scsi_sglist(cmd);
 152	task->num_scatter = scsi_sg_count(cmd);
 153	task->total_xfer_len = scsi_bufflen(cmd);
 154	task->data_dir = cmd->sc_data_direction;
 155
 156	task->task_done = sas_scsi_task_done;
 157
 158	return task;
 159}
 160
/*
 * queuecommand entry point for libsas hosts.  SATA commands are handed to
 * libata under the ata_port lock; SSP commands are wrapped in a sas_task
 * and passed to the LLDD.  GFP_ATOMIC throughout since queuecommand runs
 * in atomic context.
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	pr_debug("lldd_execute_task returned: %d\n", res);
	/* detach and free the task, then fail the command in place */
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	scsi_done(cmd);
	return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
 203
/*
 * Finish a command from within the error handler: record its final status,
 * free its task, and either hand it to libata (SATA) or move it to the
 * host's EH done queue (SSP).
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer commands to libata so that libata EH can
		 * handle ata qcs correctly
		 */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
 230
 231static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
 232{
 233	struct scsi_cmnd *cmd, *n;
 234
 235	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 236		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
 237		    cmd->device->lun == my_cmd->device->lun)
 238			sas_eh_finish_cmd(cmd);
 239	}
 240}
 241
 242static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
 243				     struct domain_device *dev)
 244{
 245	struct scsi_cmnd *cmd, *n;
 246
 247	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 248		struct domain_device *x = cmd_to_domain_dev(cmd);
 249
 250		if (x == dev)
 251			sas_eh_finish_cmd(cmd);
 252	}
 253}
 254
 255static void sas_scsi_clear_queue_port(struct list_head *error_q,
 256				      struct asd_sas_port *port)
 257{
 258	struct scsi_cmnd *cmd, *n;
 259
 260	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 261		struct domain_device *dev = cmd_to_domain_dev(cmd);
 262		struct asd_sas_port *x = dev->port;
 263
 264		if (x == port)
 265			sas_eh_finish_cmd(cmd);
 266	}
 267}
 268
/* Outcome of sas_scsi_find_task(): where an outstanding task ended up. */
enum task_disposition {
	TASK_IS_DONE,		/* completed while we were aborting it */
	TASK_IS_ABORTED,	/* LLDD abort reported success */
	TASK_IS_AT_LU,		/* query says it is still at the LU */
	TASK_IS_NOT_AT_LU,	/* query says the LU does not have it */
	TASK_ABORT_FAILED,	/* abort/query exhausted without result */
};
 276
/*
 * Try (up to five times) to abort @task via the LLDD and, if the abort is
 * inconclusive, query the LLDD for where the task currently is.
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->core.shost->transportt);

	for (i = 0; i < 5; i++) {
		pr_notice("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		/* the task may have completed while the abort was in
		 * flight; check DONE under the state lock first
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			pr_debug("%s: task 0x%p is done\n", __func__, task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			/* abort inconclusive: ask the LLDD where the task is */
			pr_notice("%s: querying task 0x%p\n", __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				pr_notice("%s: task 0x%p at LU\n", __func__,
					  task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				pr_notice("%s: task 0x%p not at LU\n",
					  __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				pr_notice("%s: task 0x%p failed to abort\n",
					  __func__, task);
				return TASK_ABORT_FAILED;
			default:
				/* unrecognized answer: retry the abort */
				pr_notice("%s: task 0x%p result code %d not handled\n",
					  __func__, task, res);
			}
		}
	}
	return TASK_ABORT_FAILED;
}
 324
/*
 * Escalating LU-level recovery for the LU @cmd was addressed to:
 * abort task set -> clear task set -> LU reset, stopping at the first
 * step that succeeds.  Steps whose LLDD hook is absent are skipped.
 */
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct scsi_lun lun;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	int_to_scsilun(cmd->device->lun, &lun);

	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
		  SAS_ADDR(dev->sas_addr),
		  cmd->device->lun);

	if (i->dft->lldd_abort_task_set)
		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_clear_task_set)
			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
	}

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_lu_reset)
			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	}

	return res;
}
 353
 354static int sas_recover_I_T(struct domain_device *dev)
 355{
 356	int res = TMF_RESP_FUNC_FAILED;
 357	struct sas_internal *i =
 358		to_sas_internal(dev->port->ha->core.shost->transportt);
 359
 360	pr_notice("I_T nexus reset for dev %016llx\n",
 361		  SAS_ADDR(dev->sas_addr));
 362
 363	if (i->dft->lldd_I_T_nexus_reset)
 364		res = i->dft->lldd_I_T_nexus_reset(dev);
 365
 366	return res;
 367}
 368
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_phy *phy;
	unsigned long flags;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	BUG_ON(!dev->phy);

	/* phy_port_lock keeps dev->phy stable while we take the reference;
	 * the caller is expected to drop it (presumably via
	 * sas_put_local_phy — confirm against the transport class).
	 */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	phy = dev->phy;
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
 389
/*
 * Block until error handling for @dev has finished.  SATA devices defer
 * to libata's port EH wait; SSP devices wait for SAS_DEV_EH_PENDING to
 * clear and then for the SCSI host to leave recovery.
 */
static void sas_wait_eh(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	DEFINE_WAIT(wait);

	if (dev_is_sata(dev)) {
		ata_port_wait_eh(dev->sata_dev.ap);
		return;
	}
 retry:
	spin_lock_irq(&ha->lock);

	/* sleep on eh_wait_q until the EH thread clears the pending bit */
	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ha->lock);
		schedule();
		spin_lock_irq(&ha->lock);
	}
	finish_wait(&ha->eh_wait_q, &wait);

	spin_unlock_irq(&ha->lock);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ha->core.shost)) {
		msleep(10);
		goto retry;
	}
}
 418
/*
 * Queue a LU or device reset of @dev to be executed by the SCSI error
 * handler thread, optionally waiting for it to complete.  Returns
 * SUCCESS once scheduled (or, with @wait, completed), FAILED if it could
 * not be scheduled within 100 attempts.
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type,
			   u64 lun, int wait)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		if (wait)
			sas_ata_wait_eh(dev);
		return SUCCESS;
	}

	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		/* only queue if no EH is pending and this reset type is not
		 * already requested for the device
		 */
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->core.shost);
		}
		spin_unlock_irq(&ha->lock);

		if (wait)
			sas_wait_eh(dev);

		if (scheduled)
			return SUCCESS;
	}

	pr_warn("%s reset of %s failed\n",
		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		dev_name(&dev->rphy->dev));

	return FAILED;
}
 460
/*
 * scsi_host_template eh_abort_handler: ask the LLDD to abort the task
 * behind @cmd.  Returns SUCCESS only if the LLDD confirms the abort.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);
	unsigned long flags;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	spin_lock_irqsave(host->host_lock, flags);
	/* We cannot do async aborts for SATA devices */
	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return FAILED;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	/* the task may already have completed and been detached */
	if (task)
		res = i->dft->lldd_abort_task(task);
	else
		pr_notice("no task to abort\n");
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
 491
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct scsi_lun lun;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* outside the EH thread we only queue the reset for it to run */
	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);

	int_to_scsilun(cmd->device->lun, &lun);

	if (!i->dft->lldd_lu_reset)
		return FAILED;

	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
 516
/*
 * scsi_host_template eh_target_reset_handler: perform (or, outside the EH
 * thread, queue) an I_T nexus reset of the device behind @cmd.
 */
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);

	if (!i->dft->lldd_I_T_nexus_reset)
		return FAILED;

	/* -ENODEV means the device is gone, which also ends recovery */
	res = i->dft->lldd_I_T_nexus_reset(dev);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
	    res == -ENODEV)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
 538
 539/* Try to reset a device */
 540static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
 541{
 542	int res;
 543	struct Scsi_Host *shost = cmd->device->host;
 544
 545	if (!shost->hostt->eh_device_reset_handler)
 546		goto try_target_reset;
 547
 548	res = shost->hostt->eh_device_reset_handler(cmd);
 549	if (res == SUCCESS)
 550		return res;
 551
 552try_target_reset:
 553	if (shost->hostt->eh_target_reset_handler)
 554		return shost->hostt->eh_target_reset_handler(cmd);
 555
 556	return FAILED;
 557}
 558
 559static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
 560{
 561	struct scsi_cmnd *cmd, *n;
 562	enum task_disposition res = TASK_IS_DONE;
 563	int tmf_resp, need_reset;
 564	struct sas_internal *i = to_sas_internal(shost->transportt);
 565	unsigned long flags;
 566	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
 567	LIST_HEAD(done);
 568
 569	/* clean out any commands that won the completion vs eh race */
 570	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
 571		struct domain_device *dev = cmd_to_domain_dev(cmd);
 572		struct sas_task *task;
 573
 574		spin_lock_irqsave(&dev->done_lock, flags);
 575		/* by this point the lldd has either observed
 576		 * SAS_HA_FROZEN and is leaving the task alone, or has
 577		 * won the race with eh and decided to complete it
 578		 */
 579		task = TO_SAS_TASK(cmd);
 580		spin_unlock_irqrestore(&dev->done_lock, flags);
 581
 582		if (!task)
 583			list_move_tail(&cmd->eh_entry, &done);
 584	}
 585
 586 Again:
 587	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
 588		struct sas_task *task = TO_SAS_TASK(cmd);
 589
 590		list_del_init(&cmd->eh_entry);
 591
 592		spin_lock_irqsave(&task->task_state_lock, flags);
 593		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
 594		spin_unlock_irqrestore(&task->task_state_lock, flags);
 595
 596		if (need_reset) {
 597			pr_notice("%s: task 0x%p requests reset\n",
 598				  __func__, task);
 599			goto reset;
 600		}
 601
 602		pr_debug("trying to find task 0x%p\n", task);
 603		res = sas_scsi_find_task(task);
 604
 605		switch (res) {
 606		case TASK_IS_DONE:
 607			pr_notice("%s: task 0x%p is done\n", __func__,
 608				    task);
 609			sas_eh_finish_cmd(cmd);
 610			continue;
 611		case TASK_IS_ABORTED:
 612			pr_notice("%s: task 0x%p is aborted\n",
 613				  __func__, task);
 614			sas_eh_finish_cmd(cmd);
 615			continue;
 616		case TASK_IS_AT_LU:
 617			pr_info("task 0x%p is at LU: lu recover\n", task);
 618 reset:
 619			tmf_resp = sas_recover_lu(task->dev, cmd);
 620			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
 621				pr_notice("dev %016llx LU 0x%llx is recovered\n",
 622					  SAS_ADDR(task->dev),
 623					  cmd->device->lun);
 624				sas_eh_finish_cmd(cmd);
 625				sas_scsi_clear_queue_lu(work_q, cmd);
 626				goto Again;
 627			}
 628			fallthrough;
 629		case TASK_IS_NOT_AT_LU:
 630		case TASK_ABORT_FAILED:
 631			pr_notice("task 0x%p is not at LU: I_T recover\n",
 632				  task);
 633			tmf_resp = sas_recover_I_T(task->dev);
 634			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
 635			    tmf_resp == -ENODEV) {
 636				struct domain_device *dev = task->dev;
 637				pr_notice("I_T %016llx recovered\n",
 638					  SAS_ADDR(task->dev->sas_addr));
 639				sas_eh_finish_cmd(cmd);
 640				sas_scsi_clear_queue_I_T(work_q, dev);
 641				goto Again;
 642			}
 643			/* Hammer time :-) */
 644			try_to_reset_cmd_device(cmd);
 645			if (i->dft->lldd_clear_nexus_port) {
 646				struct asd_sas_port *port = task->dev->port;
 647				pr_debug("clearing nexus for port:%d\n",
 648					  port->id);
 649				res = i->dft->lldd_clear_nexus_port(port);
 650				if (res == TMF_RESP_FUNC_COMPLETE) {
 651					pr_notice("clear nexus port:%d succeeded\n",
 652						  port->id);
 653					sas_eh_finish_cmd(cmd);
 654					sas_scsi_clear_queue_port(work_q,
 655								  port);
 656					goto Again;
 657				}
 658			}
 659			if (i->dft->lldd_clear_nexus_ha) {
 660				pr_debug("clear nexus ha\n");
 661				res = i->dft->lldd_clear_nexus_ha(ha);
 662				if (res == TMF_RESP_FUNC_COMPLETE) {
 663					pr_notice("clear nexus ha succeeded\n");
 664					sas_eh_finish_cmd(cmd);
 665					goto clear_q;
 666				}
 667			}
 668			/* If we are here -- this means that no amount
 669			 * of effort could recover from errors.  Quite
 670			 * possibly the HA just disappeared.
 671			 */
 672			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
 673			       SAS_ADDR(task->dev->sas_addr),
 674			       cmd->device->lun);
 675
 676			sas_eh_finish_cmd(cmd);
 677			goto clear_q;
 678		}
 679	}
 680 out:
 681	list_splice_tail(&done, work_q);
 682	list_splice_tail_init(&ha->eh_ata_q, work_q);
 683	return;
 684
 685 clear_q:
 686	pr_debug("--- Exit %s -- clear_q\n", __func__);
 687	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
 688		sas_eh_finish_cmd(cmd);
 689	goto out;
 690}
 691
/*
 * Execute the directed resets queued on ha->eh_dev_q by sas_queue_reset().
 * The ha->lock is dropped around the LLDD reset calls and retaken to
 * update the queue and bookkeeping.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		/* hold a reference across the unlocked reset calls */
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		/* allow sas_wait_eh() waiters and new queued resets */
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}
 724
 725
/*
 * Strategy handler for libsas hosts: drain the host's EH command queue,
 * recover tasks (with SAS_HA_FROZEN held so the EH owns completions),
 * run the SCSI/libata recovery machinery, execute queued resets, and
 * repeat until no new EH work was scheduled during the pass.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	pr_notice("Enter %s busy: %d failed: %d\n",
		  __func__, scsi_host_busy(shost), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have a an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
		  __func__, scsi_host_busy(shost),
		  shost->host_failed, tries);
}
 786
 787int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
 788{
 789	struct domain_device *dev = sdev_to_domain_dev(sdev);
 790
 791	if (dev_is_sata(dev))
 792		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
 793
 794	return -EINVAL;
 795}
 796EXPORT_SYMBOL_GPL(sas_ioctl);
 797
/*
 * Walk every port's device list looking for the domain device that owns
 * @rphy.  Returns the device or NULL; no reference is taken here, the
 * caller must take one (see sas_target_alloc()) if it keeps the pointer.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	/* phy_port_lock guards the port array; each port's dev_list has
	 * its own inner lock
	 */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}
 826
 827int sas_target_alloc(struct scsi_target *starget)
 828{
 829	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
 830	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
 831
 832	if (!found_dev)
 833		return -ENODEV;
 834
 835	kref_get(&found_dev->kref);
 836	starget->hostdata = found_dev;
 837	return 0;
 838}
 839EXPORT_SYMBOL_GPL(sas_target_alloc);
 840
 841#define SAS_DEF_QD 256
 842
/*
 * slave_configure: hand SATA devices to libata; for SSP end devices read
 * the port control mode page and set the queue depth.
 */
int sas_slave_configure(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

	/* only end devices get SCSI targets */
	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

	if (dev_is_sata(dev)) {
		ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
		return 0;
	}

	sas_read_port_mode_page(scsi_dev);

	if (scsi_dev->tagged_supported) {
		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
	} else {
		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
		/* untagged: one command at a time */
		scsi_change_queue_depth(scsi_dev, 1);
	}

	scsi_dev->allow_restart = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(sas_slave_configure);
 869
 870int sas_change_queue_depth(struct scsi_device *sdev, int depth)
 871{
 872	struct domain_device *dev = sdev_to_domain_dev(sdev);
 873
 874	if (dev_is_sata(dev))
 875		return ata_change_queue_depth(dev->sata_dev.ap,
 876					      sas_to_ata_dev(dev), sdev, depth);
 877
 878	if (!sdev->tagged_supported)
 879		depth = 1;
 880	return scsi_change_queue_depth(sdev, depth);
 881}
 882EXPORT_SYMBOL_GPL(sas_change_queue_depth);
 883
 884int sas_bios_param(struct scsi_device *scsi_dev,
 885			  struct block_device *bdev,
 886			  sector_t capacity, int *hsc)
 887{
 888	hsc[0] = 255;
 889	hsc[1] = 63;
 890	sector_div(capacity, 255*63);
 891	hsc[2] = capacity;
 892
 893	return 0;
 894}
 895EXPORT_SYMBOL_GPL(sas_bios_param);
 896
/*
 * Completion callback for internal (slow-path) tasks: disarm the expiry
 * timer and wake the waiter sleeping on slow_task->completion.
 */
void sas_task_internal_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
 902
 903void sas_task_internal_timedout(struct timer_list *t)
 904{
 905	struct sas_task_slow *slow = from_timer(slow, t, timer);
 906	struct sas_task *task = slow->task;
 907	bool is_completed = true;
 908	unsigned long flags;
 909
 910	spin_lock_irqsave(&task->task_state_lock, flags);
 911	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 912		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 913		is_completed = false;
 914	}
 915	spin_unlock_irqrestore(&task->task_state_lock, flags);
 916
 917	if (!is_completed)
 918		complete(&task->slow_task->completion);
 919}
 920
 921#define TASK_TIMEOUT			(20 * HZ)
 922#define TASK_RETRY			3
 923
/*
 * sas_execute_internal_abort - ask the LLDD to abort commands internally
 * @device: domain device the command(s) were issued to
 * @type: abort a single tagged command or all commands for the device
 * @tag: tag of the command to abort (used for SAS_INTERNAL_ABORT_SINGLE)
 * @qid: index of the hardware queue the command was issued on
 * @data: opaque cookie forwarded to the LLDD's lldd_abort_timeout handler
 *
 * Allocates a slow task carrying the abort parameters, hands it to the
 * LLDD and sleeps until it completes or the TASK_TIMEOUT timer fires,
 * retrying up to TASK_RETRY times on an unrecognized response.
 *
 * Return: TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_SUCC on success,
 * TMF_RESP_FUNC_FAILED, -ENOMEM, -EIO, or the LLDD's error code.
 */
static int sas_execute_internal_abort(struct domain_device *device,
				      enum sas_internal_abort type, u16 tag,
				      unsigned int qid, void *data)
{
	struct sas_ha_struct *ha = device->port->ha;
	struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
	struct sas_task *task = NULL;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
		task->task_done = sas_task_internal_done;
		/* Arm the timeout before handing the task to the LLDD */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		task->abort_task.tag = tag;
		task->abort_task.type = type;
		task->abort_task.qid = qid;

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			/* Task was never delivered: disarm the timer and bail */
			del_timer_sync(&task->slow_task->timer);
			pr_err("Executing internal abort failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;

		/* Even if the internal abort timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			bool quit = true;

			/* Let the LLDD decide whether the timeout is fatal */
			if (i->dft->lldd_abort_timeout)
				quit = i->dft->lldd_abort_timeout(task, data);
			else
				pr_err("Internal abort: timeout %016llx\n",
				       SAS_ADDR(device->sas_addr));
			res = -EIO;
			if (quit)
				break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
		       SAS_ADDR(device->sas_addr), task->task_status.resp,
		       task->task_status.stat);
		/* Unrecognized response: free this attempt's task and retry */
		sas_free_task(task);
		task = NULL;
	}
	/* On loop exhaustion the last attempt's task must have been freed */
	BUG_ON(retry == TASK_RETRY && task != NULL);
	sas_free_task(task);
	return res;
}
 996
 997int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
 998				      unsigned int qid, void *data)
 999{
1000	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
1001					  tag, qid, data);
1002}
1003EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
1004
1005int sas_execute_internal_abort_dev(struct domain_device *device,
1006				   unsigned int qid, void *data)
1007{
1008	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
1009					  SCSI_NO_TAG, qid, data);
1010}
1011EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
1012
/*
 * sas_execute_tmf - issue a task management function and wait for the result
 * @device: domain device to send the TMF to
 * @parameter: protocol frame (SSP task or ATA FIS) copied into the task
 * @para_len: size in bytes of @parameter
 * @force_phy_id: for SATA, phy to force the frame out of (< 0: don't force)
 * @tmf: TMF parameters attached to the task
 *
 * Allocates a slow task, dispatches it via the LLDD and sleeps until the
 * task completes or TASK_TIMEOUT fires, retrying up to TASK_RETRY times
 * on an unrecognized response.
 *
 * Return: a TMF_RESP_* code, the residual byte count on data underrun,
 * -ENOMEM, -EMSGSIZE, -EIO, or the LLDD's error code.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->core.shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			/* SATA: send the caller-built FIS, optionally pinned
			 * to a specific phy */
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}

		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		/* Arm the timeout before handing the task to the LLDD */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			/* Task was never delivered: disarm the timer and bail */
			del_timer_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			/* Timed out; fatal only if the task never completed */
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			pr_warn("TMF task open reject failed  %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		/* Unrecognized response: free this attempt's task and retry */
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);
	sas_free_task(task);

	return res;
}
1130
1131static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
1132			       struct sas_tmf_task *tmf)
1133{
1134	struct sas_ssp_task ssp_task;
1135
1136	if (!(device->tproto & SAS_PROTOCOL_SSP))
1137		return TMF_RESP_FUNC_ESUPP;
1138
1139	memcpy(ssp_task.LUN, lun, 8);
1140
1141	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
1142}
1143
1144int sas_abort_task_set(struct domain_device *dev, u8 *lun)
1145{
1146	struct sas_tmf_task tmf_task = {
1147		.tmf = TMF_ABORT_TASK_SET,
1148	};
1149
1150	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1151}
1152EXPORT_SYMBOL_GPL(sas_abort_task_set);
1153
1154int sas_clear_task_set(struct domain_device *dev, u8 *lun)
1155{
1156	struct sas_tmf_task tmf_task = {
1157		.tmf = TMF_CLEAR_TASK_SET,
1158	};
1159
1160	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1161}
1162EXPORT_SYMBOL_GPL(sas_clear_task_set);
1163
1164int sas_lu_reset(struct domain_device *dev, u8 *lun)
1165{
1166	struct sas_tmf_task tmf_task = {
1167		.tmf = TMF_LU_RESET,
1168	};
1169
1170	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1171}
1172EXPORT_SYMBOL_GPL(sas_lu_reset);
1173
1174int sas_query_task(struct sas_task *task, u16 tag)
1175{
1176	struct sas_tmf_task tmf_task = {
1177		.tmf = TMF_QUERY_TASK,
1178		.tag_of_task_to_be_managed = tag,
1179	};
1180	struct scsi_cmnd *cmnd = task->uldd_task;
1181	struct domain_device *dev = task->dev;
1182	struct scsi_lun lun;
1183
1184	int_to_scsilun(cmnd->device->lun, &lun);
1185
1186	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1187}
1188EXPORT_SYMBOL_GPL(sas_query_task);
1189
1190int sas_abort_task(struct sas_task *task, u16 tag)
1191{
1192	struct sas_tmf_task tmf_task = {
1193		.tmf = TMF_ABORT_TASK,
1194		.tag_of_task_to_be_managed = tag,
1195	};
1196	struct scsi_cmnd *cmnd = task->uldd_task;
1197	struct domain_device *dev = task->dev;
1198	struct scsi_lun lun;
1199
1200	int_to_scsilun(cmnd->device->lun, &lun);
1201
1202	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1203}
1204EXPORT_SYMBOL_GPL(sas_abort_task);
1205
1206/*
1207 * Tell an upper layer that it needs to initiate an abort for a given task.
1208 * This should only ever be called by an LLDD.
1209 */
1210void sas_task_abort(struct sas_task *task)
1211{
1212	struct scsi_cmnd *sc = task->uldd_task;
1213
1214	/* Escape for libsas internal commands */
1215	if (!sc) {
1216		struct sas_task_slow *slow = task->slow_task;
1217
1218		if (!slow)
1219			return;
1220		if (!del_timer(&slow->timer))
1221			return;
1222		slow->timer.function(&slow->timer);
1223		return;
1224	}
1225
1226	if (dev_is_sata(task->dev))
1227		sas_ata_task_abort(task);
1228	else
1229		blk_abort_request(scsi_cmd_to_rq(sc));
1230}
1231EXPORT_SYMBOL_GPL(sas_task_abort);
1232
1233int sas_slave_alloc(struct scsi_device *sdev)
1234{
1235	if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
1236		return -ENXIO;
1237
1238	return 0;
1239}
1240EXPORT_SYMBOL_GPL(sas_slave_alloc);
1241
1242void sas_target_destroy(struct scsi_target *starget)
1243{
1244	struct domain_device *found_dev = starget->hostdata;
1245
1246	if (!found_dev)
1247		return;
1248
1249	starget->hostdata = NULL;
1250	sas_put_device(found_dev);
1251}
1252EXPORT_SYMBOL_GPL(sas_target_destroy);
1253
1254#define SAS_STRING_ADDR_SIZE	16
1255
1256int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
1257{
1258	int res;
1259	const struct firmware *fw;
1260
1261	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
1262	if (res)
1263		return res;
1264
1265	if (fw->size < SAS_STRING_ADDR_SIZE) {
1266		res = -ENODEV;
1267		goto out;
1268	}
1269
1270	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
1271	if (res)
1272		goto out;
1273
1274out:
1275	release_firmware(fw);
1276	return res;
1277}
1278EXPORT_SYMBOL_GPL(sas_request_addr);
1279