// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

struct hisi_sas_internal_abort_data {
	bool rst_ha_timeout; /* reset the HA for timeout */
};

u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
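
/*
 * Illustration of the mapping above (not driver code): an
 * ATA_CMD_FPDMA_READ FIS is classified as FPDMA (NCQ), ATA_CMD_READ as
 * DMA, ATA_CMD_ID_ATA as PIO and ATA_CMD_FLUSH as non-data. A command
 * not listed falls back on the DMA direction hint: DMA_NONE means
 * non-data, anything else is treated as PIO.
 */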

void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
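
/*
 * Worked example (illustrative only): with SAS_LINK_RATE_1_5_GBPS == 8
 * and SAS_LINK_RATE_6_0_GBPS == 10 in <scsi/sas.h>,
 *
 *	hisi_sas_get_prog_phy_linkrate_mask(SAS_LINK_RATE_6_0_GBPS)
 *
 * loops i = 0..2 and sets bits 0, 2 and 4, returning 0x15; 12.0 Gbit/s
 * would return 0x55. Each supported rate occupies one bit pair, with
 * the lowest rate in the least-significant bits.
 */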

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx < HISI_SAS_RESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct request *rq)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (rq)
		return rq->tag + HISI_SAS_RESERVED_IPTT;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT,
				   hisi_hba->last_slot_index + 1);
	if (index >= HISI_SAS_RESERVED_IPTT) {
		index = find_next_zero_bit(bitmap,
				HISI_SAS_RESERVED_IPTT,
				0);
		if (index >= HISI_SAS_RESERVED_IPTT) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}
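
/*
 * IPTT allocation in brief: a command backed by a block-layer request
 * reuses its blk-mq tag, offset by HISI_SAS_RESERVED_IPTT, so no bitmap
 * bookkeeping is needed for it. Internal commands (TMFs, aborts) draw
 * from the reserved range [0, HISI_SAS_RESERVED_IPTT) via a round-robin
 * bitmap scan that resumes after last_slot_index and wraps to 0 before
 * giving up with -SAS_QUEUE_FULL.
 */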

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot, bool need_lock)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem) {
				if (task->task_proto & SAS_PROTOCOL_SSP)
					dma_unmap_sg(dev, task->scatter,
						     task->num_scatter,
						     task->data_dir);
				else
					dma_unmap_sg(dev, &task->smp_task.smp_req,
						     1, DMA_TO_DEVICE);
			}
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	if (need_lock) {
		spin_lock(&sas_dev->lock);
		list_del_init(&slot->entry);
		spin_unlock(&sas_dev->lock);
	} else {
		list_del_init(&slot->entry);
	}

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto) && n_elem) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
				     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			dma_unmap_sg(dev, &task->smp_task.smp_req,
				     1, DMA_TO_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

static
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot,
			   struct hisi_sas_dq *dq,
			   struct hisi_sas_device *sas_dev)
{
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue;
	struct sas_task *task = slot->task;
	int wr_q_index;

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		hisi_sas_task_prep_abort(hisi_hba, slot);
		break;
	default:
		return;
	}

	/* Make slot memories observable before marking as ready */
	smp_wmb();
	WRITE_ONCE(slot->ready, 1);

	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);
}
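
/*
 * The smp_wmb() above orders the command header/table writes before the
 * WRITE_ONCE() that marks the slot ready. A completion path that checks
 * slot->ready with READ_ONCE() plus a matching read barrier therefore
 * never observes a half-initialized slot; that pairing lives in the
 * hw-specific completion code, which is assumed here, not shown.
 */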

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	int n_elem = 0, n_elem_dif = 0;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct hisi_sas_dq *dq = NULL;
	struct hisi_sas_port *port;
	struct hisi_hba *hisi_hba;
	struct hisi_sas_slot *slot;
	struct request *rq = NULL;
	struct device *dev;
	int rc;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will still use dev->port, so don't call
		 * task_done() for SATA devices.
		 */
		if (device->dev_type != SAS_SATA_DEV && !internal_abort)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	case SAS_PROTOCOL_SMP:
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
			if (!gfpflags_allow_blocking(gfp_flags))
				return -EINVAL;

			down(&hisi_hba->sem);
			up(&hisi_hba->sem);
		}
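
		/*
		 * The down()/up() pair above simply waits out an in-flight
		 * controller reset: the reset path holds hisi_hba->sem while
		 * HISI_SAS_REJECT_CMD_BIT is set, so blocking on the
		 * semaphore parks the submitter until the reset completes.
		 */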

		if (DEV_IS_GONE(sas_dev)) {
			if (sas_dev)
				dev_info(dev, "task prep: device %d not ready\n",
					 sas_dev->device_id);
			else
				dev_info(dev, "task prep: device %016llx not ready\n",
					 SAS_ADDR(device->sas_addr));

			return -ECOMM;
		}

		port = to_hisi_sas_port(sas_port);
		if (!port->port_attached) {
			dev_info(dev, "task prep: %s port%d not attach device\n",
				 dev_is_sata(device) ? "SATA/STP" : "SAS",
				 device->port->id);

			return -ECOMM;
		}

		rq = sas_task_find_rq(task);
		if (rq) {
			unsigned int dq_index;
			u32 blk_tag;

			blk_tag = blk_mq_unique_tag(rq);
			dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
			dq = &hisi_hba->dq[dq_index];
		} else {
			int queue;

			if (hisi_hba->iopoll_q_cnt) {
				/*
				 * Use interrupt queue (queue 0) to deliver and complete
				 * internal IOs of libsas or libata when there is at least
				 * one iopoll queue
				 */
				queue = 0;
			} else {
				struct Scsi_Host *shost = hisi_hba->shost;
				struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

				queue = qmap->mq_map[raw_smp_processor_id()];
			}
			dq = &hisi_hba->dq[queue];
		}
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		if (!hisi_hba->hw->prep_abort)
			return TMF_RESP_FUNC_FAILED;

		if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
			return -EIO;

		hisi_hba = dev_to_hisi_hba(device);

		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
			return -EINVAL;

		port = to_hisi_sas_port(sas_port);
		dq = &hisi_hba->dq[task->abort_task.qid];
		break;
	default:
		dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		return -EINVAL;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (!internal_abort && hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, rq);

	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot = &hisi_hba->slot_info[rc];
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->task = task;
	slot->port = port;

	slot->tmf = task->tmf;
	slot->is_internal = !!task->tmf || internal_abort;

	/* protect task_prep and start_delivery sequence */
	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem);
prep_out:
	dev_err(dev, "task exec: failed[%d]!\n", rc);
	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
				 gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq)
{
	/* make sure CQ entries being processed are processed to completion */
	spin_lock(&cq->poll_lock);
	spin_unlock(&cq->poll_lock);
}
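
/*
 * Taking and immediately releasing poll_lock is a barrier idiom: any
 * poller still processing CQ entries holds poll_lock while it runs, so
 * the lock/unlock pair above cannot return until that pass finishes.
 * It plays the same role for polled queues that synchronize_irq() plays
 * for interrupt-driven ones (see hisi_sas_sync_cq() below).
 */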

static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq)
{
	struct hisi_hba *hisi_hba = cq->hisi_hba;

	if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt)
		return false;
	return true;
}

static void hisi_sas_sync_cq(struct hisi_sas_cq *cq)
{
	if (hisi_sas_queue_is_poll(cq))
		hisi_sas_sync_poll_cq(cq);
	else
		synchronize_irq(cq->irq_no);
}

void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		if (hisi_sas_queue_is_poll(cq))
			hisi_sas_sync_poll_cq(cq);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs);

void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		hisi_sas_sync_cq(cq);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs);

static void hisi_sas_tmf_aborted(struct sas_task *task)
{
	struct hisi_sas_slot *slot = task->lldd_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;

	if (slot) {
		struct hisi_sas_cq *cq =
			   &hisi_hba->cq[slot->dlvry_queue];
		/*
		 * sync irq or poll queue to avoid freeing the task
		 * before the IO completion path is done using it
		 */
		hisi_sas_sync_cq(cq);
		slot->task = NULL;
	}
}

#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		while (retry-- > 0) {
			rc = sas_abort_task_set(device, lun.scsi_lun);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * If an expander is swapped when a SATA disk is attached then
		 * we should issue a hard reset to clear previous affiliation
		 * of STP target port, see SPL (chapter 6.19.4).
		 *
		 * However we don't need to issue a hard reset here for these
		 * reasons:
		 * a. When probing the device, libsas/libata already issues a
		 * hard reset in sas_probe_sata() -> ata_port_probe().
		 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
		 * to issue a hard reset by checking the dev status (== INIT).
		 * b. When resetting the controller, this is simply unnecessary.
		 */
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	struct hisi_sas_device *sas_dev = ddev->lldd_dev;
	int rc;

	rc = sas_slave_alloc(sdev);
	if (rc)
		return rc;

	rc = hisi_sas_init_device(ddev);
	if (rc)
		return rc;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;

		phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device);
		if (phy_no < 0) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = phy_no;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		sas_dev->device_id, sas_dev->dev_type);

	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work_common(struct work_struct *work,
		enum hisi_sas_phy_event event)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[event]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	phy->wait_phyup_cnt = 0;
	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
	pm_runtime_put_sync(dev);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
	[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
				enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

#define HISI_SAS_WAIT_PHYUP_RETRIES	10

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->phy_attached) {
		spin_unlock_irqrestore(&phy->lock, flags);
		return;
	}

	if (!timer_pending(&phy->timer)) {
		if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
			phy->wait_phyup_cnt++;
			phy->timer.expires = jiffies +
					     HISI_SAS_WAIT_PHYUP_TIMEOUT;
			add_timer(&phy->timer);
			spin_unlock_irqrestore(&phy->lock, flags);
			return;
		}

		dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
			 phy_no, phy->wait_phyup_cnt);
		phy->wait_phyup_cnt = 0;
	}
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot, bool need_lock)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	spin_lock(&sas_dev->lock);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot, false);

	spin_unlock(&sas_dev->lock);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static int
hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
				 bool rst_ha_timeout)
{
	struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
	struct domain_device *device = sas_dev->sas_device;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	int i, rc;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		const struct cpumask *mask = cq->irq_mask;

		if (mask && !cpumask_intersects(cpu_online_mask, mask))
			continue;
		rc = sas_execute_internal_abort_dev(device, i, &data);
		if (rc)
			return rc;
	}

	return 0;
}
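
/*
 * The internal abort above is issued once per completion queue so that
 * every queue drains the device's commands; a queue whose irq affinity
 * mask contains no online CPU is skipped, presumably because its
 * completions could not be serviced anyway.
 */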

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort_dev(sas_dev, true);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = sas_phy->id;
	u8 sts = phy->phy_attached;
	int ret = 0;

	down(&hisi_hba->sem);
	phy->reset_completion = &completion;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		goto out;

	case PHY_FUNC_SET_LINK_RATE:
		ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			goto out;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (sts && !wait_for_completion_timeout(&completion,
		HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
			 phy_no, func);
		if (phy->in_reset)
			ret = -ETIMEDOUT;
	}

out:
	phy->reset_completion = NULL;

	up(&hisi_hba->sem);
	return ret;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = sas_execute_ata_cmd(device, fis, -1);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, -1);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk %016llx de-reset failed\n",
					SAS_ADDR(device->sas_addr));
		}
	} else {
		dev_err(dev, "ata disk %016llx reset failed\n",
			SAS_ADDR(device->sas_addr));
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		spin_lock(&sas_port->phy_list_lock);
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}
		spin_unlock(&sas_port->phy_list_lock);

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else if (!port->port_attached)
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD,
							GFP_KERNEL);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	struct ata_link *link;
	u8 fis[20] = {0};
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		if (!(sas_port->phy_mask & BIT(i)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, i);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					i, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie)
{
	struct hisi_sas_phy *phy = data;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = phy->sas_phy.id;

	phy->reset_completion = &completion;
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
	if (!wait_for_completion_timeout(&completion,
					 HISI_SAS_WAIT_PHYUP_TIMEOUT))
		dev_warn(dev, "phy%d wait phyup timed out\n", phy_no);

	phy->reset_completion = NULL;
}

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int phy_no;

	/* Init and wait for PHYs to come up and all libsas events to finish. */
	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];

		if (!(hisi_hba->phy_state & BIT(phy_no)))
			continue;

		async_schedule_domain(hisi_sas_async_init_wait_phyup,
				      phy, &async);
	}

	async_synchronize_full_domain(&async);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
	if (!hisi_hba->hw->soft_reset)
		return -ENOENT;

	down(&hisi_hba->sem);
	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		up(&hisi_hba->sem);
		return -EPERM;
	}

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);

	return 0;
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
		return rc;
	}
	clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct hisi_sas_internal_abort_data internal_abort_data = { false };
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_slot *slot = task->lldd_task;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq or poll queue to avoid freeing the task
			 * before the IO completion path is done using it
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			hisi_sas_sync_cq(cq);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!slot)
		goto out;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		u16 tag = slot->idx;
		int rc2;

		rc = sas_abort_task(task, tag);
		rc2 = sas_execute_internal_abort_single(device, tag,
				slot->dlvry_queue, &internal_abort_data);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot, true);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			struct ata_queued_cmd *qc = task->uldd_task;

			rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);

			/*
			 * If an ATA internal command times out in ATA EH, it
			 * needs to execute a soft reset, so check the scsicmd
			 */
			if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) &&
			    qc && qc->scsicmd) {
				hisi_sas_do_release_task(hisi_hba, task, slot, true);
				rc = TMF_RESP_FUNC_COMPLETE;
			} else {
				rc = hisi_sas_softreset_ata_disk(device);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = sas_execute_internal_abort_single(device,
						       tag, slot->dlvry_queue,
						       &internal_abort_data);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * sync irq or poll queue to avoid freeing the task
			 * before the IO completion path is done using it
			 */
1708			hisi_sas_sync_cq(cq);
1709			slot->task = NULL;
1710		}
1711	}
1712
1713out:
1714	if (rc != TMF_RESP_FUNC_COMPLETE)
1715		dev_notice(dev, "abort task: rc=%d\n", rc);
1716	return rc;
1717}
1718
1719static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1720{
1721	struct hisi_sas_device *sas_dev = device->lldd_dev;
1722	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1723	struct device *dev = hisi_hba->dev;
1724	int rc;
1725
1726	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1727	if (rc < 0) {
1728		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1729		return TMF_RESP_FUNC_FAILED;
1730	}
1731	hisi_sas_dereg_device(hisi_hba, device);
1732
1733	rc = sas_abort_task_set(device, lun);
1734	if (rc == TMF_RESP_FUNC_COMPLETE)
1735		hisi_sas_release_task(hisi_hba, device);
1736
1737	return rc;
1738}
1739
1740static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1741{
1742	struct sas_phy *local_phy = sas_get_local_phy(device);
1743	struct hisi_sas_device *sas_dev = device->lldd_dev;
1744	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1745	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1746	int rc, reset_type;
1747
1748	if (!local_phy->enabled) {
1749		sas_put_local_phy(local_phy);
1750		return -ENODEV;
1751	}
1752
1753	if (scsi_is_sas_phy_local(local_phy)) {
1754		struct asd_sas_phy *sas_phy =
1755			sas_ha->sas_phy[local_phy->number];
1756		struct hisi_sas_phy *phy =
1757			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1758		unsigned long flags;
1759
1760		spin_lock_irqsave(&phy->lock, flags);
1761		phy->in_reset = 1;
1762		spin_unlock_irqrestore(&phy->lock, flags);
1763	}
1764
1765	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
1766		      !dev_is_sata(device)) ? true : false;
1767
1768	rc = sas_phy_reset(local_phy, reset_type);
1769	sas_put_local_phy(local_phy);
1770
1771	if (scsi_is_sas_phy_local(local_phy)) {
1772		struct asd_sas_phy *sas_phy =
1773			sas_ha->sas_phy[local_phy->number];
1774		struct hisi_sas_phy *phy =
1775			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1776		unsigned long flags;
1777
1778		spin_lock_irqsave(&phy->lock, flags);
1779		phy->in_reset = 0;
1780		spin_unlock_irqrestore(&phy->lock, flags);
1781
1782		/* report PHY down if timed out */
1783		if (rc == -ETIMEDOUT)
1784			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
1785		return rc;
1786	}
1787
1788	/* Remote phy */
1789	if (rc)
1790		return rc;
1791
1792	if (dev_is_sata(device)) {
1793		struct ata_link *link = &device->sata_dev.ap->link;
1794
1795		rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
1796					  smp_ata_check_ready_type);
1797	} else {
1798		msleep(2000);
1799	}
1800
1801	return rc;
1802}
1803
1804static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1805{
1806	struct hisi_sas_device *sas_dev = device->lldd_dev;
1807	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1808	struct device *dev = hisi_hba->dev;
1809	int rc;
1810
1811	if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR)
1812		sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1813
1814	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1815	if (rc < 0) {
1816		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1817		return TMF_RESP_FUNC_FAILED;
1818	}
1819	hisi_sas_dereg_device(hisi_hba, device);
1820
1821	rc = hisi_sas_debug_I_T_nexus_reset(device);
1822	if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
1823		struct sas_phy *local_phy;
1824
1825		rc = hisi_sas_softreset_ata_disk(device);
1826		switch (rc) {
1827		case -ECOMM:
1828			rc = -ENODEV;
1829			break;
1830		case TMF_RESP_FUNC_FAILED:
1831		case -EMSGSIZE:
1832		case -EIO:
1833			local_phy = sas_get_local_phy(device);
1834			rc = sas_phy_enable(local_phy, 0);
1835			if (!rc) {
1836				local_phy->enabled = 0;
1837				dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
1838					SAS_ADDR(device->sas_addr), rc);
1839				rc = -ENODEV;
1840			}
1841			sas_put_local_phy(local_phy);
1842			break;
1843		default:
1844			break;
1845		}
1846	}
1847
1848	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1849		hisi_sas_release_task(hisi_hba, device);
1850
1851	return rc;
1852}
1853
1854static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1855{
1856	struct hisi_sas_device *sas_dev = device->lldd_dev;
1857	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1858	struct device *dev = hisi_hba->dev;
1859	int rc = TMF_RESP_FUNC_FAILED;
1860
1861	/* Clear internal IO and then lu reset */
1862	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1863	if (rc < 0) {
1864		dev_err(dev, "lu_reset: internal abort failed\n");
1865		goto out;
1866	}
1867	hisi_sas_dereg_device(hisi_hba, device);
1868
1869	if (dev_is_sata(device)) {
1870		struct sas_phy *phy;
1871
1872		phy = sas_get_local_phy(device);
1873
1874		rc = sas_phy_reset(phy, true);
1875
1876		if (rc == 0)
1877			hisi_sas_release_task(hisi_hba, device);
1878		sas_put_local_phy(phy);
1879	} else {
1880		rc = sas_lu_reset(device, lun);
1881		if (rc == TMF_RESP_FUNC_COMPLETE)
1882			hisi_sas_release_task(hisi_hba, device);
1883	}
1884out:
1885	if (rc != TMF_RESP_FUNC_COMPLETE)
1886		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1887			     sas_dev->device_id, rc);
1888	return rc;
1889}
1890
1891static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
1892{
1893	struct domain_device *device = data;
1894	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1895	int rc;
1896
1897	rc = hisi_sas_debug_I_T_nexus_reset(device);
1898	if (rc != TMF_RESP_FUNC_COMPLETE)
1899		dev_info(hisi_hba->dev, "I_T nexus reset failed for dev %016llx, rc=%d\n",
1900			 SAS_ADDR(device->sas_addr), rc);
1901}
1902
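/*
 * Clear-nexus-HA: reset the controller synchronously, then issue an
 * I_T nexus reset to every attached end device in parallel and wait for
 * all of them before releasing any tasks still held by the driver.
 */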
1903static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1904{
1905	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1906	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1907	ASYNC_DOMAIN_EXCLUSIVE(async);
1908	int i;
1909
1910	queue_work(hisi_hba->wq, &r.work);
1911	wait_for_completion(r.completion);
1912	if (!r.done)
1913		return TMF_RESP_FUNC_FAILED;
1914
1915	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1916		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1917		struct domain_device *device = sas_dev->sas_device;
1918
1919		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
1920		    dev_is_expander(device->dev_type))
1921			continue;
1922
1923		async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
1924				      device, &async);
1925	}
1926
1927	async_synchronize_full_domain(&async);
1928	hisi_sas_release_tasks(hisi_hba);
1929
1930	return TMF_RESP_FUNC_COMPLETE;
1931}
1932
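/*
 * Query whether a task is still present in the LUN; only SSP tasks that
 * still own a slot can be queried, anything else reports failure.
 */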
1933static int hisi_sas_query_task(struct sas_task *task)
1934{
1935	int rc = TMF_RESP_FUNC_FAILED;
1936
1937	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1938		struct hisi_sas_slot *slot = task->lldd_task;
1939		u32 tag = slot->idx;
1940
1941		rc = sas_query_task(task, tag);
1942		switch (rc) {
1943		/* The task is still in the LUN; release it */
1944		case TMF_RESP_FUNC_SUCC:
1945		/* The task is not in the LUN or it failed; reset the phy */
1946		case TMF_RESP_FUNC_FAILED:
1947		case TMF_RESP_FUNC_COMPLETE:
1948			break;
1949		default:
1950			rc = TMF_RESP_FUNC_FAILED;
1951			break;
1952		}
1953	}
1954	return rc;
1955}
1956
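/*
 * Timeout handler for internal abort commands: snapshot the HW registers
 * for post-mortem debug, and if the aborted task never completed, mark
 * the HW as faulted, detach the slot from the task and optionally queue
 * a controller reset.
 */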
1957static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
1958					    void *data)
1959{
1960	struct domain_device *device = task->dev;
1961	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1962	struct hisi_sas_internal_abort_data *timeout = data;
1963
1964	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) {
1965		down(&hisi_hba->sem);
1966		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
1967		up(&hisi_hba->sem);
1968	}
1969
1970	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1971		pr_err("Internal abort: timeout %016llx\n",
1972		       SAS_ADDR(device->sas_addr));
1973	} else {
1974		struct hisi_sas_slot *slot = task->lldd_task;
1975
1976		set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
1977
1978		if (slot) {
1979			struct hisi_sas_cq *cq =
1980				&hisi_hba->cq[slot->dlvry_queue];
1981			/*
1982			 * Sync the irq or poll queue so the task is not freed
1983			 * while the I/O completion path is still using it
1984			 */
1985			hisi_sas_sync_cq(cq);
1986			slot->task = NULL;
1987		}
1988
1989		if (timeout->rst_ha_timeout) {
1990			pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
1991			       SAS_ADDR(device->sas_addr));
1992			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1993		} else {
1994			pr_err("Internal abort: timeout and not done %016llx.\n",
1995			       SAS_ADDR(device->sas_addr));
1996		}
1997
1998		return true;
1999	}
2000
2001	return false;
2002}
2003
2004static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
2005{
2006	hisi_sas_port_notify_formed(sas_phy);
2007}
2008
2009static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
2010			u8 reg_index, u8 reg_count, u8 *write_data)
2011{
2012	struct hisi_hba *hisi_hba = sha->lldd_ha;
2013
2014	if (!hisi_hba->hw->write_gpio)
2015		return -EOPNOTSUPP;
2016
2017	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
2018				reg_index, reg_count, write_data);
2019}
2020
2021static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
2022{
2023	struct asd_sas_phy *sas_phy = &phy->sas_phy;
2024	struct sas_phy *sphy = sas_phy->phy;
2025	unsigned long flags;
2026
2027	phy->phy_attached = 0;
2028	phy->phy_type = 0;
2029	phy->port = NULL;
2030
2031	spin_lock_irqsave(&phy->lock, flags);
2032	if (phy->enable)
2033		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
2034	else
2035		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
2036	spin_unlock_irqrestore(&phy->lock, flags);
2037}
2038
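/**
 * hisi_sas_phy_down - handle a phy down event
 * @hisi_hba: host owning the phy
 * @phy_no: index of the phy that went down
 * @rdy: non-zero if the phy is still ready despite the down event
 * @gfp_flags: allocation context for the libsas event
 *
 * A ready phy has its port re-formed; otherwise loss of signal is
 * reported to libsas and the phy is detached from its port, unless the
 * event is flutter during a host or phy reset, which is ignored.
 */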
2039void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
2040		       gfp_t gfp_flags)
2041{
2042	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
2043	struct asd_sas_phy *sas_phy = &phy->sas_phy;
2044	struct device *dev = hisi_hba->dev;
2045
2046	if (rdy) {
2047		/* Phy down but ready */
2048		hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
2049		hisi_sas_port_notify_formed(sas_phy);
2050	} else {
2051		struct hisi_sas_port *port = phy->port;
2052
2053		if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
2054		    phy->in_reset) {
2055			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
2056			return;
2057		}
2058		/* Phy down and not ready */
2059		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
2060		sas_phy_disconnected(sas_phy);
2061
2062		if (port) {
2063			if (phy->phy_type & PORT_TYPE_SAS) {
2064				int port_id = port->id;
2065
2066				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
2067								       port_id))
2068					port->port_attached = 0;
2069			} else if (phy->phy_type & PORT_TYPE_SATA)
2070				port->port_attached = 0;
2071		}
2072		hisi_sas_phy_disconnected(phy);
2073	}
2074}
2075EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
2076
2077void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
2078{
2079	struct asd_sas_phy *sas_phy = &phy->sas_phy;
2080	struct hisi_hba *hisi_hba = phy->hisi_hba;
2081
2082	if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
2083		return;
2084
2085	sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
2086}
2087EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
2088
2089int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
2090{
2091	struct hisi_hba *hisi_hba = shost_priv(shost);
2092
2093	if (reset_type != SCSI_ADAPTER_RESET)
2094		return -EOPNOTSUPP;
2095
2096	queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2097
2098	return 0;
2099}
2100EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
2101
2102struct scsi_transport_template *hisi_sas_stt;
2103EXPORT_SYMBOL_GPL(hisi_sas_stt);
2104
2105static struct sas_domain_function_template hisi_sas_transport_ops = {
2106	.lldd_dev_found		= hisi_sas_dev_found,
2107	.lldd_dev_gone		= hisi_sas_dev_gone,
2108	.lldd_execute_task	= hisi_sas_queue_command,
2109	.lldd_control_phy	= hisi_sas_control_phy,
2110	.lldd_abort_task	= hisi_sas_abort_task,
2111	.lldd_abort_task_set	= hisi_sas_abort_task_set,
2112	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
2113	.lldd_lu_reset		= hisi_sas_lu_reset,
2114	.lldd_query_task	= hisi_sas_query_task,
2115	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
2116	.lldd_port_formed	= hisi_sas_port_formed,
2117	.lldd_write_gpio	= hisi_sas_write_gpio,
2118	.lldd_tmf_aborted	= hisi_sas_tmf_aborted,
2119	.lldd_abort_timeout	= hisi_sas_internal_abort_timeout,
2120};
2121
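/*
 * Zero all memory shared with the HW (command and completion queues,
 * IOST, breakpoint tables, initial FIS buffers) and rewind the queue
 * read/write pointers; used when (re)initialising the controller.
 */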
2122void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2123{
2124	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
2125	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
2126
2127	for (i = 0; i < hisi_hba->queue_count; i++) {
2128		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2129		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2130		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];
2131
2132		s = sizeof(struct hisi_sas_cmd_hdr);
2133		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
2134			memset(&cmd_hdr[j], 0, s);
2135
2136		dq->wr_point = 0;
2137
2138		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2139		memset(hisi_hba->complete_hdr[i], 0, s);
2140		cq->rd_point = 0;
2141	}
2142
2143	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2144	memset(hisi_hba->initial_fis, 0, s);
2145
2146	s = max_command_entries * sizeof(struct hisi_sas_iost);
2147	memset(hisi_hba->iost, 0, s);
2148
2149	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2150	memset(hisi_hba->breakpoint, 0, s);
2151
2152	s = sizeof(struct hisi_sas_sata_breakpoint);
2153	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
2154		memset(&sata_breakpoint[j], 0, s);
2155}
2156EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2157
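/*
 * Allocate all per-HBA memory: DMA rings for each queue pair, ITCT,
 * IOST, breakpoint tables, slot buffers carved from large DMA blocks,
 * the tag bitmap and the reset workqueue. Most allocations are
 * devm/dmam managed, so the error path simply returns -ENOMEM (the
 * workqueue is freed in hisi_sas_free()).
 */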
2158int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2159{
2160	struct device *dev = hisi_hba->dev;
2161	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
2162	int max_command_entries_ru, sz_slot_buf_ru;
2163	int blk_cnt, slots_per_blk;
2164
2165	sema_init(&hisi_hba->sem, 1);
2166	spin_lock_init(&hisi_hba->lock);
2167	for (i = 0; i < hisi_hba->n_phy; i++) {
2168		hisi_sas_phy_init(hisi_hba, i);
2169		hisi_hba->port[i].port_attached = 0;
2170		hisi_hba->port[i].id = -1;
2171	}
2172
2173	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2174		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2175		hisi_hba->devices[i].device_id = i;
2176		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
2177	}
2178
2179	for (i = 0; i < hisi_hba->queue_count; i++) {
2180		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2181		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2182
2183		/* Completion queue structure */
2184		cq->id = i;
2185		cq->hisi_hba = hisi_hba;
2186		spin_lock_init(&cq->poll_lock);
2187
2188		/* Delivery queue structure */
2189		spin_lock_init(&dq->lock);
2190		INIT_LIST_HEAD(&dq->list);
2191		dq->id = i;
2192		dq->hisi_hba = hisi_hba;
2193
2194		/* Delivery queue */
2195		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2196		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
2197						&hisi_hba->cmd_hdr_dma[i],
2198						GFP_KERNEL);
2199		if (!hisi_hba->cmd_hdr[i])
2200			goto err_out;
2201
2202		/* Completion queue */
2203		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2204		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
2205						&hisi_hba->complete_hdr_dma[i],
2206						GFP_KERNEL);
2207		if (!hisi_hba->complete_hdr[i])
2208			goto err_out;
2209	}
2210
2211	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2212	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2213					     GFP_KERNEL);
2214	if (!hisi_hba->itct)
2215		goto err_out;
2216
2217	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
2218					   sizeof(struct hisi_sas_slot),
2219					   GFP_KERNEL);
2220	if (!hisi_hba->slot_info)
2221		goto err_out;
2222
2223	/* Round up to multiples of 64 to avoid an overly large LCM block size */
2224	max_command_entries_ru = roundup(max_command_entries, 64);
2225	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
2226		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
2227	else
2228		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
2229	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
2230	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
2231	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
2232	slots_per_blk = s / sz_slot_buf_ru;
2233
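	/*
	 * s is the block size: the LCM of the rounded-up command count
	 * and slot-buffer size, bounded below by one page. Each block is
	 * carved into slots_per_blk individual slot buffers in the loop
	 * below, using blk_cnt blocks in total.
	 */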
2234	for (i = 0; i < blk_cnt; i++) {
2235		int slot_index = i * slots_per_blk;
2236		dma_addr_t buf_dma;
2237		void *buf;
2238
2239		buf = dmam_alloc_coherent(dev, s, &buf_dma,
2240					  GFP_KERNEL);
2241		if (!buf)
2242			goto err_out;
2243
2244		for (j = 0; j < slots_per_blk; j++, slot_index++) {
2245			struct hisi_sas_slot *slot;
2246
2247			slot = &hisi_hba->slot_info[slot_index];
2248			slot->buf = buf;
2249			slot->buf_dma = buf_dma;
2250			slot->idx = slot_index;
2251
2252			buf += sz_slot_buf_ru;
2253			buf_dma += sz_slot_buf_ru;
2254		}
2255	}
2256
2257	s = max_command_entries * sizeof(struct hisi_sas_iost);
2258	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
2259					     GFP_KERNEL);
2260	if (!hisi_hba->iost)
2261		goto err_out;
2262
2263	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2264	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
2265						   &hisi_hba->breakpoint_dma,
2266						   GFP_KERNEL);
2267	if (!hisi_hba->breakpoint)
2268		goto err_out;
2269
2270	s = hisi_hba->slot_index_count = max_command_entries;
2271	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
2272	if (!hisi_hba->slot_index_tags)
2273		goto err_out;
2274
2275	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2276	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
2277						    &hisi_hba->initial_fis_dma,
2278						    GFP_KERNEL);
2279	if (!hisi_hba->initial_fis)
2280		goto err_out;
2281
2282	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2283	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
2284					&hisi_hba->sata_breakpoint_dma,
2285					GFP_KERNEL);
2286	if (!hisi_hba->sata_breakpoint)
2287		goto err_out;
2288
2289	hisi_hba->last_slot_index = 0;
2290
2291	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
2292	if (!hisi_hba->wq) {
2293		dev_err(dev, "sas_alloc: failed to create workqueue\n");
2294		goto err_out;
2295	}
2296
2297	return 0;
2298err_out:
2299	return -ENOMEM;
2300}
2301EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2302
2303void hisi_sas_free(struct hisi_hba *hisi_hba)
2304{
2305	int i;
2306
2307	for (i = 0; i < hisi_hba->n_phy; i++) {
2308		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
2309
2310		del_timer_sync(&phy->timer);
2311	}
2312
2313	if (hisi_hba->wq)
2314		destroy_workqueue(hisi_hba->wq);
2315}
2316EXPORT_SYMBOL_GPL(hisi_sas_free);
2317
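/* Controller reset work; the hw-specific prereset hook may veto the reset */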
2318void hisi_sas_rst_work_handler(struct work_struct *work)
2319{
2320	struct hisi_hba *hisi_hba =
2321		container_of(work, struct hisi_hba, rst_work);
2322
2323	if (hisi_sas_controller_prereset(hisi_hba))
2324		return;
2325
2326	hisi_sas_controller_reset(hisi_hba);
2327}
2328EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2329
2330void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2331{
2332	struct hisi_sas_rst *rst =
2333		container_of(work, struct hisi_sas_rst, work);
2334
2335	if (hisi_sas_controller_prereset(rst->hisi_hba))
2336		goto rst_complete;
2337
2338	if (!hisi_sas_controller_reset(rst->hisi_hba))
2339		rst->done = true;
2340rst_complete:
2341	complete(rst->completion);
2342}
2343EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2344
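/*
 * Read controller configuration from firmware via the unified
 * device-property API (works for both DT and ACPI); the syscon reset
 * registers are only required when probed from a DT platform device.
 */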
2345int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2346{
2347	struct device *dev = hisi_hba->dev;
2348	struct platform_device *pdev = hisi_hba->platform_dev;
2349	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2350	struct clk *refclk;
2351
2352	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2353					  SAS_ADDR_SIZE)) {
2354		dev_err(dev, "could not get property sas-addr\n");
2355		return -ENOENT;
2356	}
2357
2358	if (np) {
2359		/*
2360		 * These properties are only required for a platform
2361		 * device-based controller probed via DT firmware.
2362		 */
2363		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2364					"hisilicon,sas-syscon");
2365		if (IS_ERR(hisi_hba->ctrl)) {
2366			dev_err(dev, "could not get syscon\n");
2367			return -ENOENT;
2368		}
2369
2370		if (device_property_read_u32(dev, "ctrl-reset-reg",
2371					     &hisi_hba->ctrl_reset_reg)) {
2372			dev_err(dev, "could not get property ctrl-reset-reg\n");
2373			return -ENOENT;
2374		}
2375
2376		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2377					     &hisi_hba->ctrl_reset_sts_reg)) {
2378			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
2379			return -ENOENT;
2380		}
2381
2382		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2383					     &hisi_hba->ctrl_clock_ena_reg)) {
2384			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
2385			return -ENOENT;
2386		}
2387	}
2388
2389	refclk = devm_clk_get(dev, NULL);
2390	if (IS_ERR(refclk))
2391		dev_dbg(dev, "no ref clk property\n");
2392	else
2393		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2394
2395	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2396		dev_err(dev, "could not get property phy-count\n");
2397		return -ENOENT;
2398	}
2399
2400	if (device_property_read_u32(dev, "queue-count",
2401				     &hisi_hba->queue_count)) {
2402		dev_err(dev, "could not get property queue-count\n");
2403		return -ENOENT;
2404	}
2405
2406	return 0;
2407}
2408EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2409
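/*
 * Allocate the Scsi_Host with the hisi_hba embedded, read the firmware
 * config, set the DMA mask and map the register windows; on failure the
 * half-initialised host is put and NULL is returned.
 */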
2410static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2411					      const struct hisi_sas_hw *hw)
2412{
2413	struct resource *res;
2414	struct Scsi_Host *shost;
2415	struct hisi_hba *hisi_hba;
2416	struct device *dev = &pdev->dev;
2417	int error;
2418
2419	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
2420	if (!shost) {
2421		dev_err(dev, "scsi host alloc failed\n");
2422		return NULL;
2423	}
2424	hisi_hba = shost_priv(shost);
2425
2426	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2427	hisi_hba->hw = hw;
2428	hisi_hba->dev = dev;
2429	hisi_hba->platform_dev = pdev;
2430	hisi_hba->shost = shost;
2431	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2432
2433	timer_setup(&hisi_hba->timer, NULL, 0);
2434
2435	if (hisi_sas_get_fw_info(hisi_hba) < 0)
2436		goto err_out;
2437
2438	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
2439	if (error) {
2440		dev_err(dev, "No usable DMA addressing method\n");
2441		goto err_out;
2442	}
2443
2444	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
2445	if (IS_ERR(hisi_hba->regs))
2446		goto err_out;
2447
2448	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2449	if (res) {
2450		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2451		if (IS_ERR(hisi_hba->sgpio_regs))
2452			goto err_out;
2453	}
2454
2455	if (hisi_sas_alloc(hisi_hba)) {
2456		hisi_sas_free(hisi_hba);
2457		goto err_out;
2458	}
2459
2460	return shost;
2461err_out:
2462	scsi_host_put(shost);
2463	dev_err(dev, "shost alloc failed\n");
2464	return NULL;
2465}
2466
2467static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
2468{
2469	if (hisi_hba->hw->interrupt_preinit)
2470		return hisi_hba->hw->interrupt_preinit(hisi_hba);
2471	return 0;
2472}
2473
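/*
 * Common probe path for platform-device-based controllers: wire up the
 * libsas phy/port arrays, register the Scsi_Host and the SAS HA, run
 * the hw-specific init and finally scan the host.
 */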
2474int hisi_sas_probe(struct platform_device *pdev,
2475		   const struct hisi_sas_hw *hw)
2476{
2477	struct Scsi_Host *shost;
2478	struct hisi_hba *hisi_hba;
2479	struct device *dev = &pdev->dev;
2480	struct asd_sas_phy **arr_phy;
2481	struct asd_sas_port **arr_port;
2482	struct sas_ha_struct *sha;
2483	int rc, phy_nr, port_nr, i;
2484
2485	shost = hisi_sas_shost_alloc(pdev, hw);
2486	if (!shost)
2487		return -ENOMEM;
2488
2489	sha = SHOST_TO_SAS_HA(shost);
2490	hisi_hba = shost_priv(shost);
2491	platform_set_drvdata(pdev, sha);
2492
2493	phy_nr = port_nr = hisi_hba->n_phy;
2494
2495	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2496	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2497	if (!arr_phy || !arr_port) {
2498		rc = -ENOMEM;
2499		goto err_out_ha;
2500	}
2501
2502	sha->sas_phy = arr_phy;
2503	sha->sas_port = arr_port;
2504	sha->lldd_ha = hisi_hba;
2505
2506	shost->transportt = hisi_sas_stt;
2507	shost->max_id = HISI_SAS_MAX_DEVICES;
2508	shost->max_lun = ~0;
2509	shost->max_channel = 1;
2510	shost->max_cmd_len = 16;
2511	if (hisi_hba->hw->slot_index_alloc) {
2512		shost->can_queue = HISI_SAS_MAX_COMMANDS;
2513		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
2514	} else {
2515		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
2516		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
2517	}
2518
2519	sha->sas_ha_name = DRV_NAME;
2520	sha->dev = hisi_hba->dev;
2521	sha->sas_addr = &hisi_hba->sas_addr[0];
2522	sha->num_phys = hisi_hba->n_phy;
2523	sha->shost = hisi_hba->shost;
2524
2525	for (i = 0; i < hisi_hba->n_phy; i++) {
2526		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2527		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2528	}
2529
2530	rc = hisi_sas_interrupt_preinit(hisi_hba);
2531	if (rc)
2532		goto err_out_ha;
2533
2534	rc = scsi_add_host(shost, &pdev->dev);
2535	if (rc)
2536		goto err_out_ha;
2537
2538	rc = sas_register_ha(sha);
2539	if (rc)
2540		goto err_out_register_ha;
2541
2542	rc = hisi_hba->hw->hw_init(hisi_hba);
2543	if (rc)
2544		goto err_out_hw_init;
2545
2546	scsi_scan_host(shost);
2547
2548	return 0;
2549
2550err_out_hw_init:
2551	sas_unregister_ha(sha);
2552err_out_register_ha:
2553	scsi_remove_host(shost);
2554err_out_ha:
2555	hisi_sas_free(hisi_hba);
2556	scsi_host_put(shost);
2557	return rc;
2558}
2559EXPORT_SYMBOL_GPL(hisi_sas_probe);
2560
2561void hisi_sas_remove(struct platform_device *pdev)
2562{
2563	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2564	struct hisi_hba *hisi_hba = sha->lldd_ha;
2565	struct Scsi_Host *shost = sha->shost;
2566
2567	del_timer_sync(&hisi_hba->timer);
2568
2569	sas_unregister_ha(sha);
2570	sas_remove_host(shost);
2571
2572	hisi_sas_free(hisi_hba);
2573	scsi_host_put(shost);
2574}
2575EXPORT_SYMBOL_GPL(hisi_sas_remove);
2576
2577#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
2578#define DEBUGFS_ENABLE_DEFAULT  "enabled"
2579bool hisi_sas_debugfs_enable = true;
2580u32 hisi_sas_debugfs_dump_count = 50;
2581#else
2582#define DEBUGFS_ENABLE_DEFAULT "disabled"
2583bool hisi_sas_debugfs_enable;
2584u32 hisi_sas_debugfs_dump_count = 1;
2585#endif
2586
2587EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
2588module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
2589MODULE_PARM_DESC(debugfs_enable,
2590		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");
2591
2592EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
2593module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
2594MODULE_PARM_DESC(debugfs_dump_count, "Number of debugfs dumps to allow");
2595
2596struct dentry *hisi_sas_debugfs_dir;
2597EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);
2598
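/* Module init: attach the SAS transport template and create the debugfs root */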
2599static __init int hisi_sas_init(void)
2600{
2601	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2602	if (!hisi_sas_stt)
2603		return -ENOMEM;
2604
2605	if (hisi_sas_debugfs_enable) {
2606		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
2607		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
2608			pr_info("hisi_sas: Limiting debugfs dump count\n");
2609			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
2610		}
2611	}
2612
2613	return 0;
2614}
2615
2616static __exit void hisi_sas_exit(void)
2617{
2618	sas_release_transport(hisi_sas_stt);
2619
2620	debugfs_remove(hisi_sas_debugfs_dir);
2621}
2622
2623module_init(hisi_sas_init);
2624module_exit(hisi_sas_exit);
2625
2626MODULE_LICENSE("GPL");
2627MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2628MODULE_DESCRIPTION("HISILICON SAS controller driver");
2629MODULE_ALIAS("platform:" DRV_NAME);