drivers/scsi/aic94xx/aic94xx_task.c (Linux v6.8)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Aic94xx SAS/SATA Tasks
  4 *
  5 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  7 */
  8
  9#include <linux/spinlock.h>
 10#include "aic94xx.h"
 11#include "aic94xx_sas.h"
 12#include "aic94xx_hwi.h"
 13
 14static void asd_unbuild_ata_ascb(struct asd_ascb *a);
 15static void asd_unbuild_smp_ascb(struct asd_ascb *a);
 16static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
 17
 18static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
 19{
 20	unsigned long flags;
 21
 22	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
 23	asd_ha->seq.can_queue += num;
 24	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
 25}
 26
 27/* Translate DMA_... data directions into the sequencer's DATA_DIR_... flags.
 28 */
 29static const u8 data_dir_flags[] = {
 30	[DMA_BIDIRECTIONAL]	= DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
 31	[DMA_TO_DEVICE]		= DATA_DIR_OUT,		/* OUTBOUND */
 32	[DMA_FROM_DEVICE]	= DATA_DIR_IN,		/* INBOUND */
 33	[DMA_NONE]		= DATA_DIR_NONE,	/* NO TRANSFER */
 34};
 35
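/*
 * Map a task's data buffer into the SCB's embedded three-element SG list.
 * A flat buffer (num_scatter == 0) is mapped with dma_map_single(); STP/SATA
 * tasks arrive already mapped by libata.  When more than three elements are
 * needed, the full list goes into a DMA-coherent external table and the
 * third embedded element points at it.
 */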
 36static int asd_map_scatterlist(struct sas_task *task,
 37			       struct sg_el *sg_arr,
 38			       gfp_t gfp_flags)
 39{
 40	struct asd_ascb *ascb = task->lldd_task;
 41	struct asd_ha_struct *asd_ha = ascb->ha;
 42	struct scatterlist *sc;
 43	int num_sg, res;
 44
 45	if (task->data_dir == DMA_NONE)
 46		return 0;
 47
 48	if (task->num_scatter == 0) {
 49		void *p = task->scatter;
 50		dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
 51						task->total_xfer_len,
 52						task->data_dir);
 53		if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
 54			return -ENOMEM;
 55
 56		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
 57		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
 58		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
 59		return 0;
 60	}
 61
 62	/* STP tasks come from libata which has already mapped
 63	 * the SG list */
 64	if (sas_protocol_ata(task->task_proto))
 65		num_sg = task->num_scatter;
 66	else
 67		num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
 68				    task->num_scatter, task->data_dir);
 69	if (num_sg == 0)
 70		return -ENOMEM;
 71
 72	if (num_sg > 3) {
 73		int i;
 74
 75		ascb->sg_arr = asd_alloc_coherent(asd_ha,
 76						  num_sg*sizeof(struct sg_el),
 77						  gfp_flags);
 78		if (!ascb->sg_arr) {
 79			res = -ENOMEM;
 80			goto err_unmap;
 81		}
 82		for_each_sg(task->scatter, sc, num_sg, i) {
 83			struct sg_el *sg =
 84				&((struct sg_el *)ascb->sg_arr->vaddr)[i];
 85			sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
 86			sg->size = cpu_to_le32((u32)sg_dma_len(sc));
 87			if (i == num_sg-1)
 88				sg->flags |= ASD_SG_EL_LIST_EOL;
 89		}
 90
 91		for_each_sg(task->scatter, sc, 2, i) {
 92			sg_arr[i].bus_addr =
 93				cpu_to_le64((u64)sg_dma_address(sc));
 94			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
 95		}
 96		sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
 97		sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;
 98
 99		memset(&sg_arr[2], 0, sizeof(*sg_arr));
100		sg_arr[2].bus_addr = cpu_to_le64((u64)ascb->sg_arr->dma_handle);
101	} else {
102		int i;
103		for_each_sg(task->scatter, sc, num_sg, i) {
104			sg_arr[i].bus_addr =
105				cpu_to_le64((u64)sg_dma_address(sc));
106			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
107		}
108		sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
109	}
110
111	return 0;
112err_unmap:
113	if (sas_protocol_ata(task->task_proto))
114		dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
115			     task->num_scatter, task->data_dir);
116	return res;
117}
118
119static void asd_unmap_scatterlist(struct asd_ascb *ascb)
120{
121	struct asd_ha_struct *asd_ha = ascb->ha;
122	struct sas_task *task = ascb->uldd_task;
123
124	if (task->data_dir == DMA_NONE)
125		return;
126
127	if (task->num_scatter == 0) {
128		dma_addr_t dma = (dma_addr_t)
129		       le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
130		dma_unmap_single(&ascb->ha->pcidev->dev, dma,
131				 task->total_xfer_len, task->data_dir);
132		return;
133	}
134
135	asd_free_coherent(asd_ha, ascb->sg_arr);
136	if (task->task_proto != SAS_PROTOCOL_STP)
137		dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
138			     task->num_scatter, task->data_dir);
139}
140
141/* ---------- Task complete tasklet ---------- */
142
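/*
 * Extract the target's response from the empty data buffer (EDB) received by
 * the matching empty SCB: the residual count is the first dword, an SSP
 * response IU is handed to sas_ssp_task_response(), and for ATA the frame
 * length and ending FIS are copied into task->task_status.buf.
 */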
143static void asd_get_response_tasklet(struct asd_ascb *ascb,
144				     struct done_list_struct *dl)
145{
146	struct asd_ha_struct *asd_ha = ascb->ha;
147	struct sas_task *task = ascb->uldd_task;
148	struct task_status_struct *ts = &task->task_status;
149	unsigned long flags;
150	struct tc_resp_sb_struct {
151		__le16 index_escb;
152		u8     len_lsb;
153		u8     flags;
154	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
155
156/* 	int  size   = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
157	int  edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
158	struct asd_ascb *escb;
159	struct asd_dma_tok *edb;
160	void *r;
161
162	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
163	escb = asd_tc_index_find(&asd_ha->seq,
164				 (int)le16_to_cpu(resp_sb->index_escb));
165	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
166
167	if (!escb) {
168		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
169		return;
170	}
171
172	ts->buf_valid_size = 0;
173	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
174	r = edb->vaddr;
175	if (task->task_proto == SAS_PROTOCOL_SSP) {
176		struct ssp_response_iu *iu =
177			r + 16 + sizeof(struct ssp_frame_hdr);
178
179		ts->residual = le32_to_cpu(*(__le32 *)r);
180
181		sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
182	}  else {
183		struct ata_task_resp *resp = (void *) &ts->buf[0];
184
185		ts->residual = le32_to_cpu(*(__le32 *)r);
186
187		if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
188			resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
189			memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
190			ts->buf_valid_size = sizeof(*resp);
191		}
192	}
193
194	asd_invalidate_edb(escb, edb_id);
195}
196
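/*
 * Common completion handler for task SCBs: translate the done-list opcode
 * into a libsas (resp, stat) pair, undo the per-protocol DMA mappings, and
 * complete the task.  If the upper layer has already marked the task
 * aborted, only the waiter (if any) is signalled and the ascb is left for
 * the aborting context to reclaim.
 */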
197static void asd_task_tasklet_complete(struct asd_ascb *ascb,
198				      struct done_list_struct *dl)
199{
200	struct sas_task *task = ascb->uldd_task;
201	struct task_status_struct *ts = &task->task_status;
202	unsigned long flags;
203	u8 opcode = dl->opcode;
204
205	asd_can_dequeue(ascb->ha, 1);
206
207Again:
208	switch (opcode) {
209	case TC_NO_ERROR:
210		ts->resp = SAS_TASK_COMPLETE;
211		ts->stat = SAS_SAM_STAT_GOOD;
212		break;
213	case TC_UNDERRUN:
214		ts->resp = SAS_TASK_COMPLETE;
215		ts->stat = SAS_DATA_UNDERRUN;
216		ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
217		break;
218	case TC_OVERRUN:
219		ts->resp = SAS_TASK_COMPLETE;
220		ts->stat = SAS_DATA_OVERRUN;
221		ts->residual = 0;
222		break;
223	case TC_SSP_RESP:
224	case TC_ATA_RESP:
225		ts->resp = SAS_TASK_COMPLETE;
226		ts->stat = SAS_PROTO_RESPONSE;
227		asd_get_response_tasklet(ascb, dl);
228		break;
229	case TF_OPEN_REJECT:
230		ts->resp = SAS_TASK_UNDELIVERED;
231		ts->stat = SAS_OPEN_REJECT;
232		if (dl->status_block[1] & 2)
233			ts->open_rej_reason = 1 + dl->status_block[2];
234		else if (dl->status_block[1] & 1)
235			ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
236		else
237			ts->open_rej_reason = SAS_OREJ_UNKNOWN;
238		break;
239	case TF_OPEN_TO:
240		ts->resp = SAS_TASK_UNDELIVERED;
241		ts->stat = SAS_OPEN_TO;
242		break;
243	case TF_PHY_DOWN:
244	case TU_PHY_DOWN:
245		ts->resp = SAS_TASK_UNDELIVERED;
246		ts->stat = SAS_PHY_DOWN;
247		break;
248	case TI_PHY_DOWN:
249		ts->resp = SAS_TASK_COMPLETE;
250		ts->stat = SAS_PHY_DOWN;
251		break;
252	case TI_BREAK:
253	case TI_PROTO_ERR:
254	case TI_NAK:
255	case TI_ACK_NAK_TO:
256	case TF_SMP_XMIT_RCV_ERR:
257	case TC_ATA_R_ERR_RECV:
258		ts->resp = SAS_TASK_COMPLETE;
259		ts->stat = SAS_INTERRUPTED;
260		break;
261	case TF_BREAK:
262	case TU_BREAK:
263	case TU_ACK_NAK_TO:
264	case TF_SMPRSP_TO:
265		ts->resp = SAS_TASK_UNDELIVERED;
266		ts->stat = SAS_DEV_NO_RESPONSE;
267		break;
268	case TF_NAK_RECV:
269		ts->resp = SAS_TASK_COMPLETE;
270		ts->stat = SAS_NAK_R_ERR;
271		break;
272	case TA_I_T_NEXUS_LOSS:
273		opcode = dl->status_block[0];
274		goto Again;
275	case TF_INV_CONN_HANDLE:
276		ts->resp = SAS_TASK_UNDELIVERED;
277		ts->stat = SAS_DEVICE_UNKNOWN;
278		break;
279	case TF_REQUESTED_N_PENDING:
280		ts->resp = SAS_TASK_UNDELIVERED;
281		ts->stat = SAS_PENDING;
282		break;
283	case TC_TASK_CLEARED:
284	case TA_ON_REQ:
285		ts->resp = SAS_TASK_COMPLETE;
286		ts->stat = SAS_ABORTED_TASK;
287		break;
288
289	case TF_NO_SMP_CONN:
290	case TF_TMF_NO_CTX:
291	case TF_TMF_NO_TAG:
292	case TF_TMF_TAG_FREE:
293	case TF_TMF_TASK_DONE:
294	case TF_TMF_NO_CONN_HANDLE:
295	case TF_IRTT_TO:
296	case TF_IU_SHORT:
297	case TF_DATA_OFFS_ERR:
298		ts->resp = SAS_TASK_UNDELIVERED;
299		ts->stat = SAS_DEV_NO_RESPONSE;
300		break;
301
302	case TC_LINK_ADM_RESP:
303	case TC_CONTROL_PHY:
304	case TC_RESUME:
305	case TC_PARTIAL_SG_LIST:
306	default:
307		ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
308		break;
309	}
310
311	switch (task->task_proto) {
312	case SAS_PROTOCOL_SATA:
313	case SAS_PROTOCOL_STP:
314		asd_unbuild_ata_ascb(ascb);
315		break;
316	case SAS_PROTOCOL_SMP:
317		asd_unbuild_smp_ascb(ascb);
318		break;
319	case SAS_PROTOCOL_SSP:
320		asd_unbuild_ssp_ascb(ascb);
321		break;
322	default:
323		break;
324	}
325
326	spin_lock_irqsave(&task->task_state_lock, flags);
327	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
328	task->task_state_flags |= SAS_TASK_STATE_DONE;
329	if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
330		struct completion *completion = ascb->completion;
331		spin_unlock_irqrestore(&task->task_state_lock, flags);
332		ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
333			    "stat 0x%x but aborted by upper layer!\n",
334			    task, opcode, ts->resp, ts->stat);
335		if (completion)
336			complete(completion);
337	} else {
338		spin_unlock_irqrestore(&task->task_state_lock, flags);
339		task->lldd_task = NULL;
340		asd_ascb_free(ascb);
341		mb();
342		task->task_done(task);
343	}
344}
345
346/* ---------- ATA ---------- */
347
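/*
 * Build an ATA/ATAPI SCB: choose the opcode from the task type, copy the
 * host-to-device FIS (setting C=1 and clearing the PM port for normal
 * commands), fill in the NCQ/DMA/direction flags, and map the data buffer
 * unless this is a device control register update.
 */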
348static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
349			      gfp_t gfp_flags)
350{
351	struct domain_device *dev = task->dev;
352	struct scb *scb;
353	u8     flags;
354	int    res = 0;
355
356	scb = ascb->scb;
357
358	if (unlikely(task->ata_task.device_control_reg_update))
359		scb->header.opcode = CONTROL_ATA_DEV;
360	else if (dev->sata_dev.class == ATA_DEV_ATAPI)
361		scb->header.opcode = INITIATE_ATAPI_TASK;
362	else
363		scb->header.opcode = INITIATE_ATA_TASK;
364
365	scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
366	if (dev->port->oob_mode == SAS_OOB_MODE)
367		scb->ata_task.proto_conn_rate |= dev->linkrate;
368
369	scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
370	scb->ata_task.fis = task->ata_task.fis;
371	if (likely(!task->ata_task.device_control_reg_update))
372		scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
373	scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
374	if (dev->sata_dev.class == ATA_DEV_ATAPI)
375		memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
376		       16);
377	scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
378	scb->ata_task.conn_handle = cpu_to_le16(
379		(u16)(unsigned long)dev->lldd_dev);
380
381	if (likely(!task->ata_task.device_control_reg_update)) {
382		flags = 0;
383		if (task->ata_task.dma_xfer)
384			flags |= DATA_XFER_MODE_DMA;
385		if (task->ata_task.use_ncq &&
386		    dev->sata_dev.class != ATA_DEV_ATAPI)
387			flags |= ATA_Q_TYPE_NCQ;
388		flags |= data_dir_flags[task->data_dir];
389		scb->ata_task.ata_flags = flags;
390
391		scb->ata_task.retry_count = 0;
392
393		scb->ata_task.flags = 0;
394	}
395	ascb->tasklet_complete = asd_task_tasklet_complete;
396
397	if (likely(!task->ata_task.device_control_reg_update))
398		res = asd_map_scatterlist(task, scb->ata_task.sg_element,
399					  gfp_flags);
400
401	return res;
402}
403
404static void asd_unbuild_ata_ascb(struct asd_ascb *a)
405{
406	asd_unmap_scatterlist(a);
407}
408
409/* ---------- SMP ---------- */
410
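/*
 * Build an SMP SCB: the request and response frames each occupy a single
 * scatterlist entry, mapped here; the sizes programmed into the SCB exclude
 * the trailing 4-byte CRC.
 */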
411static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
412			      gfp_t gfp_flags)
413{
414	struct asd_ha_struct *asd_ha = ascb->ha;
415	struct domain_device *dev = task->dev;
416	struct scb *scb;
417
418	dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
419		   DMA_TO_DEVICE);
420	dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
421		   DMA_FROM_DEVICE);
422
423	scb = ascb->scb;
424
425	scb->header.opcode = INITIATE_SMP_TASK;
426
427	scb->smp_task.proto_conn_rate = dev->linkrate;
428
429	scb->smp_task.smp_req.bus_addr =
430		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
431	scb->smp_task.smp_req.size =
432		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
433
434	scb->smp_task.smp_resp.bus_addr =
435		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
436	scb->smp_task.smp_resp.size =
437		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
438
439	scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
440	scb->smp_task.conn_handle = cpu_to_le16((u16)
441						(unsigned long)dev->lldd_dev);
442
443	ascb->tasklet_complete = asd_task_tasklet_complete;
444
445	return 0;
446}
447
448static void asd_unbuild_smp_ascb(struct asd_ascb *a)
449{
450	struct sas_task *task = a->uldd_task;
451
452	BUG_ON(!task);
453	dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
454		     DMA_TO_DEVICE);
455	dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
456		     DMA_FROM_DEVICE);
457}
458
459/* ---------- SSP ---------- */
460
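/*
 * Build an SSP task SCB: fill in the hashed source and destination SAS
 * addresses, the LUN, the task attribute and the CDB, then map the data
 * buffer into the SCB's SG elements.
 */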
461static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
462			      gfp_t gfp_flags)
463{
464	struct domain_device *dev = task->dev;
465	struct scb *scb;
466	int    res = 0;
467
468	scb = ascb->scb;
469
470	scb->header.opcode = INITIATE_SSP_TASK;
471
472	scb->ssp_task.proto_conn_rate  = (1 << 4); /* SSP */
473	scb->ssp_task.proto_conn_rate |= dev->linkrate;
474	scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
475	scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
476	memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
477	       HASHED_SAS_ADDR_SIZE);
478	memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
479	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
480	scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
481
482	memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
483	scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
484	memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
485	       task->ssp_task.cmd->cmd_len);
486
487	scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
488	scb->ssp_task.conn_handle = cpu_to_le16(
489		(u16)(unsigned long)dev->lldd_dev);
490	scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
491	scb->ssp_task.retry_count = 0;
492
493	ascb->tasklet_complete = asd_task_tasklet_complete;
494
495	res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
496
497	return res;
498}
499
500static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
501{
502	asd_unmap_scatterlist(a);
503}
504
505/* ---------- Execute Task ---------- */
506
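/*
 * asd_can_queue() reserves a slot from seq.can_queue under pend_q_lock;
 * asd_can_dequeue() above returns slots when a task completes or cannot
 * be queued.
 */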
507static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
508{
509	int res = 0;
510	unsigned long flags;
511
512	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
513	if ((asd_ha->seq.can_queue - num) < 0)
514		res = -SAS_QUEUE_FULL;
515	else
516		asd_ha->seq.can_queue -= num;
517	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
518
519	return res;
520}
521
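/*
 * Queue a single sas_task: reserve a can_queue slot, allocate an ascb, build
 * it according to the task protocol and post it to the sequencer.  On any
 * failure the ascb is unbuilt and freed and the slot is given back via
 * asd_can_dequeue().
 */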
522int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
523{
524	int res = 0;
525	LIST_HEAD(alist);
526	struct sas_task *t = task;
527	struct asd_ascb *ascb = NULL, *a;
528	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
529
530	res = asd_can_queue(asd_ha, 1);
531	if (res)
532		return res;
533
534	res = 1;
535	ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
536	if (res) {
537		res = -ENOMEM;
538		goto out_err;
539	}
540
541	__list_add(&alist, ascb->list.prev, &ascb->list);
542	list_for_each_entry(a, &alist, list) {
543		a->uldd_task = t;
544		t->lldd_task = a;
545		break;
546	}
547	list_for_each_entry(a, &alist, list) {
548		t = a->uldd_task;
549		a->uldd_timer = 1;
550		if (t->task_proto & SAS_PROTOCOL_STP)
551			t->task_proto = SAS_PROTOCOL_STP;
552		switch (t->task_proto) {
553		case SAS_PROTOCOL_SATA:
554		case SAS_PROTOCOL_STP:
555			res = asd_build_ata_ascb(a, t, gfp_flags);
556			break;
557		case SAS_PROTOCOL_SMP:
558			res = asd_build_smp_ascb(a, t, gfp_flags);
559			break;
560		case SAS_PROTOCOL_SSP:
561			res = asd_build_ssp_ascb(a, t, gfp_flags);
562			break;
563		default:
564			asd_printk("unknown sas_task proto: 0x%x\n",
565				   t->task_proto);
566			res = -ENOMEM;
567			break;
568		}
569		if (res)
570			goto out_err_unmap;
571	}
572	list_del_init(&alist);
573
574	res = asd_post_ascb_list(asd_ha, ascb, 1);
575	if (unlikely(res)) {
576		a = NULL;
577		__list_add(&alist, ascb->list.prev, &ascb->list);
578		goto out_err_unmap;
579	}
580
581	return 0;
582out_err_unmap:
583	{
584		struct asd_ascb *b = a;
585		list_for_each_entry(a, &alist, list) {
586			if (a == b)
587				break;
588			t = a->uldd_task;
589			switch (t->task_proto) {
590			case SAS_PROTOCOL_SATA:
591			case SAS_PROTOCOL_STP:
592				asd_unbuild_ata_ascb(a);
593				break;
594			case SAS_PROTOCOL_SMP:
595				asd_unbuild_smp_ascb(a);
596				break;
597			case SAS_PROTOCOL_SSP:
598				asd_unbuild_ssp_ascb(a);
599				break;
600			default:
601				break;
602			}
603			t->lldd_task = NULL;
604		}
605	}
606	list_del_init(&alist);
607out_err:
608	if (ascb)
609		asd_ascb_free_list(ascb);
610	asd_can_dequeue(asd_ha, 1);
611	return res;
612}