Aic94xx driver task management functions, Linux v3.1:

  1/*
  2 * Aic94xx Task Management Functions
  3 *
  4 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  6 *
  7 * This file is licensed under GPLv2.
  8 *
  9 * This file is part of the aic94xx driver.
 10 *
 11 * The aic94xx driver is free software; you can redistribute it and/or
 12 * modify it under the terms of the GNU General Public License as
 13 * published by the Free Software Foundation; version 2 of the
 14 * License.
 15 *
 16 * The aic94xx driver is distributed in the hope that it will be useful,
 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 19 * General Public License for more details.
 20 *
 21 * You should have received a copy of the GNU General Public License
 22 * along with the aic94xx driver; if not, write to the Free Software
 23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 24 *
 25 */
 26
 27#include <linux/spinlock.h>
 28#include <linux/gfp.h>
 29#include "aic94xx.h"
 30#include "aic94xx_sas.h"
 31#include "aic94xx_hwi.h"
 32
 33/* ---------- Internal enqueue ---------- */
 34
 35static int asd_enqueue_internal(struct asd_ascb *ascb,
 36		void (*tasklet_complete)(struct asd_ascb *,
 37					 struct done_list_struct *),
 38				void (*timed_out)(unsigned long))
 39{
 40	int res;
 41
 42	ascb->tasklet_complete = tasklet_complete;
 43	ascb->uldd_timer = 1;
 44
 45	ascb->timer.data = (unsigned long) ascb;
 46	ascb->timer.function = timed_out;
 47	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
 48
 49	add_timer(&ascb->timer);
 50
 51	res = asd_post_ascb_list(ascb->ha, ascb, 1);
 52	if (unlikely(res))
 53		del_timer(&ascb->timer);
 54	return res;
 55}
 56
 57/* ---------- CLEAR NEXUS ---------- */
 58
 59struct tasklet_completion_status {
 60	int	dl_opcode;
 61	int	tmf_state;
 62	u8	tag_valid:1;
 63	__be16	tag;
 64};
 65
 66#define DECLARE_TCS(tcs) \
 67	struct tasklet_completion_status tcs = { \
 68		.dl_opcode = 0, \
 69		.tmf_state = 0, \
 70		.tag_valid = 0, \
 71		.tag = 0, \
 72	}
 73
 74
 75static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
 76					     struct done_list_struct *dl)
 77{
 78	struct tasklet_completion_status *tcs = ascb->uldd_task;
 79	ASD_DPRINTK("%s: here\n", __func__);
 80	if (!del_timer(&ascb->timer)) {
 81		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
 82		return;
 83	}
 84	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
 85	tcs->dl_opcode = dl->opcode;
 86	complete(ascb->completion);
 87	asd_ascb_free(ascb);
 88}
 89
 90static void asd_clear_nexus_timedout(unsigned long data)
 91{
 92	struct asd_ascb *ascb = (void *)data;
 93	struct tasklet_completion_status *tcs = ascb->uldd_task;
 94
 95	ASD_DPRINTK("%s: here\n", __func__);
 96	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
 97	complete(ascb->completion);
 98}
 99
100#define CLEAR_NEXUS_PRE         \
101	struct asd_ascb *ascb; \
102	struct scb *scb; \
103	int res; \
104	DECLARE_COMPLETION_ONSTACK(completion); \
105	DECLARE_TCS(tcs); \
106		\
107	ASD_DPRINTK("%s: PRE\n", __func__); \
108        res = 1;                \
109	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
110	if (!ascb)              \
111		return -ENOMEM; \
112                                \
113	ascb->completion = &completion; \
114	ascb->uldd_task = &tcs; \
115	scb = ascb->scb;        \
116	scb->header.opcode = CLEAR_NEXUS
117
118#define CLEAR_NEXUS_POST        \
119	ASD_DPRINTK("%s: POST\n", __func__); \
120	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
121				   asd_clear_nexus_timedout);              \
122	if (res)                \
123		goto out_err;   \
124	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
125	wait_for_completion(&completion); \
126	res = tcs.dl_opcode; \
127	if (res == TC_NO_ERROR) \
128		res = TMF_RESP_FUNC_COMPLETE;   \
129	return res; \
130out_err:                        \
131	asd_ascb_free(ascb);    \
132	return res
133
134int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
135{
136	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
137
138	CLEAR_NEXUS_PRE;
139	scb->clear_nexus.nexus = NEXUS_ADAPTER;
140	CLEAR_NEXUS_POST;
141}
142
143int asd_clear_nexus_port(struct asd_sas_port *port)
144{
145	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
146
147	CLEAR_NEXUS_PRE;
148	scb->clear_nexus.nexus = NEXUS_PORT;
149	scb->clear_nexus.conn_mask = port->phy_mask;
150	CLEAR_NEXUS_POST;
151}
152
153enum clear_nexus_phase {
154	NEXUS_PHASE_PRE,
155	NEXUS_PHASE_POST,
156	NEXUS_PHASE_RESUME,
157};
158
159static int asd_clear_nexus_I_T(struct domain_device *dev,
160			       enum clear_nexus_phase phase)
161{
162	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
163
164	CLEAR_NEXUS_PRE;
165	scb->clear_nexus.nexus = NEXUS_I_T;
166	switch (phase) {
167	case NEXUS_PHASE_PRE:
168		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
169		break;
170	case NEXUS_PHASE_POST:
171		scb->clear_nexus.flags = SEND_Q | NOTINQ;
172		break;
173	case NEXUS_PHASE_RESUME:
174		scb->clear_nexus.flags = RESUME_TX;
175	}
176	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
177						   dev->lldd_dev);
178	CLEAR_NEXUS_POST;
179}
180
181int asd_I_T_nexus_reset(struct domain_device *dev)
182{
183	int res, tmp_res, i;
184	struct sas_phy *phy = sas_find_local_phy(dev);
185	/* Standard mandates link reset for ATA  (type 0) and
186	 * hard reset for SSP (type 1) */
187	int reset_type = (dev->dev_type == SATA_DEV ||
188			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
189
190	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
191	/* send a hard reset */
192	ASD_DPRINTK("sending %s reset to %s\n",
193		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
194	res = sas_phy_reset(phy, reset_type);
195	if (res == TMF_RESP_FUNC_COMPLETE) {
196		/* wait for the maximum settle time */
197		msleep(500);
198		/* clear all outstanding commands (keep nexus suspended) */
199		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
200	}
201	for (i = 0 ; i < 3; i++) {
202		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
203		if (tmp_res == TC_RESUME)
204			return res;
205		msleep(500);
206	}
207
208	/* This is a bit of a problem:  the sequencer is still suspended
209	 * and is refusing to resume.  Hope it will resume on a bigger hammer
210	 * or the disk is lost */
211	dev_printk(KERN_ERR, &phy->dev,
212		   "Failed to resume nexus after reset 0x%x\n", tmp_res);
213
214	return TMF_RESP_FUNC_FAILED;
215}
216
217static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
218{
219	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
220
221	CLEAR_NEXUS_PRE;
222	scb->clear_nexus.nexus = NEXUS_I_T_L;
223	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
224	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
225	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
226						   dev->lldd_dev);
227	CLEAR_NEXUS_POST;
228}
229
230static int asd_clear_nexus_tag(struct sas_task *task)
231{
232	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
233	struct asd_ascb *tascb = task->lldd_task;
234
235	CLEAR_NEXUS_PRE;
236	scb->clear_nexus.nexus = NEXUS_TAG;
237	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
238	scb->clear_nexus.ssp_task.tag = tascb->tag;
239	if (task->dev->tproto)
240		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
241							  task->dev->lldd_dev);
242	CLEAR_NEXUS_POST;
243}
244
245static int asd_clear_nexus_index(struct sas_task *task)
246{
247	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
248	struct asd_ascb *tascb = task->lldd_task;
249
250	CLEAR_NEXUS_PRE;
251	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
252	if (task->dev->tproto)
253		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
254							  task->dev->lldd_dev);
255	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
256	CLEAR_NEXUS_POST;
257}
258
259/* ---------- TMFs ---------- */
260
261static void asd_tmf_timedout(unsigned long data)
262{
263	struct asd_ascb *ascb = (void *) data;
264	struct tasklet_completion_status *tcs = ascb->uldd_task;
265
266	ASD_DPRINTK("tmf timed out\n");
267	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
268	complete(ascb->completion);
269}
270
271static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
272				    struct done_list_struct *dl)
273{
274	struct asd_ha_struct *asd_ha = ascb->ha;
275	unsigned long flags;
276	struct tc_resp_sb_struct {
277		__le16 index_escb;
278		u8     len_lsb;
279		u8     flags;
280	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
281
282	int  edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
283	struct asd_ascb *escb;
284	struct asd_dma_tok *edb;
285	struct ssp_frame_hdr *fh;
286	struct ssp_response_iu   *ru;
287	int res = TMF_RESP_FUNC_FAILED;
288
289	ASD_DPRINTK("tmf resp tasklet\n");
290
291	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
292	escb = asd_tc_index_find(&asd_ha->seq,
293				 (int)le16_to_cpu(resp_sb->index_escb));
294	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
295
296	if (!escb) {
297		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
298		return res;
299	}
300
301	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
302	ascb->tag = *(__be16 *)(edb->vaddr+4);
303	fh = edb->vaddr + 16;
304	ru = edb->vaddr + 16 + sizeof(*fh);
305	res = ru->status;
306	if (ru->datapres == 1)	  /* Response data present */
307		res = ru->resp_data[3];
308#if 0
309	ascb->tag = fh->tag;
310#endif
311	ascb->tag_valid = 1;
312
313	asd_invalidate_edb(escb, edb_id);
314	return res;
315}
316
317static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
318				     struct done_list_struct *dl)
319{
320	struct tasklet_completion_status *tcs;
321
322	if (!del_timer(&ascb->timer))
323		return;
324
325	tcs = ascb->uldd_task;
326	ASD_DPRINTK("tmf tasklet complete\n");
327
328	tcs->dl_opcode = dl->opcode;
329
330	if (dl->opcode == TC_SSP_RESP) {
331		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
332		tcs->tag_valid = ascb->tag_valid;
333		tcs->tag = ascb->tag;
334	}
335
336	complete(ascb->completion);
337	asd_ascb_free(ascb);
338}
339
340static int asd_clear_nexus(struct sas_task *task)
341{
342	int res = TMF_RESP_FUNC_FAILED;
343	int leftover;
344	struct asd_ascb *tascb = task->lldd_task;
345	DECLARE_COMPLETION_ONSTACK(completion);
346	unsigned long flags;
347
348	tascb->completion = &completion;
349
350	ASD_DPRINTK("task not done, clearing nexus\n");
351	if (tascb->tag_valid)
352		res = asd_clear_nexus_tag(task);
353	else
354		res = asd_clear_nexus_index(task);
355	leftover = wait_for_completion_timeout(&completion,
356					       AIC94XX_SCB_TIMEOUT);
357	tascb->completion = NULL;
358	ASD_DPRINTK("came back from clear nexus\n");
359	spin_lock_irqsave(&task->task_state_lock, flags);
360	if (leftover < 1)
361		res = TMF_RESP_FUNC_FAILED;
362	if (task->task_state_flags & SAS_TASK_STATE_DONE)
363		res = TMF_RESP_FUNC_COMPLETE;
364	spin_unlock_irqrestore(&task->task_state_lock, flags);
365
366	return res;
367}
368
369/**
370 * asd_abort_task -- ABORT TASK TMF
371 * @task: the task to be aborted
372 *
373 * Before calling ABORT TASK the task state flags should be ORed with
374 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
375 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
376 *
377 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
378 * Returns: SAS TMF responses (see sas_task.h),
379 *          -ENOMEM,
380 *          -SAS_QUEUE_FULL.
381 *
382 * When ABORT TASK returns, the caller of ABORT TASK checks first the
383 * task->task_state_flags, and then the return value of ABORT TASK.
384 *
385 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
386 * task was completed successfully prior to it being aborted.  The
387 * caller of ABORT TASK has responsibility to call task->task_done()
388 * xor free the task, depending on their framework.  The return code
389 * is TMF_RESP_FUNC_FAILED in this case.
390 *
391 * Else the SAS_TASK_STATE_DONE bit is not set,
392 * 	If the return code is TMF_RESP_FUNC_COMPLETE, then
393 * 		the task was aborted successfully.  The caller of
394 * 		ABORT TASK has responsibility to call task->task_done()
395 *              to finish the task, xor free the task depending on their
396 *		framework.
397 *	else
398 * 		the ABORT TASK returned some kind of error. The task
399 *              was _not_ cancelled.  Nothing can be assumed.
400 *		The caller of ABORT TASK may wish to retry.
401 */
402int asd_abort_task(struct sas_task *task)
403{
404	struct asd_ascb *tascb = task->lldd_task;
405	struct asd_ha_struct *asd_ha = tascb->ha;
406	int res = 1;
407	unsigned long flags;
408	struct asd_ascb *ascb = NULL;
409	struct scb *scb;
410	int leftover;
411	DECLARE_TCS(tcs);
412	DECLARE_COMPLETION_ONSTACK(completion);
413	DECLARE_COMPLETION_ONSTACK(tascb_completion);
414
415	tascb->completion = &tascb_completion;
416
417	spin_lock_irqsave(&task->task_state_lock, flags);
418	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
419		spin_unlock_irqrestore(&task->task_state_lock, flags);
420		res = TMF_RESP_FUNC_COMPLETE;
421		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
422		goto out_done;
423	}
424	spin_unlock_irqrestore(&task->task_state_lock, flags);
425
426	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
427	if (!ascb)
428		return -ENOMEM;
429
430	ascb->uldd_task = &tcs;
431	ascb->completion = &completion;
432	scb = ascb->scb;
433	scb->header.opcode = SCB_ABORT_TASK;
434
435	switch (task->task_proto) {
436	case SAS_PROTOCOL_SATA:
437	case SAS_PROTOCOL_STP:
438		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
439		break;
440	case SAS_PROTOCOL_SSP:
441		scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
442		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
443		break;
444	case SAS_PROTOCOL_SMP:
445		break;
446	default:
447		break;
448	}
449
450	if (task->task_proto == SAS_PROTOCOL_SSP) {
451		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
452		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
453		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
454		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
455		       task->dev->port->ha->hashed_sas_addr,
456		       HASHED_SAS_ADDR_SIZE);
457		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
458
459		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
460		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
461		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
462	}
463
464	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
465	scb->abort_task.conn_handle = cpu_to_le16(
466		(u16)(unsigned long)task->dev->lldd_dev);
467	scb->abort_task.retry_count = 1;
468	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
469	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
470
471	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
472				   asd_tmf_timedout);
473	if (res)
474		goto out_free;
475	wait_for_completion(&completion);
476	ASD_DPRINTK("tmf came back\n");
477
478	tascb->tag = tcs.tag;
479	tascb->tag_valid = tcs.tag_valid;
480
481	spin_lock_irqsave(&task->task_state_lock, flags);
482	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
483		spin_unlock_irqrestore(&task->task_state_lock, flags);
484		res = TMF_RESP_FUNC_COMPLETE;
485		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
486		goto out_done;
487	}
488	spin_unlock_irqrestore(&task->task_state_lock, flags);
489
490	if (tcs.dl_opcode == TC_SSP_RESP) {
491		/* The task to be aborted has been sent to the device.
492		 * We got a Response IU for the ABORT TASK TMF. */
493		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
494			res = asd_clear_nexus(task);
495		else
496			res = tcs.tmf_state;
497	} else if (tcs.dl_opcode == TC_NO_ERROR &&
498		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
499		/* timeout */
500		res = TMF_RESP_FUNC_FAILED;
501	} else {
502		/* In the following we assume that the managing layer
503		 * will _never_ make a mistake, when issuing ABORT
504		 * TASK.
505		 */
506		switch (tcs.dl_opcode) {
507		default:
508			res = asd_clear_nexus(task);
509			/* fallthrough */
510		case TC_NO_ERROR:
511			break;
512			/* The task hasn't been sent to the device xor
513			 * we never got a (sane) Response IU for the
514			 * ABORT TASK TMF.
515			 */
516		case TF_NAK_RECV:
517			res = TMF_RESP_INVALID_FRAME;
518			break;
519		case TF_TMF_TASK_DONE:	/* done but not reported yet */
520			res = TMF_RESP_FUNC_FAILED;
521			leftover =
522				wait_for_completion_timeout(&tascb_completion,
523							  AIC94XX_SCB_TIMEOUT);
524			spin_lock_irqsave(&task->task_state_lock, flags);
525			if (leftover < 1)
526				res = TMF_RESP_FUNC_FAILED;
527			if (task->task_state_flags & SAS_TASK_STATE_DONE)
528				res = TMF_RESP_FUNC_COMPLETE;
529			spin_unlock_irqrestore(&task->task_state_lock, flags);
530			break;
531		case TF_TMF_NO_TAG:
532		case TF_TMF_TAG_FREE: /* the tag is in the free list */
533		case TF_TMF_NO_CONN_HANDLE: /* no such device */
534			res = TMF_RESP_FUNC_COMPLETE;
535			break;
536		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
537			res = TMF_RESP_FUNC_ESUPP;
538			break;
539		}
540	}
541 out_done:
542	tascb->completion = NULL;
543	if (res == TMF_RESP_FUNC_COMPLETE) {
544		task->lldd_task = NULL;
545		mb();
546		asd_ascb_free(tascb);
547	}
548	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
549	return res;
550
551 out_free:
552	asd_ascb_free(ascb);
553	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
554	return res;
555}
556
557/**
558 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
559 * @dev: pointer to struct domain_device of interest
560 * @lun: pointer to u8[8] which is the LUN
561 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
562 * @index: the transaction context of the task to be queried if QT TMF
563 *
564 * This function is used to send ABORT TASK SET, CLEAR ACA,
565 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
566 *
567 * No SCBs should be queued to the I_T_L nexus when this SCB is
568 * pending.
569 *
570 * Returns: TMF response code (see sas_task.h or the SAS spec)
571 */
572static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
573				int tmf, int index)
574{
575	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
576	struct asd_ascb *ascb;
577	int res = 1;
578	struct scb *scb;
579	DECLARE_COMPLETION_ONSTACK(completion);
580	DECLARE_TCS(tcs);
581
582	if (!(dev->tproto & SAS_PROTOCOL_SSP))
583		return TMF_RESP_FUNC_ESUPP;
584
585	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
586	if (!ascb)
587		return -ENOMEM;
588
589	ascb->completion = &completion;
590	ascb->uldd_task = &tcs;
591	scb = ascb->scb;
592
593	if (tmf == TMF_QUERY_TASK)
594		scb->header.opcode = QUERY_SSP_TASK;
595	else
596		scb->header.opcode = INITIATE_SSP_TMF;
597
598	scb->ssp_tmf.proto_conn_rate  = (1 << 4); /* SSP */
599	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
600	/* SSP frame header */
601	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
602	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
603	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
604	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
605	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
606	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
607	/* SSP Task IU */
608	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
609	scb->ssp_tmf.ssp_task.tmf = tmf;
610
611	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
612	scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
613					      dev->lldd_dev);
614	scb->ssp_tmf.retry_count = 1;
615	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
616	if (tmf == TMF_QUERY_TASK)
617		scb->ssp_tmf.index = cpu_to_le16(index);
618
619	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
620				   asd_tmf_timedout);
621	if (res)
622		goto out_err;
623	wait_for_completion(&completion);
624
625	switch (tcs.dl_opcode) {
626	case TC_NO_ERROR:
627		res = TMF_RESP_FUNC_COMPLETE;
628		break;
629	case TF_NAK_RECV:
630		res = TMF_RESP_INVALID_FRAME;
631		break;
632	case TF_TMF_TASK_DONE:
633		res = TMF_RESP_FUNC_FAILED;
634		break;
635	case TF_TMF_NO_TAG:
636	case TF_TMF_TAG_FREE: /* the tag is in the free list */
637	case TF_TMF_NO_CONN_HANDLE: /* no such device */
638		res = TMF_RESP_FUNC_COMPLETE;
639		break;
640	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
641		res = TMF_RESP_FUNC_ESUPP;
642		break;
643	default:
644		/* Allow TMF response codes to propagate upwards */
645		res = tcs.dl_opcode;
646		break;
647	}
648	return res;
649out_err:
650	asd_ascb_free(ascb);
651	return res;
652}
653
654int asd_abort_task_set(struct domain_device *dev, u8 *lun)
655{
656	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
657
658	if (res == TMF_RESP_FUNC_COMPLETE)
659		asd_clear_nexus_I_T_L(dev, lun);
660	return res;
661}
662
663int asd_clear_aca(struct domain_device *dev, u8 *lun)
664{
665	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
666
667	if (res == TMF_RESP_FUNC_COMPLETE)
668		asd_clear_nexus_I_T_L(dev, lun);
669	return res;
670}
671
672int asd_clear_task_set(struct domain_device *dev, u8 *lun)
673{
674	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
675
676	if (res == TMF_RESP_FUNC_COMPLETE)
677		asd_clear_nexus_I_T_L(dev, lun);
678	return res;
679}
680
681int asd_lu_reset(struct domain_device *dev, u8 *lun)
682{
683	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
684
685	if (res == TMF_RESP_FUNC_COMPLETE)
686		asd_clear_nexus_I_T_L(dev, lun);
687	return res;
688}
689
690/**
691 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
692 * task: pointer to sas_task struct of interest
693 *
694 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
695 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
696 *
697 * Normally the management layer sets the task to aborted state,
698 * and then calls query task and then abort task.
699 */
700int asd_query_task(struct sas_task *task)
701{
702	struct asd_ascb *ascb = task->lldd_task;
703	int index;
704
705	if (ascb) {
706		index = ascb->tc_index;
707		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
708					    TMF_QUERY_TASK, index);
709	}
710	return TMF_RESP_FUNC_COMPLETE;
711}
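
The v5.9 copy of the same file follows. Apart from the SPDX license line replacing the GPL boilerplate, the visible changes are the timer API (callbacks now take a struct timer_list * and recover the SCB with from_timer() instead of an unsigned long cookie stored in timer.data), the switch from sas_find_local_phy() to sas_get_local_phy()/sas_put_local_phy() with a put on every exit path of asd_I_T_nexus_reset(), the extra -ENODEV check after sas_phy_reset(), the SAS_SATA_DEV spelling of the device type, and the fallthrough annotation replacing the old /* fallthrough */ comment. A minimal side-by-side sketch of the timer-callback change, excerpted from the two listings with the debug printk lines omitted:

/* v3.1: the callback takes an opaque unsigned long; asd_enqueue_internal()
 * stores the ascb pointer in timer.data before add_timer().
 */
static void asd_clear_nexus_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *)data;
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}

/* v5.9: the callback receives the struct timer_list itself and recovers
 * the embedding ascb with from_timer(); timer.data no longer exists.
 */
static void asd_clear_nexus_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
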
Aic94xx driver task management functions, Linux v5.9:

  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Aic94xx Task Management Functions
  4 *
  5 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  7 */
  8
  9#include <linux/spinlock.h>
 10#include <linux/gfp.h>
 11#include "aic94xx.h"
 12#include "aic94xx_sas.h"
 13#include "aic94xx_hwi.h"
 14
 15/* ---------- Internal enqueue ---------- */
 16
 17static int asd_enqueue_internal(struct asd_ascb *ascb,
 18		void (*tasklet_complete)(struct asd_ascb *,
 19					 struct done_list_struct *),
 20				void (*timed_out)(struct timer_list *t))
 21{
 22	int res;
 23
 24	ascb->tasklet_complete = tasklet_complete;
 25	ascb->uldd_timer = 1;
 26
 27	ascb->timer.function = timed_out;
 28	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
 29
 30	add_timer(&ascb->timer);
 31
 32	res = asd_post_ascb_list(ascb->ha, ascb, 1);
 33	if (unlikely(res))
 34		del_timer(&ascb->timer);
 35	return res;
 36}
 37
 38/* ---------- CLEAR NEXUS ---------- */
 39
 40struct tasklet_completion_status {
 41	int	dl_opcode;
 42	int	tmf_state;
 43	u8	tag_valid:1;
 44	__be16	tag;
 45};
 46
 47#define DECLARE_TCS(tcs) \
 48	struct tasklet_completion_status tcs = { \
 49		.dl_opcode = 0, \
 50		.tmf_state = 0, \
 51		.tag_valid = 0, \
 52		.tag = 0, \
 53	}
 54
 55
 56static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
 57					     struct done_list_struct *dl)
 58{
 59	struct tasklet_completion_status *tcs = ascb->uldd_task;
 60	ASD_DPRINTK("%s: here\n", __func__);
 61	if (!del_timer(&ascb->timer)) {
 62		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
 63		return;
 64	}
 65	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
 66	tcs->dl_opcode = dl->opcode;
 67	complete(ascb->completion);
 68	asd_ascb_free(ascb);
 69}
 70
 71static void asd_clear_nexus_timedout(struct timer_list *t)
 72{
 73	struct asd_ascb *ascb = from_timer(ascb, t, timer);
 74	struct tasklet_completion_status *tcs = ascb->uldd_task;
 75
 76	ASD_DPRINTK("%s: here\n", __func__);
 77	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
 78	complete(ascb->completion);
 79}
 80
 81#define CLEAR_NEXUS_PRE         \
 82	struct asd_ascb *ascb; \
 83	struct scb *scb; \
 84	int res; \
 85	DECLARE_COMPLETION_ONSTACK(completion); \
 86	DECLARE_TCS(tcs); \
 87		\
 88	ASD_DPRINTK("%s: PRE\n", __func__); \
 89        res = 1;                \
 90	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
 91	if (!ascb)              \
 92		return -ENOMEM; \
 93                                \
 94	ascb->completion = &completion; \
 95	ascb->uldd_task = &tcs; \
 96	scb = ascb->scb;        \
 97	scb->header.opcode = CLEAR_NEXUS
 98
 99#define CLEAR_NEXUS_POST        \
100	ASD_DPRINTK("%s: POST\n", __func__); \
101	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
102				   asd_clear_nexus_timedout);              \
103	if (res)                \
104		goto out_err;   \
105	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
106	wait_for_completion(&completion); \
107	res = tcs.dl_opcode; \
108	if (res == TC_NO_ERROR) \
109		res = TMF_RESP_FUNC_COMPLETE;   \
110	return res; \
111out_err:                        \
112	asd_ascb_free(ascb);    \
113	return res
114
115int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
116{
117	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
118
119	CLEAR_NEXUS_PRE;
120	scb->clear_nexus.nexus = NEXUS_ADAPTER;
121	CLEAR_NEXUS_POST;
122}
123
124int asd_clear_nexus_port(struct asd_sas_port *port)
125{
126	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
127
128	CLEAR_NEXUS_PRE;
129	scb->clear_nexus.nexus = NEXUS_PORT;
130	scb->clear_nexus.conn_mask = port->phy_mask;
131	CLEAR_NEXUS_POST;
132}
133
134enum clear_nexus_phase {
135	NEXUS_PHASE_PRE,
136	NEXUS_PHASE_POST,
137	NEXUS_PHASE_RESUME,
138};
139
140static int asd_clear_nexus_I_T(struct domain_device *dev,
141			       enum clear_nexus_phase phase)
142{
143	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
144
145	CLEAR_NEXUS_PRE;
146	scb->clear_nexus.nexus = NEXUS_I_T;
147	switch (phase) {
148	case NEXUS_PHASE_PRE:
149		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
150		break;
151	case NEXUS_PHASE_POST:
152		scb->clear_nexus.flags = SEND_Q | NOTINQ;
153		break;
154	case NEXUS_PHASE_RESUME:
155		scb->clear_nexus.flags = RESUME_TX;
156	}
157	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
158						   dev->lldd_dev);
159	CLEAR_NEXUS_POST;
160}
161
162int asd_I_T_nexus_reset(struct domain_device *dev)
163{
164	int res, tmp_res, i;
165	struct sas_phy *phy = sas_get_local_phy(dev);
166	/* Standard mandates link reset for ATA  (type 0) and
167	 * hard reset for SSP (type 1) */
168	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
169			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
170
171	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
172	/* send a hard reset */
173	ASD_DPRINTK("sending %s reset to %s\n",
174		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
175	res = sas_phy_reset(phy, reset_type);
176	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
177		/* wait for the maximum settle time */
178		msleep(500);
179		/* clear all outstanding commands (keep nexus suspended) */
180		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
181	}
182	for (i = 0 ; i < 3; i++) {
183		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
184		if (tmp_res == TC_RESUME)
185			goto out;
186		msleep(500);
187	}
188
189	/* This is a bit of a problem:  the sequencer is still suspended
190	 * and is refusing to resume.  Hope it will resume on a bigger hammer
191	 * or the disk is lost */
192	dev_printk(KERN_ERR, &phy->dev,
193		   "Failed to resume nexus after reset 0x%x\n", tmp_res);
194
195	res = TMF_RESP_FUNC_FAILED;
196 out:
197	sas_put_local_phy(phy);
198	return res;
199}
200
201static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
202{
203	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
204
205	CLEAR_NEXUS_PRE;
206	scb->clear_nexus.nexus = NEXUS_I_T_L;
207	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
208	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
209	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
210						   dev->lldd_dev);
211	CLEAR_NEXUS_POST;
212}
213
214static int asd_clear_nexus_tag(struct sas_task *task)
215{
216	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
217	struct asd_ascb *tascb = task->lldd_task;
218
219	CLEAR_NEXUS_PRE;
220	scb->clear_nexus.nexus = NEXUS_TAG;
221	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
222	scb->clear_nexus.ssp_task.tag = tascb->tag;
223	if (task->dev->tproto)
224		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
225							  task->dev->lldd_dev);
226	CLEAR_NEXUS_POST;
227}
228
229static int asd_clear_nexus_index(struct sas_task *task)
230{
231	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
232	struct asd_ascb *tascb = task->lldd_task;
233
234	CLEAR_NEXUS_PRE;
235	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
236	if (task->dev->tproto)
237		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
238							  task->dev->lldd_dev);
239	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
240	CLEAR_NEXUS_POST;
241}
242
243/* ---------- TMFs ---------- */
244
245static void asd_tmf_timedout(struct timer_list *t)
246{
247	struct asd_ascb *ascb = from_timer(ascb, t, timer);
248	struct tasklet_completion_status *tcs = ascb->uldd_task;
249
250	ASD_DPRINTK("tmf timed out\n");
251	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
252	complete(ascb->completion);
253}
254
255static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
256				    struct done_list_struct *dl)
257{
258	struct asd_ha_struct *asd_ha = ascb->ha;
259	unsigned long flags;
260	struct tc_resp_sb_struct {
261		__le16 index_escb;
262		u8     len_lsb;
263		u8     flags;
264	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
265
266	int  edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
267	struct asd_ascb *escb;
268	struct asd_dma_tok *edb;
269	struct ssp_frame_hdr *fh;
270	struct ssp_response_iu   *ru;
271	int res = TMF_RESP_FUNC_FAILED;
272
273	ASD_DPRINTK("tmf resp tasklet\n");
274
275	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
276	escb = asd_tc_index_find(&asd_ha->seq,
277				 (int)le16_to_cpu(resp_sb->index_escb));
278	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
279
280	if (!escb) {
281		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
282		return res;
283	}
284
285	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
286	ascb->tag = *(__be16 *)(edb->vaddr+4);
287	fh = edb->vaddr + 16;
288	ru = edb->vaddr + 16 + sizeof(*fh);
289	res = ru->status;
290	if (ru->datapres == 1)	  /* Response data present */
291		res = ru->resp_data[3];
292#if 0
293	ascb->tag = fh->tag;
294#endif
295	ascb->tag_valid = 1;
296
297	asd_invalidate_edb(escb, edb_id);
298	return res;
299}
300
301static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
302				     struct done_list_struct *dl)
303{
304	struct tasklet_completion_status *tcs;
305
306	if (!del_timer(&ascb->timer))
307		return;
308
309	tcs = ascb->uldd_task;
310	ASD_DPRINTK("tmf tasklet complete\n");
311
312	tcs->dl_opcode = dl->opcode;
313
314	if (dl->opcode == TC_SSP_RESP) {
315		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
316		tcs->tag_valid = ascb->tag_valid;
317		tcs->tag = ascb->tag;
318	}
319
320	complete(ascb->completion);
321	asd_ascb_free(ascb);
322}
323
324static int asd_clear_nexus(struct sas_task *task)
325{
326	int res = TMF_RESP_FUNC_FAILED;
327	int leftover;
328	struct asd_ascb *tascb = task->lldd_task;
329	DECLARE_COMPLETION_ONSTACK(completion);
330	unsigned long flags;
331
332	tascb->completion = &completion;
333
334	ASD_DPRINTK("task not done, clearing nexus\n");
335	if (tascb->tag_valid)
336		res = asd_clear_nexus_tag(task);
337	else
338		res = asd_clear_nexus_index(task);
339	leftover = wait_for_completion_timeout(&completion,
340					       AIC94XX_SCB_TIMEOUT);
341	tascb->completion = NULL;
342	ASD_DPRINTK("came back from clear nexus\n");
343	spin_lock_irqsave(&task->task_state_lock, flags);
344	if (leftover < 1)
345		res = TMF_RESP_FUNC_FAILED;
346	if (task->task_state_flags & SAS_TASK_STATE_DONE)
347		res = TMF_RESP_FUNC_COMPLETE;
348	spin_unlock_irqrestore(&task->task_state_lock, flags);
349
350	return res;
351}
352
353/**
354 * asd_abort_task -- ABORT TASK TMF
355 * @task: the task to be aborted
356 *
357 * Before calling ABORT TASK the task state flags should be ORed with
358 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
359 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
360 *
361 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
362 * Returns: SAS TMF responses (see sas_task.h),
363 *          -ENOMEM,
364 *          -SAS_QUEUE_FULL.
365 *
366 * When ABORT TASK returns, the caller of ABORT TASK checks first the
367 * task->task_state_flags, and then the return value of ABORT TASK.
368 *
369 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
370 * task was completed successfully prior to it being aborted.  The
371 * caller of ABORT TASK has responsibility to call task->task_done()
372 * xor free the task, depending on their framework.  The return code
373 * is TMF_RESP_FUNC_FAILED in this case.
374 *
375 * Else the SAS_TASK_STATE_DONE bit is not set,
376 * 	If the return code is TMF_RESP_FUNC_COMPLETE, then
377 * 		the task was aborted successfully.  The caller of
378 * 		ABORT TASK has responsibility to call task->task_done()
379 *              to finish the task, xor free the task depending on their
380 *		framework.
381 *	else
382 * 		the ABORT TASK returned some kind of error. The task
383 *              was _not_ cancelled.  Nothing can be assumed.
384 *		The caller of ABORT TASK may wish to retry.
385 */
386int asd_abort_task(struct sas_task *task)
387{
388	struct asd_ascb *tascb = task->lldd_task;
389	struct asd_ha_struct *asd_ha = tascb->ha;
390	int res = 1;
391	unsigned long flags;
392	struct asd_ascb *ascb = NULL;
393	struct scb *scb;
394	int leftover;
395	DECLARE_TCS(tcs);
396	DECLARE_COMPLETION_ONSTACK(completion);
397	DECLARE_COMPLETION_ONSTACK(tascb_completion);
398
399	tascb->completion = &tascb_completion;
400
401	spin_lock_irqsave(&task->task_state_lock, flags);
402	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
403		spin_unlock_irqrestore(&task->task_state_lock, flags);
404		res = TMF_RESP_FUNC_COMPLETE;
405		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
406		goto out_done;
407	}
408	spin_unlock_irqrestore(&task->task_state_lock, flags);
409
410	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
411	if (!ascb)
412		return -ENOMEM;
413
414	ascb->uldd_task = &tcs;
415	ascb->completion = &completion;
416	scb = ascb->scb;
417	scb->header.opcode = SCB_ABORT_TASK;
418
419	switch (task->task_proto) {
420	case SAS_PROTOCOL_SATA:
421	case SAS_PROTOCOL_STP:
422		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
423		break;
424	case SAS_PROTOCOL_SSP:
425		scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
426		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
427		break;
428	case SAS_PROTOCOL_SMP:
429		break;
430	default:
431		break;
432	}
433
434	if (task->task_proto == SAS_PROTOCOL_SSP) {
435		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
436		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
437		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
438		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
439		       task->dev->port->ha->hashed_sas_addr,
440		       HASHED_SAS_ADDR_SIZE);
441		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
442
443		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
444		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
445		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
446	}
447
448	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
449	scb->abort_task.conn_handle = cpu_to_le16(
450		(u16)(unsigned long)task->dev->lldd_dev);
451	scb->abort_task.retry_count = 1;
452	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
453	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
454
455	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
456				   asd_tmf_timedout);
457	if (res)
458		goto out_free;
459	wait_for_completion(&completion);
460	ASD_DPRINTK("tmf came back\n");
461
462	tascb->tag = tcs.tag;
463	tascb->tag_valid = tcs.tag_valid;
464
465	spin_lock_irqsave(&task->task_state_lock, flags);
466	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
467		spin_unlock_irqrestore(&task->task_state_lock, flags);
468		res = TMF_RESP_FUNC_COMPLETE;
469		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
470		goto out_done;
471	}
472	spin_unlock_irqrestore(&task->task_state_lock, flags);
473
474	if (tcs.dl_opcode == TC_SSP_RESP) {
475		/* The task to be aborted has been sent to the device.
476		 * We got a Response IU for the ABORT TASK TMF. */
477		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
478			res = asd_clear_nexus(task);
479		else
480			res = tcs.tmf_state;
481	} else if (tcs.dl_opcode == TC_NO_ERROR &&
482		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
483		/* timeout */
484		res = TMF_RESP_FUNC_FAILED;
485	} else {
486		/* In the following we assume that the managing layer
487		 * will _never_ make a mistake, when issuing ABORT
488		 * TASK.
489		 */
490		switch (tcs.dl_opcode) {
491		default:
492			res = asd_clear_nexus(task);
493			fallthrough;
494		case TC_NO_ERROR:
495			break;
496			/* The task hasn't been sent to the device xor
497			 * we never got a (sane) Response IU for the
498			 * ABORT TASK TMF.
499			 */
500		case TF_NAK_RECV:
501			res = TMF_RESP_INVALID_FRAME;
502			break;
503		case TF_TMF_TASK_DONE:	/* done but not reported yet */
504			res = TMF_RESP_FUNC_FAILED;
505			leftover =
506				wait_for_completion_timeout(&tascb_completion,
507							  AIC94XX_SCB_TIMEOUT);
508			spin_lock_irqsave(&task->task_state_lock, flags);
509			if (leftover < 1)
510				res = TMF_RESP_FUNC_FAILED;
511			if (task->task_state_flags & SAS_TASK_STATE_DONE)
512				res = TMF_RESP_FUNC_COMPLETE;
513			spin_unlock_irqrestore(&task->task_state_lock, flags);
514			break;
515		case TF_TMF_NO_TAG:
516		case TF_TMF_TAG_FREE: /* the tag is in the free list */
517		case TF_TMF_NO_CONN_HANDLE: /* no such device */
518			res = TMF_RESP_FUNC_COMPLETE;
519			break;
520		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
521			res = TMF_RESP_FUNC_ESUPP;
522			break;
523		}
524	}
525 out_done:
526	tascb->completion = NULL;
527	if (res == TMF_RESP_FUNC_COMPLETE) {
528		task->lldd_task = NULL;
529		mb();
530		asd_ascb_free(tascb);
531	}
532	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
533	return res;
534
535 out_free:
536	asd_ascb_free(ascb);
537	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
538	return res;
539}
540
541/**
542 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
543 * @dev: pointer to struct domain_device of interest
544 * @lun: pointer to u8[8] which is the LUN
545 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
546 * @index: the transaction context of the task to be queried if QT TMF
547 *
548 * This function is used to send ABORT TASK SET, CLEAR ACA,
549 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
550 *
551 * No SCBs should be queued to the I_T_L nexus when this SCB is
552 * pending.
553 *
554 * Returns: TMF response code (see sas_task.h or the SAS spec)
555 */
556static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
557				int tmf, int index)
558{
559	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
560	struct asd_ascb *ascb;
561	int res = 1;
562	struct scb *scb;
563	DECLARE_COMPLETION_ONSTACK(completion);
564	DECLARE_TCS(tcs);
565
566	if (!(dev->tproto & SAS_PROTOCOL_SSP))
567		return TMF_RESP_FUNC_ESUPP;
568
569	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
570	if (!ascb)
571		return -ENOMEM;
572
573	ascb->completion = &completion;
574	ascb->uldd_task = &tcs;
575	scb = ascb->scb;
576
577	if (tmf == TMF_QUERY_TASK)
578		scb->header.opcode = QUERY_SSP_TASK;
579	else
580		scb->header.opcode = INITIATE_SSP_TMF;
581
582	scb->ssp_tmf.proto_conn_rate  = (1 << 4); /* SSP */
583	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
584	/* SSP frame header */
585	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
586	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
587	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
588	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
589	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
590	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
591	/* SSP Task IU */
592	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
593	scb->ssp_tmf.ssp_task.tmf = tmf;
594
595	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
596	scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
597					      dev->lldd_dev);
598	scb->ssp_tmf.retry_count = 1;
599	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
600	if (tmf == TMF_QUERY_TASK)
601		scb->ssp_tmf.index = cpu_to_le16(index);
602
603	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
604				   asd_tmf_timedout);
605	if (res)
606		goto out_err;
607	wait_for_completion(&completion);
608
609	switch (tcs.dl_opcode) {
610	case TC_NO_ERROR:
611		res = TMF_RESP_FUNC_COMPLETE;
612		break;
613	case TF_NAK_RECV:
614		res = TMF_RESP_INVALID_FRAME;
615		break;
616	case TF_TMF_TASK_DONE:
617		res = TMF_RESP_FUNC_FAILED;
618		break;
619	case TF_TMF_NO_TAG:
620	case TF_TMF_TAG_FREE: /* the tag is in the free list */
621	case TF_TMF_NO_CONN_HANDLE: /* no such device */
622		res = TMF_RESP_FUNC_COMPLETE;
623		break;
624	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
625		res = TMF_RESP_FUNC_ESUPP;
626		break;
627	default:
628		/* Allow TMF response codes to propagate upwards */
629		res = tcs.dl_opcode;
630		break;
631	}
632	return res;
633out_err:
634	asd_ascb_free(ascb);
635	return res;
636}
637
638int asd_abort_task_set(struct domain_device *dev, u8 *lun)
639{
640	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
641
642	if (res == TMF_RESP_FUNC_COMPLETE)
643		asd_clear_nexus_I_T_L(dev, lun);
644	return res;
645}
646
647int asd_clear_aca(struct domain_device *dev, u8 *lun)
648{
649	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
650
651	if (res == TMF_RESP_FUNC_COMPLETE)
652		asd_clear_nexus_I_T_L(dev, lun);
653	return res;
654}
655
656int asd_clear_task_set(struct domain_device *dev, u8 *lun)
657{
658	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
659
660	if (res == TMF_RESP_FUNC_COMPLETE)
661		asd_clear_nexus_I_T_L(dev, lun);
662	return res;
663}
664
665int asd_lu_reset(struct domain_device *dev, u8 *lun)
666{
667	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
668
669	if (res == TMF_RESP_FUNC_COMPLETE)
670		asd_clear_nexus_I_T_L(dev, lun);
671	return res;
672}
673
674/**
675 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
676 * @task: pointer to sas_task struct of interest
677 *
678 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
679 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
680 *
681 * Normally the management layer sets the task to aborted state,
682 * and then calls query task and then abort task.
683 */
684int asd_query_task(struct sas_task *task)
685{
686	struct asd_ascb *ascb = task->lldd_task;
687	int index;
688
689	if (ascb) {
690		index = ascb->tc_index;
691		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
692					    TMF_QUERY_TASK, index);
693	}
694	return TMF_RESP_FUNC_COMPLETE;
695}
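
As a usage note, the kernel-doc above asd_abort_task() describes a caller-side contract: mark the task aborted under task_state_lock before calling, and on return check SAS_TASK_STATE_DONE before interpreting the TMF code. The sketch below illustrates that contract only; the function and its locals are hypothetical and not part of the driver, and whether to call task->task_done() or free the task depends on the caller's framework.

/* Hypothetical caller following the asd_abort_task() kernel-doc. */
static void example_abort_and_reap(struct sas_task *task)
{
	unsigned long flags;
	int res, done;

	/* Mark the task aborted under task_state_lock, unless it already finished. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	res = asd_abort_task(task);

	/* Check the task state first, then the TMF return code. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	done = !!(task->task_state_flags & SAS_TASK_STATE_DONE);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (done) {
		/* The task completed before the abort took effect;
		 * asd_abort_task() returns TMF_RESP_FUNC_FAILED here.
		 * The caller must complete (task_done) or free the task. */
		task->task_done(task);
	} else if (res == TMF_RESP_FUNC_COMPLETE) {
		/* The task was aborted successfully; complete or free it. */
		task->task_done(task);
	}
	/* Otherwise the abort failed, the task was not cancelled and
	 * nothing can be assumed; the caller may wish to retry. */
}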