Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Serial Attached SCSI (SAS) class SCSI Host glue.
4 *
5 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 */
8
9#include <linux/kthread.h>
10#include <linux/firmware.h>
11#include <linux/export.h>
12#include <linux/ctype.h>
13#include <linux/kernel.h>
14
15#include "sas_internal.h"
16
17#include <scsi/scsi_host.h>
18#include <scsi/scsi_device.h>
19#include <scsi/scsi_tcq.h>
20#include <scsi/scsi.h>
21#include <scsi/scsi_eh.h>
22#include <scsi/scsi_transport.h>
23#include <scsi/scsi_transport_sas.h>
24#include <scsi/sas_ata.h>
25#include "scsi_sas_internal.h"
26#include "scsi_transport_api.h"
27#include "scsi_priv.h"
28
29#include <linux/err.h>
30#include <linux/blkdev.h>
31#include <linux/freezer.h>
32#include <linux/gfp.h>
33#include <linux/scatterlist.h>
34#include <linux/libata.h>
35
36/* record final status and free the task */
37static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
38{
39 struct task_status_struct *ts = &task->task_status;
40 enum scsi_host_status hs = DID_OK;
41 enum exec_status stat = SAS_SAM_STAT_GOOD;
42
43 if (ts->resp == SAS_TASK_UNDELIVERED) {
44 /* transport error */
45 hs = DID_NO_CONNECT;
46 } else { /* ts->resp == SAS_TASK_COMPLETE */
47 /* task delivered, what happened afterwards? */
48 switch (ts->stat) {
49 case SAS_DEV_NO_RESPONSE:
50 case SAS_INTERRUPTED:
51 case SAS_PHY_DOWN:
52 case SAS_NAK_R_ERR:
53 case SAS_OPEN_TO:
54 hs = DID_NO_CONNECT;
55 break;
56 case SAS_DATA_UNDERRUN:
57 scsi_set_resid(sc, ts->residual);
58 if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
59 hs = DID_ERROR;
60 break;
61 case SAS_DATA_OVERRUN:
62 hs = DID_ERROR;
63 break;
64 case SAS_QUEUE_FULL:
65 hs = DID_SOFT_ERROR; /* retry */
66 break;
67 case SAS_DEVICE_UNKNOWN:
68 hs = DID_BAD_TARGET;
69 break;
70 case SAS_OPEN_REJECT:
71 if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
72 hs = DID_SOFT_ERROR; /* retry */
73 else
74 hs = DID_ERROR;
75 break;
76 case SAS_PROTO_RESPONSE:
77 pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
78 task->dev->port->ha->sas_ha_name);
79 break;
80 case SAS_ABORTED_TASK:
81 hs = DID_ABORT;
82 break;
83 case SAS_SAM_STAT_CHECK_CONDITION:
84 memcpy(sc->sense_buffer, ts->buf,
85 min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
86 stat = SAS_SAM_STAT_CHECK_CONDITION;
87 break;
88 default:
89 stat = ts->stat;
90 break;
91 }
92 }
93
94 sc->result = (hs << 16) | stat;
95 ASSIGN_SAS_TASK(sc, NULL);
96 sas_free_task(task);
97}
98
/*
 * Per-task completion callback handed to the LLDD.  Resolves the
 * completion-vs-error-handler race under dev->done_lock: when the HA is
 * frozen (SAS_HA_FROZEN) the error handler owns the task and we must
 * leave it alone; otherwise we detach the task from the command and
 * complete it here.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;	/* eh owns the task; it will finish/free it */
	else
		ASSIGN_SAS_TASK(sc, NULL);	/* we won the race; claim it */
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		pr_debug("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		/* no owning command: nothing to complete, just reclaim */
		pr_debug("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);	/* records result and frees the task */
	scsi_done(sc);
}
128
129static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
130 struct domain_device *dev,
131 gfp_t gfp_flags)
132{
133 struct sas_task *task = sas_alloc_task(gfp_flags);
134 struct scsi_lun lun;
135
136 if (!task)
137 return NULL;
138
139 task->uldd_task = cmd;
140 ASSIGN_SAS_TASK(cmd, task);
141
142 task->dev = dev;
143 task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
144
145 int_to_scsilun(cmd->device->lun, &lun);
146 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
147 task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
148 task->ssp_task.cmd = cmd;
149
150 task->scatter = scsi_sglist(cmd);
151 task->num_scatter = scsi_sg_count(cmd);
152 task->total_xfer_len = scsi_bufflen(cmd);
153 task->data_dir = cmd->sc_data_direction;
154
155 task->task_done = sas_scsi_task_done;
156
157 return task;
158}
159
/*
 * scsi_host_template queuecommand for libsas hosts.  SATA commands are
 * routed to libata; SSP commands are wrapped in a sas_task and handed
 * to the LLDD.  Errors are completed inline via scsi_done() so the
 * midlayer never sees a failure return for them.
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		/* libata expects its port lock held around qc issue */
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will retry */

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	pr_debug("lldd_execute_task returned: %d\n", res);
	/* undo the cross-link made by sas_create_task() before freeing */
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	scsi_done(cmd);
	return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
202
/*
 * Finish an aborted command from the error handler: record its final
 * status (freeing the sas_task), then either defer it to libata's eh
 * queue (SATA) or move it onto the host's eh done list.
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer commands to libata so that libata EH can
		 * handle ata qcs correctly
		 */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
229
230static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
231{
232 struct scsi_cmnd *cmd, *n;
233
234 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
235 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
236 cmd->device->lun == my_cmd->device->lun)
237 sas_eh_finish_cmd(cmd);
238 }
239}
240
241static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
242 struct domain_device *dev)
243{
244 struct scsi_cmnd *cmd, *n;
245
246 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
247 struct domain_device *x = cmd_to_domain_dev(cmd);
248
249 if (x == dev)
250 sas_eh_finish_cmd(cmd);
251 }
252}
253
254static void sas_scsi_clear_queue_port(struct list_head *error_q,
255 struct asd_sas_port *port)
256{
257 struct scsi_cmnd *cmd, *n;
258
259 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
260 struct domain_device *dev = cmd_to_domain_dev(cmd);
261 struct asd_sas_port *x = dev->port;
262
263 if (x == port)
264 sas_eh_finish_cmd(cmd);
265 }
266}
267
/*
 * Outcome of sas_scsi_find_task(): where an outstanding task ended up
 * after the LLDD was asked to abort (and optionally query) it.
 */
enum task_disposition {
	TASK_IS_DONE,		/* LLDD completed it while we were aborting */
	TASK_IS_ABORTED,	/* abort TMF returned TMF_RESP_FUNC_COMPLETE */
	TASK_IS_AT_LU,		/* query says still at the LU: LU recovery */
	TASK_IS_NOT_AT_LU,	/* query says gone from the LU: I_T recovery */
	TASK_ABORT_FAILED,	/* abort/query failed or retries exhausted */
};
275
/*
 * Ask the LLDD to abort @task and classify where the task ended up.
 * Tries up to five times; after each unsuccessful abort it consults
 * lldd_query_task (when provided) to learn whether the task is still
 * queued at the LU, so the caller can pick the right recovery scope.
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->shost->transportt);

	for (i = 0; i < 5; i++) {
		pr_notice("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		/* the abort may have raced with a normal completion */
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			pr_debug("%s: task 0x%p is done\n", __func__, task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			pr_notice("%s: querying task 0x%p\n", __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				pr_notice("%s: task 0x%p at LU\n", __func__,
					  task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				pr_notice("%s: task 0x%p not at LU\n",
					  __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				pr_notice("%s: task 0x%p failed to abort\n",
					  __func__, task);
				return TASK_ABORT_FAILED;
			default:
				/* unknown query result: retry the abort */
				pr_notice("%s: task 0x%p result code %d not handled\n",
					  __func__, task, res);
			}
		}
	}
	return TASK_ABORT_FAILED;
}
323
324static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
325{
326 int res = TMF_RESP_FUNC_FAILED;
327 struct scsi_lun lun;
328 struct sas_internal *i =
329 to_sas_internal(dev->port->ha->shost->transportt);
330
331 int_to_scsilun(cmd->device->lun, &lun);
332
333 pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
334 SAS_ADDR(dev->sas_addr),
335 cmd->device->lun);
336
337 if (i->dft->lldd_abort_task_set)
338 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
339
340 if (res == TMF_RESP_FUNC_FAILED) {
341 if (i->dft->lldd_clear_task_set)
342 res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
343 }
344
345 if (res == TMF_RESP_FUNC_FAILED) {
346 if (i->dft->lldd_lu_reset)
347 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
348 }
349
350 return res;
351}
352
353static int sas_recover_I_T(struct domain_device *dev)
354{
355 int res = TMF_RESP_FUNC_FAILED;
356 struct sas_internal *i =
357 to_sas_internal(dev->port->ha->shost->transportt);
358
359 pr_notice("I_T nexus reset for dev %016llx\n",
360 SAS_ADDR(dev->sas_addr));
361
362 if (i->dft->lldd_I_T_nexus_reset)
363 res = i->dft->lldd_I_T_nexus_reset(dev);
364
365 return res;
366}
367
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_phy *phy;
	unsigned long flags;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	BUG_ON(!dev->phy);

	/* snapshot dev->phy and take the device reference atomically
	 * under phy_port_lock
	 */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	phy = dev->phy;
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return phy;	/* caller releases via put_device(&phy->dev) */
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
388
/*
 * Queue a directed reset (@reset_type is SAS_DEV_LU_RESET or
 * SAS_DEV_RESET) for @dev and kick the host error-handler thread; the
 * actual TMF runs later from sas_eh_handle_resets().  For SATA the
 * request is handed to libata instead.  Returns SUCCESS once queued,
 * FAILED if it could not be queued after repeated attempts.
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		return SUCCESS;
	}

	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		/* only queue when no eh is pending for this dev and this
		 * reset type is not already requested
		 */
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->shost);
		}
		spin_unlock_irq(&ha->lock);

		if (scheduled)
			return SUCCESS;
	}

	pr_warn("%s reset of %s failed\n",
		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		dev_name(&dev->rphy->dev));

	return FAILED;
}
424
/*
 * scsi_host_template eh_abort_handler: ask the LLDD to abort the
 * sas_task behind @cmd.  Returns SUCCESS only when the abort TMF
 * reports success/completion.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);
	unsigned long flags;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	spin_lock_irqsave(host->host_lock, flags);
	/* We cannot do async aborts for SATA devices */
	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return FAILED;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	if (task)
		res = i->dft->lldd_abort_task(task);
	else
		/* the task may already have completed and detached */
		pr_notice("no task to abort\n");
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
455
456/* Attempt to send a LUN reset message to a device */
457int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
458{
459 int res;
460 struct scsi_lun lun;
461 struct Scsi_Host *host = cmd->device->host;
462 struct domain_device *dev = cmd_to_domain_dev(cmd);
463 struct sas_internal *i = to_sas_internal(host->transportt);
464
465 if (current != host->ehandler)
466 return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
467
468 int_to_scsilun(cmd->device->lun, &lun);
469
470 if (!i->dft->lldd_lu_reset)
471 return FAILED;
472
473 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
474 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
475 return SUCCESS;
476
477 return FAILED;
478}
479EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
480
481int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
482{
483 int res;
484 struct Scsi_Host *host = cmd->device->host;
485 struct domain_device *dev = cmd_to_domain_dev(cmd);
486 struct sas_internal *i = to_sas_internal(host->transportt);
487
488 if (current != host->ehandler)
489 return sas_queue_reset(dev, SAS_DEV_RESET, 0);
490
491 if (!i->dft->lldd_I_T_nexus_reset)
492 return FAILED;
493
494 res = i->dft->lldd_I_T_nexus_reset(dev);
495 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
496 res == -ENODEV)
497 return SUCCESS;
498
499 return FAILED;
500}
501EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
502
503/* Try to reset a device */
504static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
505{
506 int res;
507 struct Scsi_Host *shost = cmd->device->host;
508
509 if (!shost->hostt->eh_device_reset_handler)
510 goto try_target_reset;
511
512 res = shost->hostt->eh_device_reset_handler(cmd);
513 if (res == SUCCESS)
514 return res;
515
516try_target_reset:
517 if (shost->hostt->eh_target_reset_handler)
518 return shost->hostt->eh_target_reset_handler(cmd);
519
520 return FAILED;
521}
522
523static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
524{
525 struct scsi_cmnd *cmd, *n;
526 enum task_disposition res = TASK_IS_DONE;
527 int tmf_resp, need_reset;
528 struct sas_internal *i = to_sas_internal(shost->transportt);
529 unsigned long flags;
530 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
531 LIST_HEAD(done);
532
533 /* clean out any commands that won the completion vs eh race */
534 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
535 struct domain_device *dev = cmd_to_domain_dev(cmd);
536 struct sas_task *task;
537
538 spin_lock_irqsave(&dev->done_lock, flags);
539 /* by this point the lldd has either observed
540 * SAS_HA_FROZEN and is leaving the task alone, or has
541 * won the race with eh and decided to complete it
542 */
543 task = TO_SAS_TASK(cmd);
544 spin_unlock_irqrestore(&dev->done_lock, flags);
545
546 if (!task)
547 list_move_tail(&cmd->eh_entry, &done);
548 }
549
550 Again:
551 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
552 struct sas_task *task = TO_SAS_TASK(cmd);
553
554 list_del_init(&cmd->eh_entry);
555
556 spin_lock_irqsave(&task->task_state_lock, flags);
557 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
558 spin_unlock_irqrestore(&task->task_state_lock, flags);
559
560 if (need_reset) {
561 pr_notice("%s: task 0x%p requests reset\n",
562 __func__, task);
563 goto reset;
564 }
565
566 pr_debug("trying to find task 0x%p\n", task);
567 res = sas_scsi_find_task(task);
568
569 switch (res) {
570 case TASK_IS_DONE:
571 pr_notice("%s: task 0x%p is done\n", __func__,
572 task);
573 sas_eh_finish_cmd(cmd);
574 continue;
575 case TASK_IS_ABORTED:
576 pr_notice("%s: task 0x%p is aborted\n",
577 __func__, task);
578 sas_eh_finish_cmd(cmd);
579 continue;
580 case TASK_IS_AT_LU:
581 pr_info("task 0x%p is at LU: lu recover\n", task);
582 reset:
583 tmf_resp = sas_recover_lu(task->dev, cmd);
584 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
585 pr_notice("dev %016llx LU 0x%llx is recovered\n",
586 SAS_ADDR(task->dev),
587 cmd->device->lun);
588 sas_eh_finish_cmd(cmd);
589 sas_scsi_clear_queue_lu(work_q, cmd);
590 goto Again;
591 }
592 fallthrough;
593 case TASK_IS_NOT_AT_LU:
594 case TASK_ABORT_FAILED:
595 pr_notice("task 0x%p is not at LU: I_T recover\n",
596 task);
597 tmf_resp = sas_recover_I_T(task->dev);
598 if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
599 tmf_resp == -ENODEV) {
600 struct domain_device *dev = task->dev;
601 pr_notice("I_T %016llx recovered\n",
602 SAS_ADDR(task->dev->sas_addr));
603 sas_eh_finish_cmd(cmd);
604 sas_scsi_clear_queue_I_T(work_q, dev);
605 goto Again;
606 }
607 /* Hammer time :-) */
608 try_to_reset_cmd_device(cmd);
609 if (i->dft->lldd_clear_nexus_port) {
610 struct asd_sas_port *port = task->dev->port;
611 pr_debug("clearing nexus for port:%d\n",
612 port->id);
613 res = i->dft->lldd_clear_nexus_port(port);
614 if (res == TMF_RESP_FUNC_COMPLETE) {
615 pr_notice("clear nexus port:%d succeeded\n",
616 port->id);
617 sas_eh_finish_cmd(cmd);
618 sas_scsi_clear_queue_port(work_q,
619 port);
620 goto Again;
621 }
622 }
623 if (i->dft->lldd_clear_nexus_ha) {
624 pr_debug("clear nexus ha\n");
625 res = i->dft->lldd_clear_nexus_ha(ha);
626 if (res == TMF_RESP_FUNC_COMPLETE) {
627 pr_notice("clear nexus ha succeeded\n");
628 sas_eh_finish_cmd(cmd);
629 goto clear_q;
630 }
631 }
632 /* If we are here -- this means that no amount
633 * of effort could recover from errors. Quite
634 * possibly the HA just disappeared.
635 */
636 pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
637 SAS_ADDR(task->dev->sas_addr),
638 cmd->device->lun);
639
640 sas_eh_finish_cmd(cmd);
641 goto clear_q;
642 }
643 }
644 out:
645 list_splice_tail(&done, work_q);
646 list_splice_tail_init(&ha->eh_ata_q, work_q);
647 return;
648
649 clear_q:
650 pr_debug("--- Exit %s -- clear_q\n", __func__);
651 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
652 sas_eh_finish_cmd(cmd);
653 goto out;
654}
655
/*
 * Execute the directed LU / I_T resets that sas_queue_reset() queued on
 * ha->eh_dev_q.  The TMFs run with ha->lock dropped; the kref taken
 * here holds the device across that window.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		kref_get(&dev->kref);	/* pin dev across the unlocked TMFs */
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		/* NOTE(review): dev->state is touched after sas_put_device();
		 * presumably SAS_DEV_EH_PENDING keeps the device alive until
		 * cleared below -- confirm against sas_put_device()'s release
		 * path.
		 */
		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;	/* balances the ++ in sas_queue_reset() */
	}
	spin_unlock_irq(&ha->lock);
}
688
689
/*
 * libsas strategy handler, run from the SCSI eh thread.  Freezes the
 * HA so eh owns all sas_task completions, recovers SAS errors, lets
 * the midlayer and libata handle what remains, runs queued directed
 * resets, and repeats until no new eh work was scheduled meanwhile.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	pr_notice("Enter %s busy: %d failed: %d\n",
		  __func__, scsi_host_busy(shost), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have a an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
		  __func__, scsi_host_busy(shost),
		  shost->host_failed, tries);
}
750
751int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
752{
753 struct domain_device *dev = sdev_to_domain_dev(sdev);
754
755 if (dev_is_sata(dev))
756 return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
757
758 return -EINVAL;
759}
760EXPORT_SYMBOL_GPL(sas_ioctl);
761
/*
 * Find the domain device published for @rphy by scanning every port's
 * dev_list.  Runs under phy_port_lock with each port's dev_list_lock
 * nested inside.  Returns NULL when not found; no reference is taken
 * on the returned device.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}
790
791int sas_target_alloc(struct scsi_target *starget)
792{
793 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
794 struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
795
796 if (!found_dev)
797 return -ENODEV;
798
799 kref_get(&found_dev->kref);
800 starget->hostdata = found_dev;
801 return 0;
802}
803EXPORT_SYMBOL_GPL(sas_target_alloc);
804
805#define SAS_DEF_QD 256
806
807int sas_slave_configure(struct scsi_device *scsi_dev)
808{
809 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
810
811 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
812
813 if (dev_is_sata(dev)) {
814 ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
815 return 0;
816 }
817
818 sas_read_port_mode_page(scsi_dev);
819
820 if (scsi_dev->tagged_supported) {
821 scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
822 } else {
823 pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
824 SAS_ADDR(dev->sas_addr), scsi_dev->lun);
825 scsi_change_queue_depth(scsi_dev, 1);
826 }
827
828 scsi_dev->allow_restart = 1;
829
830 return 0;
831}
832EXPORT_SYMBOL_GPL(sas_slave_configure);
833
834int sas_change_queue_depth(struct scsi_device *sdev, int depth)
835{
836 struct domain_device *dev = sdev_to_domain_dev(sdev);
837
838 if (dev_is_sata(dev))
839 return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
840
841 if (!sdev->tagged_supported)
842 depth = 1;
843 return scsi_change_queue_depth(sdev, depth);
844}
845EXPORT_SYMBOL_GPL(sas_change_queue_depth);
846
847int sas_bios_param(struct scsi_device *scsi_dev,
848 struct block_device *bdev,
849 sector_t capacity, int *hsc)
850{
851 hsc[0] = 255;
852 hsc[1] = 63;
853 sector_div(capacity, 255*63);
854 hsc[2] = capacity;
855
856 return 0;
857}
858EXPORT_SYMBOL_GPL(sas_bios_param);
859
/*
 * Completion callback for internal slow-path tasks: cancel the timeout
 * timer and wake the waiter blocked on slow_task->completion.
 */
void sas_task_internal_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
865
/*
 * Timeout handler for internal slow-path tasks.  If the task has not
 * completed yet, mark it aborted and wake the waiter; if completion
 * already happened, do nothing (the race is resolved under
 * task_state_lock).
 */
void sas_task_internal_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	bool is_completed = true;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* only complete if the normal done path didn't already run */
	if (!is_completed)
		complete(&task->slow_task->completion);
}
883
#define TASK_TIMEOUT (20 * HZ)	/* per-attempt internal task timeout */
#define TASK_RETRY 3		/* attempts for internal abort / TMF tasks */

/*
 * Build and execute an internal-abort task (type/tag/qid describe what
 * to abort), waiting synchronously for its completion.  Retries up to
 * TASK_RETRY times on unrecognized status.  Returns a TMF_RESP_* code,
 * -ENOMEM, or -EIO on timeout.
 */
static int sas_execute_internal_abort(struct domain_device *device,
				      enum sas_internal_abort type, u16 tag,
				      unsigned int qid, void *data)
{
	struct sas_ha_struct *ha = device->port->ha;
	struct sas_internal *i = to_sas_internal(ha->shost->transportt);
	struct sas_task *task = NULL;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
		task->task_done = sas_task_internal_done;
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		task->abort_task.tag = tag;
		task->abort_task.type = type;
		task->abort_task.qid = qid;

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			pr_err("Executing internal abort failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;

		/* Even if the internal abort timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			bool quit = true;

			/* give the LLDD a chance to decide whether to retry */
			if (i->dft->lldd_abort_timeout)
				quit = i->dft->lldd_abort_timeout(task, data);
			else
				pr_err("Internal abort: timeout %016llx\n",
				       SAS_ADDR(device->sas_addr));
			res = -EIO;
			if (quit)
				break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		/* unrecognized status: free the task and retry */
		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
		       SAS_ADDR(device->sas_addr), task->task_status.resp,
		       task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
	BUG_ON(retry == TASK_RETRY && task != NULL);
	sas_free_task(task);	/* NULL-safe; frees the task we broke out with */
	return res;
}
959
/* Internal abort of a single outstanding command identified by @tag on
 * queue @qid of @device.
 */
int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
				      unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
					  tag, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
967
/* Internal device-scope abort: no specific tag (SCSI_NO_TAG) on queue
 * @qid of @device.
 */
int sas_execute_internal_abort_dev(struct domain_device *device,
				   unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
					  SCSI_NO_TAG, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
975
/*
 * Build and synchronously execute a task management function.
 * @parameter/@para_len hold either a FIS (SATA) or an SSP task block,
 * copied into the task; @force_phy_id >= 0 pins a SATA TMF to a phy.
 * Retries up to TASK_RETRY times on unrecognized status.  Returns a
 * TMF_RESP_* code, a negative errno, or the residual byte count on
 * data underrun.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}

		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		/* arm the per-attempt timeout before handing to the LLDD */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				/* timed out and the LLDD never finished it */
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			pr_warn("TMF task open reject failed %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		/* unrecognized status: free the task and retry */
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);
	sas_free_task(task);	/* NULL-safe */

	return res;
}
1093
/*
 * Issue the SSP TMF described by @tmf to @lun on @device.  Returns
 * TMF_RESP_FUNC_ESUPP when the device does not speak SSP, otherwise
 * whatever sas_execute_tmf() returns.
 */
static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
			       struct sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	/* NOTE(review): only LUN is initialized here; the remaining
	 * ssp_task fields are uninitialized stack data that
	 * sas_execute_tmf() copies into the task.  Presumably LLDDs
	 * ignore those fields for TMF tasks -- confirm.
	 */
	memcpy(ssp_task.LUN, lun, 8);

	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
}
1106
1107int sas_abort_task_set(struct domain_device *dev, u8 *lun)
1108{
1109 struct sas_tmf_task tmf_task = {
1110 .tmf = TMF_ABORT_TASK_SET,
1111 };
1112
1113 return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1114}
1115EXPORT_SYMBOL_GPL(sas_abort_task_set);
1116
1117int sas_clear_task_set(struct domain_device *dev, u8 *lun)
1118{
1119 struct sas_tmf_task tmf_task = {
1120 .tmf = TMF_CLEAR_TASK_SET,
1121 };
1122
1123 return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1124}
1125EXPORT_SYMBOL_GPL(sas_clear_task_set);
1126
1127int sas_lu_reset(struct domain_device *dev, u8 *lun)
1128{
1129 struct sas_tmf_task tmf_task = {
1130 .tmf = TMF_LU_RESET,
1131 };
1132
1133 return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1134}
1135EXPORT_SYMBOL_GPL(sas_lu_reset);
1136
1137int sas_query_task(struct sas_task *task, u16 tag)
1138{
1139 struct sas_tmf_task tmf_task = {
1140 .tmf = TMF_QUERY_TASK,
1141 .tag_of_task_to_be_managed = tag,
1142 };
1143 struct scsi_cmnd *cmnd = task->uldd_task;
1144 struct domain_device *dev = task->dev;
1145 struct scsi_lun lun;
1146
1147 int_to_scsilun(cmnd->device->lun, &lun);
1148
1149 return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1150}
1151EXPORT_SYMBOL_GPL(sas_query_task);
1152
1153int sas_abort_task(struct sas_task *task, u16 tag)
1154{
1155 struct sas_tmf_task tmf_task = {
1156 .tmf = TMF_ABORT_TASK,
1157 .tag_of_task_to_be_managed = tag,
1158 };
1159 struct scsi_cmnd *cmnd = task->uldd_task;
1160 struct domain_device *dev = task->dev;
1161 struct scsi_lun lun;
1162
1163 int_to_scsilun(cmnd->device->lun, &lun);
1164
1165 return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1166}
1167EXPORT_SYMBOL_GPL(sas_abort_task);
1168
1169/*
1170 * Tell an upper layer that it needs to initiate an abort for a given task.
1171 * This should only ever be called by an LLDD.
1172 */
void sas_task_abort(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;

	/* Escape for libsas internal commands */
	if (!sc) {
		/* Internal (slow-path) tasks are completed by their
		 * timeout handler; fire it now instead of waiting.
		 */
		struct sas_task_slow *slow = task->slow_task;

		if (!slow)
			return;
		/* If del_timer() fails the timer already fired (or was
		 * never armed), so the completion path is running.
		 */
		if (!del_timer(&slow->timer))
			return;
		slow->timer.function(&slow->timer);
		return;
	}

	/* Regular commands: route through libata for ATA devices,
	 * otherwise let the block layer time the request out so SCSI
	 * EH picks it up.
	 */
	if (dev_is_sata(task->dev))
		sas_ata_task_abort(task);
	else
		blk_abort_request(scsi_cmd_to_rq(sc));
}
EXPORT_SYMBOL_GPL(sas_task_abort);
1195
1196int sas_slave_alloc(struct scsi_device *sdev)
1197{
1198 if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
1199 return -ENXIO;
1200
1201 return 0;
1202}
1203EXPORT_SYMBOL_GPL(sas_slave_alloc);
1204
1205void sas_target_destroy(struct scsi_target *starget)
1206{
1207 struct domain_device *found_dev = starget->hostdata;
1208
1209 if (!found_dev)
1210 return;
1211
1212 starget->hostdata = NULL;
1213 sas_put_device(found_dev);
1214}
1215EXPORT_SYMBOL_GPL(sas_target_destroy);
1216
1217#define SAS_STRING_ADDR_SIZE 16
1218
1219int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
1220{
1221 int res;
1222 const struct firmware *fw;
1223
1224 res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
1225 if (res)
1226 return res;
1227
1228 if (fw->size < SAS_STRING_ADDR_SIZE) {
1229 res = -ENODEV;
1230 goto out;
1231 }
1232
1233 res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
1234 if (res)
1235 goto out;
1236
1237out:
1238 release_firmware(fw);
1239 return res;
1240}
1241EXPORT_SYMBOL_GPL(sas_request_addr);
1242
1/*
2 * Serial Attached SCSI (SAS) class SCSI Host glue.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#include <linux/kthread.h>
27#include <linux/firmware.h>
28#include <linux/export.h>
29#include <linux/ctype.h>
30
31#include "sas_internal.h"
32
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_tcq.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_eh.h>
38#include <scsi/scsi_transport.h>
39#include <scsi/scsi_transport_sas.h>
40#include <scsi/sas_ata.h>
41#include "../scsi_sas_internal.h"
42#include "../scsi_transport_api.h"
43#include "../scsi_priv.h"
44
45#include <linux/err.h>
46#include <linux/blkdev.h>
47#include <linux/freezer.h>
48#include <linux/gfp.h>
49#include <linux/scatterlist.h>
50#include <linux/libata.h>
51
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	/* hs: SCSI midlayer host byte; stat: SAM status byte */
	int hs = 0, stat = 0;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			/* only an error if we transferred less than the
			 * command's declared underflow limit
			 */
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_SG_ERR:
			hs = DID_PARITY;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
				    "task; please report this\n",
				    task->dev->port->ha->sas_ha_name);
			break;
		case SAM_STAT_CHECK_CONDITION:
			/* copy autosense data, bounded by both buffers */
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAM_STAT_CHECK_CONDITION;
			break;
		default:
			/* pass any other SAM status straight through */
			stat = ts->stat;
			break;
		}
	}

	sc->result = (hs << 16) | stat;
	/* sever the cmd<->task link before freeing the task */
	ASSIGN_SAS_TASK(sc, NULL);
	sas_free_task(task);
}
117
/*
 * LLDD completion callback for SSP tasks: finish the SCSI command
 * unless error handling (SAS_HA_FROZEN) has claimed it.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	/* done_lock arbitrates the race between this completion and EH:
	 * if the HA is frozen, EH owns the task and we must not touch it;
	 * otherwise we win and detach the task from the command.
	 */
	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;
	else
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		SAS_DPRINTK("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	sc->scsi_done(sc);
}
147
/*
 * Allocate and populate a sas_task for the SCSI command @cmd on @dev.
 * Links cmd and task both ways (uldd_task / ASSIGN_SAS_TASK).
 * Returns NULL if allocation fails.
 */
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					struct domain_device *dev,
					gfp_t gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
	task->ssp_task.cmd = cmd;

	/* mirror the command's data description into the task */
	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;

	return task;
}
179
/*
 * SCSI midlayer queuecommand entry point for libsas hosts.
 * ATA commands are handed to libata; SSP commands become sas_tasks
 * executed by the LLDD. Returns 0 or a SCSI_MLQUEUE_* busy code.
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		/* libata expects to be called under its port lock */
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
	/* unlink before freeing so the cmd no longer points at the task */
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	cmd->scsi_done(cmd);
	return 0;
}
221
/* Finalize an EH-owned command: record status, free its task, and
 * move it onto the host's eh_done_q.
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
239
/* Like sas_eh_finish_cmd(), but ATA commands are deferred to the
 * ha->eh_ata_q so libata EH can see the failure first.
 */
static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
{
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_task *task = TO_SAS_TASK(cmd);

	if (!dev_is_sata(dev)) {
		sas_eh_finish_cmd(cmd);
		return;
	}

	/* report the timeout to libata */
	sas_end_task(cmd, task);
	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
}
255
256static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
257{
258 struct scsi_cmnd *cmd, *n;
259
260 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
261 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
262 cmd->device->lun == my_cmd->device->lun)
263 sas_eh_defer_cmd(cmd);
264 }
265}
266
267static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
268 struct domain_device *dev)
269{
270 struct scsi_cmnd *cmd, *n;
271
272 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
273 struct domain_device *x = cmd_to_domain_dev(cmd);
274
275 if (x == dev)
276 sas_eh_finish_cmd(cmd);
277 }
278}
279
280static void sas_scsi_clear_queue_port(struct list_head *error_q,
281 struct asd_sas_port *port)
282{
283 struct scsi_cmnd *cmd, *n;
284
285 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
286 struct domain_device *dev = cmd_to_domain_dev(cmd);
287 struct asd_sas_port *x = dev->port;
288
289 if (x == port)
290 sas_eh_finish_cmd(cmd);
291 }
292}
293
/* Outcome of trying to locate/abort a task during error handling. */
enum task_disposition {
	TASK_IS_DONE,		/* LLDD completed it while we were looking */
	TASK_IS_ABORTED,	/* abort TMF succeeded */
	TASK_IS_AT_LU,		/* still queued at the logical unit */
	TASK_IS_NOT_AT_LU,	/* device no longer knows about it */
	TASK_ABORT_FAILED,	/* abort and query both failed */
};
301
302static enum task_disposition sas_scsi_find_task(struct sas_task *task)
303{
304 unsigned long flags;
305 int i, res;
306 struct sas_internal *si =
307 to_sas_internal(task->dev->port->ha->core.shost->transportt);
308
309 for (i = 0; i < 5; i++) {
310 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
311 res = si->dft->lldd_abort_task(task);
312
313 spin_lock_irqsave(&task->task_state_lock, flags);
314 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
315 spin_unlock_irqrestore(&task->task_state_lock, flags);
316 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
317 task);
318 return TASK_IS_DONE;
319 }
320 spin_unlock_irqrestore(&task->task_state_lock, flags);
321
322 if (res == TMF_RESP_FUNC_COMPLETE) {
323 SAS_DPRINTK("%s: task 0x%p is aborted\n",
324 __func__, task);
325 return TASK_IS_ABORTED;
326 } else if (si->dft->lldd_query_task) {
327 SAS_DPRINTK("%s: querying task 0x%p\n",
328 __func__, task);
329 res = si->dft->lldd_query_task(task);
330 switch (res) {
331 case TMF_RESP_FUNC_SUCC:
332 SAS_DPRINTK("%s: task 0x%p at LU\n",
333 __func__, task);
334 return TASK_IS_AT_LU;
335 case TMF_RESP_FUNC_COMPLETE:
336 SAS_DPRINTK("%s: task 0x%p not at LU\n",
337 __func__, task);
338 return TASK_IS_NOT_AT_LU;
339 case TMF_RESP_FUNC_FAILED:
340 SAS_DPRINTK("%s: task 0x%p failed to abort\n",
341 __func__, task);
342 return TASK_ABORT_FAILED;
343 }
344
345 }
346 }
347 return res;
348}
349
/*
 * Escalating LU-level recovery: abort task set, then clear task set,
 * then LU reset, stopping at the first TMF that does not fail.
 * Returns the last TMF response code.
 */
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct scsi_lun lun;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	int_to_scsilun(cmd->device->lun, &lun);

	SAS_DPRINTK("eh: device %llx LUN %llx has the task\n",
		    SAS_ADDR(dev->sas_addr),
		    cmd->device->lun);

	if (i->dft->lldd_abort_task_set)
		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_clear_task_set)
			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
	}

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_lu_reset)
			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	}

	return res;
}
378
379static int sas_recover_I_T(struct domain_device *dev)
380{
381 int res = TMF_RESP_FUNC_FAILED;
382 struct sas_internal *i =
383 to_sas_internal(dev->port->ha->core.shost->transportt);
384
385 SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
386 SAS_ADDR(dev->sas_addr));
387
388 if (i->dft->lldd_I_T_nexus_reset)
389 res = i->dft->lldd_I_T_nexus_reset(dev);
390
391 return res;
392}
393
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_phy *phy;
	unsigned long flags;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	BUG_ON(!dev->phy);

	/* phy_port_lock keeps dev->phy stable while we take the ref */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	phy = dev->phy;
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	/* caller drops the reference with sas_put_local_phy() */
	return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
414
/*
 * Block until all pending error handling for @dev has finished.
 * For ATA devices this defers to libata's port EH wait.
 */
static void sas_wait_eh(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	DEFINE_WAIT(wait);

	if (dev_is_sata(dev)) {
		ata_port_wait_eh(dev->sata_dev.ap);
		return;
	}
 retry:
	spin_lock_irq(&ha->lock);

	/* sleep until the device's EH-pending flag clears; ha->lock is
	 * dropped around each schedule() so EH can make progress
	 */
	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ha->lock);
		schedule();
		spin_lock_irq(&ha->lock);
	}
	finish_wait(&ha->eh_wait_q, &wait);

	spin_unlock_irq(&ha->lock);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ha->core.shost)) {
		msleep(10);
		goto retry;
	}
}
EXPORT_SYMBOL(sas_wait_eh);
444
/*
 * Queue a LU or I_T reset for @dev to be executed by the host's EH
 * thread. @reset_type is SAS_DEV_LU_RESET or SAS_DEV_RESET; when
 * @wait is set, block until the reset has been performed.
 * Returns SUCCESS if the reset was scheduled, FAILED otherwise.
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type,
			   u64 lun, int wait)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		if (wait)
			sas_ata_wait_eh(dev);
		return SUCCESS;
	}

	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		/* only queue if no EH is pending and this reset type is
		 * not already requested for the device
		 */
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->core.shost);
		}
		spin_unlock_irq(&ha->lock);

		if (wait)
			sas_wait_eh(dev);

		if (scheduled)
			return SUCCESS;
	}

	SAS_DPRINTK("%s reset of %s failed\n",
		    reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		    dev_name(&dev->rphy->dev));

	return FAILED;
}
486
487int sas_eh_abort_handler(struct scsi_cmnd *cmd)
488{
489 int res;
490 struct sas_task *task = TO_SAS_TASK(cmd);
491 struct Scsi_Host *host = cmd->device->host;
492 struct sas_internal *i = to_sas_internal(host->transportt);
493
494 if (current != host->ehandler)
495 return FAILED;
496
497 if (!i->dft->lldd_abort_task)
498 return FAILED;
499
500 res = i->dft->lldd_abort_task(task);
501 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
502 return SUCCESS;
503
504 return FAILED;
505}
506EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
507
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct scsi_lun lun;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* called from outside the EH thread: queue the reset for the
	 * EH thread instead of issuing it here
	 */
	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);

	int_to_scsilun(cmd->device->lun, &lun);

	if (!i->dft->lldd_lu_reset)
		return FAILED;

	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
531
/* SCSI EH bus-reset callback: perform an I_T nexus reset of the
 * command's device (queued to the EH thread if called elsewhere).
 */
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);

	if (!i->dft->lldd_I_T_nexus_reset)
		return FAILED;

	/* -ENODEV means the device is gone, which also ends recovery */
	res = i->dft->lldd_I_T_nexus_reset(dev);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
	    res == -ENODEV)
		return SUCCESS;

	return FAILED;
}
552
553/* Try to reset a device */
554static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
555{
556 int res;
557 struct Scsi_Host *shost = cmd->device->host;
558
559 if (!shost->hostt->eh_device_reset_handler)
560 goto try_bus_reset;
561
562 res = shost->hostt->eh_device_reset_handler(cmd);
563 if (res == SUCCESS)
564 return res;
565
566try_bus_reset:
567 if (shost->hostt->eh_bus_reset_handler)
568 return shost->hostt->eh_bus_reset_handler(cmd);
569
570 return FAILED;
571}
572
/*
 * Core libsas error-handling strategy: for every failed command on
 * @work_q, try to find/abort its task, then escalate through LU
 * recovery, I_T nexus recovery, port nexus clear and finally HA
 * nexus clear. Commands are drained from @work_q as they are dealt
 * with; anything deferred to libata is spliced back at the end.
 */
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(done);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

		if (!task)
			list_move_tail(&cmd->eh_entry, &done);
	}

 Again:
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* LLDD asked for a device reset: skip the find/abort step */
		if (need_reset) {
			SAS_DPRINTK("%s: task 0x%p requests reset\n",
				    __func__, task);
			goto reset;
		}

		SAS_DPRINTK("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		cmd->eh_eflags = 0;

		switch (res) {
		case TASK_IS_DONE:
			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
				    task);
			sas_eh_defer_cmd(cmd);
			continue;
		case TASK_IS_ABORTED:
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
				    __func__, task);
			sas_eh_defer_cmd(cmd);
			continue;
		case TASK_IS_AT_LU:
			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
 reset:
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("dev %016llx LU %llx is "
					    "recovered\n",
					    SAS_ADDR(task->dev),
					    cmd->device->lun);
				sas_eh_defer_cmd(cmd);
				/* the LU recovery covered every command on
				 * that nexus, so drain them and restart the
				 * scan over the now-modified list
				 */
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			/* fallthrough */
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
				    task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				SAS_DPRINTK("I_T %016llx recovered\n",
					    SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
				goto Again;
			}
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				SAS_DPRINTK("clearing nexus for port:%d\n",
					    port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus port:%d "
						    "succeeded\n", port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				SAS_DPRINTK("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus ha "
						    "succeeded\n");
					sas_eh_finish_cmd(cmd);
					goto clear_q;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors.  Quite
			 * possibly the HA just disappeared.
			 */
			SAS_DPRINTK("error from  device %llx, LUN %llx "
				    "couldn't be recovered in any way\n",
				    SAS_ADDR(task->dev->sas_addr),
				    cmd->device->lun);

			sas_eh_finish_cmd(cmd);
			goto clear_q;
		}
	}
 out:
	/* hand already-completed commands and ATA deferrals back */
	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);
	return;

 clear_q:
	SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
	goto out;
}
710
/*
 * Execute the directed device resets that sas_queue_reset() queued on
 * ha->eh_dev_q. Runs from the EH thread; ha->lock is dropped while
 * calling into the LLDD.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		/* hold the device across the unlocked LLDD calls */
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}
743
744
/*
 * Top-level SCSI error-handler strategy routine for libsas hosts.
 * Drains shost->eh_cmd_q, runs the libsas error ladder, then the
 * generic SCSI EH steps, directed resets and libata EH, looping
 * until no new EH work was scheduled during the pass.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
		    __func__, atomic_read(&shost->host_busy), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have a an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
		    __func__, atomic_read(&shost->host_busy),
		    shost->host_failed, tries);
}
805
/* Block-layer timeout hook: libsas never completes a timed-out
 * command here; the normal SCSI EH path takes over instead.
 */
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
	scmd_dbg(cmd, "command %p timed out\n", cmd);

	return BLK_EH_NOT_HANDLED;
}
812
813int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
814{
815 struct domain_device *dev = sdev_to_domain_dev(sdev);
816
817 if (dev_is_sata(dev))
818 return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
819
820 return -EINVAL;
821}
822
/*
 * Look up the domain_device backing @rphy by scanning every port's
 * device list of the owning host. Returns NULL when not found; no
 * reference is taken — callers must do that themselves.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	/* phy_port_lock (outer) stabilizes the port array; each port's
	 * dev_list_lock (inner) protects its device list
	 */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}
851
852int sas_target_alloc(struct scsi_target *starget)
853{
854 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
855 struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
856
857 if (!found_dev)
858 return -ENODEV;
859
860 kref_get(&found_dev->kref);
861 starget->hostdata = found_dev;
862 return 0;
863}
864
865#define SAS_DEF_QD 256
866
867int sas_slave_configure(struct scsi_device *scsi_dev)
868{
869 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
870 struct sas_ha_struct *sas_ha;
871
872 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
873
874 if (dev_is_sata(dev)) {
875 ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
876 return 0;
877 }
878
879 sas_ha = dev->port->ha;
880
881 sas_read_port_mode_page(scsi_dev);
882
883 if (scsi_dev->tagged_supported) {
884 scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
885 } else {
886 SAS_DPRINTK("device %llx, LUN %llx doesn't support "
887 "TCQ\n", SAS_ADDR(dev->sas_addr),
888 scsi_dev->lun);
889 scsi_change_queue_depth(scsi_dev, 1);
890 }
891
892 scsi_dev->allow_restart = 1;
893
894 return 0;
895}
896
897int sas_change_queue_depth(struct scsi_device *sdev, int depth)
898{
899 struct domain_device *dev = sdev_to_domain_dev(sdev);
900
901 if (dev_is_sata(dev))
902 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
903
904 if (!sdev->tagged_supported)
905 depth = 1;
906 return scsi_change_queue_depth(sdev, depth);
907}
908
/* Report a fixed 255-head/63-sector BIOS geometry; the cylinder count
 * is whatever remains of @capacity (truncated to int by design).
 */
int sas_bios_param(struct scsi_device *scsi_dev,
			  struct block_device *bdev,
			  sector_t capacity, int *hsc)
{
	hsc[0] = 255;	/* heads */
	hsc[1] = 63;	/* sectors per track */
	sector_div(capacity, 255*63);
	hsc[2] = capacity;	/* cylinders */

	return 0;
}
920
921/*
922 * Tell an upper layer that it needs to initiate an abort for a given task.
923 * This should only ever be called by an LLDD.
924 */
void sas_task_abort(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;

	/* Escape for libsas internal commands */
	if (!sc) {
		/* internal tasks complete through their timeout handler;
		 * fire it immediately if the timer was still pending
		 */
		struct sas_task_slow *slow = task->slow_task;

		if (!slow)
			return;
		if (!del_timer(&slow->timer))
			return;
		slow->timer.function(slow->timer.data);
		return;
	}

	if (dev_is_sata(task->dev)) {
		sas_ata_task_abort(task);
	} else {
		struct request_queue *q = sc->device->request_queue;
		unsigned long flags;

		/* blk_abort_request() requires the queue lock here */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_abort_request(sc->request);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
952
953void sas_target_destroy(struct scsi_target *starget)
954{
955 struct domain_device *found_dev = starget->hostdata;
956
957 if (!found_dev)
958 return;
959
960 starget->hostdata = NULL;
961 sas_put_device(found_dev);
962}
963
964static void sas_parse_addr(u8 *sas_addr, const char *p)
965{
966 int i;
967 for (i = 0; i < SAS_ADDR_SIZE; i++) {
968 u8 h, l;
969 if (!*p)
970 break;
971 h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
972 p++;
973 l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
974 p++;
975 sas_addr[i] = (h<<4) | l;
976 }
977}
978
979#define SAS_STRING_ADDR_SIZE 16
980
/*
 * Load a SAS address from the "sas_addr" firmware file attached to
 * @shost's device node and parse it into binary form in @addr.
 * Returns 0 on success, a negative errno otherwise.
 */
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
{
	int res;
	const struct firmware *fw;

	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
	if (res)
		return res;

	/* need at least 16 hex characters for an 8-byte address */
	if (fw->size < SAS_STRING_ADDR_SIZE) {
		res = -ENODEV;
		goto out;
	}

	sas_parse_addr(addr, fw->data);

out:
	release_firmware(fw);
	return res;
}
EXPORT_SYMBOL_GPL(sas_request_addr);
1002
1003EXPORT_SYMBOL_GPL(sas_queuecommand);
1004EXPORT_SYMBOL_GPL(sas_target_alloc);
1005EXPORT_SYMBOL_GPL(sas_slave_configure);
1006EXPORT_SYMBOL_GPL(sas_change_queue_depth);
1007EXPORT_SYMBOL_GPL(sas_bios_param);
1008EXPORT_SYMBOL_GPL(sas_task_abort);
1009EXPORT_SYMBOL_GPL(sas_phy_reset);
1010EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
1011EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
1012EXPORT_SYMBOL_GPL(sas_target_destroy);
1013EXPORT_SYMBOL_GPL(sas_ioctl);