1// SPDX-License-Identifier: GPL-2.0
2/*
3 * scsi_scan.c
4 *
5 * Copyright (C) 2000 Eric Youngdale,
6 * Copyright (C) 2002 Patrick Mansfield
7 *
8 * The general scanning/probing algorithm is as follows; exceptions are
9 * made to it depending on device specific flags, compilation options, and
10 * global variable (boot or module load time) settings.
11 *
12 * A specific LUN is scanned via an INQUIRY command; if the LUN has a
13 * device attached, a scsi_device is allocated and set up for it.
14 *
15 * For every id of every channel on the given host:
16 *
17 * Scan LUN 0; if the target responds to LUN 0 (even if there is no
18 * device or storage attached to LUN 0):
19 *
20 * If LUN 0 has a device attached, allocate and setup a
21 * scsi_device for it.
22 *
23 * If target is SCSI-3 or up, issue a REPORT LUNS, and scan
24 * all of the LUNs returned by REPORT LUNS; else,
25 * sequentially scan LUNs up until some maximum is reached,
26 * or a LUN is seen that cannot have a device attached to it.
27 */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/init.h>
32#include <linux/blkdev.h>
33#include <linux/delay.h>
34#include <linux/kthread.h>
35#include <linux/spinlock.h>
36#include <linux/async.h>
37#include <linux/slab.h>
38#include <asm/unaligned.h>
39
40#include <scsi/scsi.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_driver.h>
44#include <scsi/scsi_devinfo.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_transport.h>
47#include <scsi/scsi_dh.h>
48#include <scsi/scsi_eh.h>
49
50#include "scsi_priv.h"
51#include "scsi_logging.h"
52
53#define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
54 " SCSI scanning, some SCSI devices might not be configured\n"
55
56/*
57 * Default timeout
58 */
59#define SCSI_TIMEOUT (2*HZ)
60#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61
62/*
63 * Prefix values for the SCSI id's (stored in sysfs name field)
64 */
65#define SCSI_UID_SER_NUM 'S'
66#define SCSI_UID_UNKNOWN 'Z'
67
68/*
69 * Return values of some of the scanning functions.
70 *
71 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target; this
72 * includes allocation or general failures preventing IO from being sent.
73 *
74 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
75 * on the given LUN.
76 *
77 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
78 * given LUN.
79 */
80#define SCSI_SCAN_NO_RESPONSE 0
81#define SCSI_SCAN_TARGET_PRESENT 1
82#define SCSI_SCAN_LUN_PRESENT 2
83
84static const char *scsi_null_device_strs = "nullnullnullnull";
85
86#define MAX_SCSI_LUNS 512
87
88static u64 max_scsi_luns = MAX_SCSI_LUNS;
89
90module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91MODULE_PARM_DESC(max_luns,
92 "last scsi LUN (should be between 1 and 2^64-1)");
93
94#ifdef CONFIG_SCSI_SCAN_ASYNC
95#define SCSI_SCAN_TYPE_DEFAULT "async"
96#else
97#define SCSI_SCAN_TYPE_DEFAULT "sync"
98#endif
99
100static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101
102module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103 S_IRUGO|S_IWUSR);
104MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105 "Setting to 'manual' disables automatic scanning, but allows "
106 "for manual device scan via the 'scan' sysfs attribute.");
107
108static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109
110module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111MODULE_PARM_DESC(inq_timeout,
112 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113 " Default is 20. Some devices may need more; most need less.");
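/*
 * Illustrative usage (assuming the usual scsi_mod module name): the three
 * parameters above can be set on the kernel command line, e.g.
 * "scsi_mod.max_luns=8 scsi_mod.scan=manual scsi_mod.inq_timeout=30",
 * or adjusted at runtime through /sys/module/scsi_mod/parameters/.
 */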
114
115/* This lock protects only this list */
116static DEFINE_SPINLOCK(async_scan_lock);
117static LIST_HEAD(scanning_hosts);
118
119struct async_scan_data {
120 struct list_head list;
121 struct Scsi_Host *shost;
122 struct completion prev_finished;
123};
124
125/*
126 * scsi_enable_async_suspend - Enable async suspend and resume
127 */
128void scsi_enable_async_suspend(struct device *dev)
129{
130 /*
131 * If a user has disabled async probing a likely reason is due to a
132 * storage enclosure that does not inject staggered spin-ups. For
133 * safety, make resume synchronous as well in that case.
134 */
135 if (strncmp(scsi_scan_type, "async", 5) != 0)
136 return;
137 /* Enable asynchronous suspend and resume. */
138 device_enable_async_suspend(dev);
139}
140
141/**
142 * scsi_complete_async_scans - Wait for asynchronous scans to complete
143 *
144 * When this function returns, any host which started scanning before
145 * this function was called will have finished its scan. Hosts which
146 * started scanning after this function was called may or may not have
147 * finished.
148 */
149int scsi_complete_async_scans(void)
150{
151 struct async_scan_data *data;
152
153 do {
154 if (list_empty(&scanning_hosts))
155 return 0;
156 /* If we can't get memory immediately, that's OK. Just
157 * sleep a little. Even if we never get memory, the async
158 * scans will finish eventually.
159 */
160 data = kmalloc(sizeof(*data), GFP_KERNEL);
161 if (!data)
162 msleep(1);
163 } while (!data);
164
165 data->shost = NULL;
166 init_completion(&data->prev_finished);
167
168 spin_lock(&async_scan_lock);
169 /* Check that there's still somebody else on the list */
170 if (list_empty(&scanning_hosts))
171 goto done;
172 list_add_tail(&data->list, &scanning_hosts);
173 spin_unlock(&async_scan_lock);
174
175 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
176 wait_for_completion(&data->prev_finished);
177
178 spin_lock(&async_scan_lock);
179 list_del(&data->list);
180 if (!list_empty(&scanning_hosts)) {
181 struct async_scan_data *next = list_entry(scanning_hosts.next,
182 struct async_scan_data, list);
183 complete(&next->prev_finished);
184 }
185 done:
186 spin_unlock(&async_scan_lock);
187
188 kfree(data);
189 return 0;
190}
191
192/**
193 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
194 * @sdev: scsi device to send command to
195 * @result: area to store the result of the MODE SENSE
196 *
197 * Description:
198 * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
199 * Called for BLIST_KEY devices.
200 **/
201static void scsi_unlock_floptical(struct scsi_device *sdev,
202 unsigned char *result)
203{
204 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
205
206 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
207 scsi_cmd[0] = MODE_SENSE;
208 scsi_cmd[1] = 0;
209 scsi_cmd[2] = 0x2e;
210 scsi_cmd[3] = 0;
211 scsi_cmd[4] = 0x2a; /* size */
212 scsi_cmd[5] = 0;
213 scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
214 SCSI_TIMEOUT, 3, NULL);
215}
216
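/*
 * scsi_realloc_sdev_budget_map - (re)allocate the device budget bitmap
 *
 * Allocate sdev->budget_map, or reallocate it when the requested depth
 * implies a different sbitmap shift than the current map uses. The request
 * queue is frozen around a reallocation, and the old map is kept if the
 * reallocation fails; a negative errno is only returned when the initial
 * allocation fails.
 */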
217static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
218 unsigned int depth)
219{
220 int new_shift = sbitmap_calculate_shift(depth);
221 bool need_alloc = !sdev->budget_map.map;
222 bool need_free = false;
223 int ret;
224 struct sbitmap sb_backup;
225
226 depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
227
228 /*
229 * Realloc if a new shift is calculated, which happens when a new
230 * default queue depth is set up after calling ->slave_configure.
231 */
232 if (!need_alloc && new_shift != sdev->budget_map.shift)
233 need_alloc = need_free = true;
234
235 if (!need_alloc)
236 return 0;
237
238 /*
239 * Request queue has to be frozen for reallocating budget map,
240 * and here disk isn't added yet, so freezing is pretty fast
241 */
242 if (need_free) {
243 blk_mq_freeze_queue(sdev->request_queue);
244 sb_backup = sdev->budget_map;
245 }
246 ret = sbitmap_init_node(&sdev->budget_map,
247 scsi_device_max_queue_depth(sdev),
248 new_shift, GFP_KERNEL,
249 sdev->request_queue->node, false, true);
250 if (!ret)
251 sbitmap_resize(&sdev->budget_map, depth);
252
253 if (need_free) {
254 if (ret)
255 sdev->budget_map = sb_backup;
256 else
257 sbitmap_free(&sb_backup);
258 ret = 0;
259 blk_mq_unfreeze_queue(sdev->request_queue);
260 }
261 return ret;
262}
263
264/**
265 * scsi_alloc_sdev - allocate and set up a scsi_device
266 * @starget: which target to allocate a &scsi_device for
267 * @lun: which lun
268 * @hostdata: usually NULL and set by ->slave_alloc instead
269 *
270 * Description:
271 * Allocate, initialize for I/O, and return a pointer to a scsi_device.
272 * Stores the @shost, @channel, @id, and @lun in the scsi_device, and
273 * adds the scsi_device to the appropriate list.
274 *
275 * Return value:
276 * scsi_device pointer, or NULL on failure.
277 **/
278static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
279 u64 lun, void *hostdata)
280{
281 unsigned int depth;
282 struct scsi_device *sdev;
283 struct request_queue *q;
284 int display_failure_msg = 1, ret;
285 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
286
287 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
288 GFP_KERNEL);
289 if (!sdev)
290 goto out;
291
292 sdev->vendor = scsi_null_device_strs;
293 sdev->model = scsi_null_device_strs;
294 sdev->rev = scsi_null_device_strs;
295 sdev->host = shost;
296 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
297 sdev->id = starget->id;
298 sdev->lun = lun;
299 sdev->channel = starget->channel;
300 mutex_init(&sdev->state_mutex);
301 sdev->sdev_state = SDEV_CREATED;
302 INIT_LIST_HEAD(&sdev->siblings);
303 INIT_LIST_HEAD(&sdev->same_target_siblings);
304 INIT_LIST_HEAD(&sdev->starved_entry);
305 INIT_LIST_HEAD(&sdev->event_list);
306 spin_lock_init(&sdev->list_lock);
307 mutex_init(&sdev->inquiry_mutex);
308 INIT_WORK(&sdev->event_work, scsi_evt_thread);
309 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
310
311 sdev->sdev_gendev.parent = get_device(&starget->dev);
312 sdev->sdev_target = starget;
313
314 /* usually NULL and set by ->slave_alloc instead */
315 sdev->hostdata = hostdata;
316
317 /* if the device needs this changing, it may do so in the
318 * slave_configure function */
319 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
320
321 /*
322 * Some low level drivers may use device->type
323 */
324 sdev->type = -1;
325
326 /*
327 * Assume that the device will have handshaking problems,
328 * and then fix this field later if it turns out it
329 * doesn't
330 */
331 sdev->borken = 1;
332
333 sdev->sg_reserved_size = INT_MAX;
334
335 q = blk_mq_init_queue(&sdev->host->tag_set);
336 if (IS_ERR(q)) {
337 /* release fn is set up in scsi_sysfs_device_initialize, so we
338 * have to free and put manually here */
339 put_device(&starget->dev);
340 kfree(sdev);
341 goto out;
342 }
343 kref_get(&sdev->host->tagset_refcnt);
344 sdev->request_queue = q;
345 q->queuedata = sdev;
346 __scsi_init_queue(sdev->host, q);
347
348 depth = sdev->host->cmd_per_lun ?: 1;
349
350 /*
351 * Use .can_queue as budget map's depth because we have to
352 * support adjusting queue depth from sysfs. Meanwhile, use the
353 * default device queue depth to figure out the sbitmap shift,
354 * since we use this queue depth most of the time.
355 */
356 if (scsi_realloc_sdev_budget_map(sdev, depth)) {
357 put_device(&starget->dev);
358 kfree(sdev);
359 goto out;
360 }
361
362 scsi_change_queue_depth(sdev, depth);
363
364 scsi_sysfs_device_initialize(sdev);
365
366 if (shost->hostt->slave_alloc) {
367 ret = shost->hostt->slave_alloc(sdev);
368 if (ret) {
369 /*
370 * if LLDD reports slave not present, don't clutter
371 * console with alloc failure messages
372 */
373 if (ret == -ENXIO)
374 display_failure_msg = 0;
375 goto out_device_destroy;
376 }
377 }
378
379 return sdev;
380
381out_device_destroy:
382 __scsi_remove_device(sdev);
383out:
384 if (display_failure_msg)
385 printk(ALLOC_FAILURE_MSG, __func__);
386 return NULL;
387}
388
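/*
 * scsi_target_destroy - final teardown of a scsi_target
 *
 * Mark the target STARGET_DEL, let the LLD clean up via ->target_destroy(),
 * unlink the target from the host's list and drop the reference on the
 * target device.
 */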
389static void scsi_target_destroy(struct scsi_target *starget)
390{
391 struct device *dev = &starget->dev;
392 struct Scsi_Host *shost = dev_to_shost(dev->parent);
393 unsigned long flags;
394
395 BUG_ON(starget->state == STARGET_DEL);
396 starget->state = STARGET_DEL;
397 transport_destroy_device(dev);
398 spin_lock_irqsave(shost->host_lock, flags);
399 if (shost->hostt->target_destroy)
400 shost->hostt->target_destroy(starget);
401 list_del_init(&starget->siblings);
402 spin_unlock_irqrestore(shost->host_lock, flags);
403 put_device(dev);
404}
405
406static void scsi_target_dev_release(struct device *dev)
407{
408 struct device *parent = dev->parent;
409 struct scsi_target *starget = to_scsi_target(dev);
410
411 kfree(starget);
412 put_device(parent);
413}
414
415static struct device_type scsi_target_type = {
416 .name = "scsi_target",
417 .release = scsi_target_dev_release,
418};
419
420int scsi_is_target_device(const struct device *dev)
421{
422 return dev->type == &scsi_target_type;
423}
424EXPORT_SYMBOL(scsi_is_target_device);
425
426static struct scsi_target *__scsi_find_target(struct device *parent,
427 int channel, uint id)
428{
429 struct scsi_target *starget, *found_starget = NULL;
430 struct Scsi_Host *shost = dev_to_shost(parent);
431 /*
432 * Search for an existing target for this sdev.
433 */
434 list_for_each_entry(starget, &shost->__targets, siblings) {
435 if (starget->id == id &&
436 starget->channel == channel) {
437 found_starget = starget;
438 break;
439 }
440 }
441 if (found_starget)
442 get_device(&found_starget->dev);
443
444 return found_starget;
445}
446
447/**
448 * scsi_target_reap_ref_release - remove target from visibility
449 * @kref: the reap_ref in the target being released
450 *
451 * Called on last put of reap_ref, which is the indication that no device
452 * under this target is visible anymore, so render the target invisible in
453 * sysfs. Note: we have to be in user context here because the target reaps
454 * should be done in places where the scsi device visibility is being removed.
455 */
456static void scsi_target_reap_ref_release(struct kref *kref)
457{
458 struct scsi_target *starget
459 = container_of(kref, struct scsi_target, reap_ref);
460
461 /*
462 * if we get here and the target is still in a CREATED state that
463 * means it was allocated but never made visible (because a scan
464 * turned up no LUNs), so don't call device_del() on it.
465 */
466 if ((starget->state != STARGET_CREATED) &&
467 (starget->state != STARGET_CREATED_REMOVE)) {
468 transport_remove_device(&starget->dev);
469 device_del(&starget->dev);
470 }
471 scsi_target_destroy(starget);
472}
473
474static void scsi_target_reap_ref_put(struct scsi_target *starget)
475{
476 kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
477}
478
479/**
480 * scsi_alloc_target - allocate a new or find an existing target
481 * @parent: parent of the target (need not be a scsi host)
482 * @channel: target channel number (zero if no channels)
483 * @id: target id number
484 *
485 * Return an existing target if one exists, provided it hasn't already
486 * gone into STARGET_DEL state, otherwise allocate a new target.
487 *
488 * The target is returned with an incremented reference, so the caller
489 * is responsible for both reaping and doing a last put
490 */
491static struct scsi_target *scsi_alloc_target(struct device *parent,
492 int channel, uint id)
493{
494 struct Scsi_Host *shost = dev_to_shost(parent);
495 struct device *dev = NULL;
496 unsigned long flags;
497 const int size = sizeof(struct scsi_target)
498 + shost->transportt->target_size;
499 struct scsi_target *starget;
500 struct scsi_target *found_target;
501 int error, ref_got;
502
503 starget = kzalloc(size, GFP_KERNEL);
504 if (!starget) {
505 printk(KERN_ERR "%s: allocation failure\n", __func__);
506 return NULL;
507 }
508 dev = &starget->dev;
509 device_initialize(dev);
510 kref_init(&starget->reap_ref);
511 dev->parent = get_device(parent);
512 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
513 dev->bus = &scsi_bus_type;
514 dev->type = &scsi_target_type;
515 scsi_enable_async_suspend(dev);
516 starget->id = id;
517 starget->channel = channel;
518 starget->can_queue = 0;
519 INIT_LIST_HEAD(&starget->siblings);
520 INIT_LIST_HEAD(&starget->devices);
521 starget->state = STARGET_CREATED;
522 starget->scsi_level = SCSI_2;
523 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
524 retry:
525 spin_lock_irqsave(shost->host_lock, flags);
526
527 found_target = __scsi_find_target(parent, channel, id);
528 if (found_target)
529 goto found;
530
531 list_add_tail(&starget->siblings, &shost->__targets);
532 spin_unlock_irqrestore(shost->host_lock, flags);
533 /* allocate and add */
534 transport_setup_device(dev);
535 if (shost->hostt->target_alloc) {
536 error = shost->hostt->target_alloc(starget);
537
538 if (error) {
539 if (error != -ENXIO)
540 dev_err(dev, "target allocation failed, error %d\n", error);
541 /* don't want scsi_target_reap to do the final
542 * put because it will be under the host lock */
543 scsi_target_destroy(starget);
544 return NULL;
545 }
546 }
547 get_device(dev);
548
549 return starget;
550
551 found:
552 /*
553 * release routine already fired if kref is zero, so if we can still
554 * take the reference, the target must be alive. If we can't, it must
555 * be dying and we need to wait for a new target
556 */
557 ref_got = kref_get_unless_zero(&found_target->reap_ref);
558
559 spin_unlock_irqrestore(shost->host_lock, flags);
560 if (ref_got) {
561 put_device(dev);
562 return found_target;
563 }
564 /*
565 * Unfortunately, we found a dying target; need to wait until it's
566 * dead before we can get a new one. There is an anomaly here. We
567 * *should* call scsi_target_reap() to balance the kref_get() of the
568 * reap_ref above. However, since the target is being released, it's
569 * already invisible and the reap_ref is irrelevant. If we call
570 * scsi_target_reap() we might spuriously do another device_del() on
571 * an already invisible target.
572 */
573 put_device(&found_target->dev);
574 /*
575 * length of time is irrelevant here, we just want to yield the CPU
576 * for a tick to avoid busy waiting for the target to die.
577 */
578 msleep(1);
579 goto retry;
580}
581
582/**
583 * scsi_target_reap - check to see if target is in use and destroy if not
584 * @starget: target to be checked
585 *
586 * This is used after removing a LUN or doing a last put of the target
587 * it checks atomically that nothing is using the target and removes
588 * it if so.
589 */
590void scsi_target_reap(struct scsi_target *starget)
591{
592 /*
593 * serious problem if this triggers: STARGET_DEL is only set if
594 * the reap_ref drops to zero, so we're trying to do another final put
595 * on an already released kref
596 */
597 BUG_ON(starget->state == STARGET_DEL);
598 scsi_target_reap_ref_put(starget);
599}
600
601/**
602 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
603 * INQUIRY result string
604 * @s: INQUIRY result string to sanitize
605 * @len: length of the string
606 *
607 * Description:
608 * The SCSI spec says that INQUIRY vendor, product, and revision
609 * strings must consist entirely of graphic ASCII characters,
610 * padded on the right with spaces. Since not all devices obey
611 * this rule, we will replace non-graphic or non-ASCII characters
612 * with spaces. Exception: a NUL character is interpreted as a
613 * string terminator, so all the following characters are set to
614 * spaces.
615 **/
616void scsi_sanitize_inquiry_string(unsigned char *s, int len)
617{
618 int terminated = 0;
619
620 for (; len > 0; (--len, ++s)) {
621 if (*s == 0)
622 terminated = 1;
623 if (terminated || *s < 0x20 || *s > 0x7e)
624 *s = ' ';
625 }
626}
627EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
628
629/**
630 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
631 * @sdev: scsi_device to probe
632 * @inq_result: area to store the INQUIRY result
633 * @result_len: len of inq_result
634 * @bflags: store any bflags found here
635 *
636 * Description:
637 * Probe the LUN associated with @sdev using a standard SCSI INQUIRY;
638 *
639 * If the INQUIRY is successful, zero is returned and the
640 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
641 * are copied to the scsi_device; any flags value is stored in *@bflags.
642 **/
643static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
644 int result_len, blist_flags_t *bflags)
645{
646 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
647 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
648 int response_len = 0;
649 int pass, count, result, resid;
650 struct scsi_sense_hdr sshdr;
651 const struct scsi_exec_args exec_args = {
652 .sshdr = &sshdr,
653 .resid = &resid,
654 };
655
656 *bflags = 0;
657
658 /* Perform up to 3 passes. The first pass uses a conservative
659 * transfer length of 36 unless sdev->inquiry_len specifies a
660 * different value. */
661 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
662 try_inquiry_len = first_inquiry_len;
663 pass = 1;
664
665 next_pass:
666 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
667 "scsi scan: INQUIRY pass %d length %d\n",
668 pass, try_inquiry_len));
669
670 /* Each pass gets up to three chances to ignore Unit Attention */
671 for (count = 0; count < 3; ++count) {
672 memset(scsi_cmd, 0, 6);
673 scsi_cmd[0] = INQUIRY;
674 scsi_cmd[4] = (unsigned char) try_inquiry_len;
675
676 memset(inq_result, 0, try_inquiry_len);
677
678 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
679 inq_result, try_inquiry_len,
680 HZ / 2 + HZ * scsi_inq_timeout, 3,
681 &exec_args);
682
683 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
684 "scsi scan: INQUIRY %s with code 0x%x\n",
685 result ? "failed" : "successful", result));
686
687 if (result > 0) {
688 /*
689 * not-ready to ready transition [asc/ascq=0x28/0x0]
690 * or power-on, reset [asc/ascq=0x29/0x0], continue.
691 * INQUIRY should not yield UNIT_ATTENTION
692 * but many buggy devices do so anyway.
693 */
694 if (scsi_status_is_check_condition(result) &&
695 scsi_sense_valid(&sshdr)) {
696 if ((sshdr.sense_key == UNIT_ATTENTION) &&
697 ((sshdr.asc == 0x28) ||
698 (sshdr.asc == 0x29)) &&
699 (sshdr.ascq == 0))
700 continue;
701 }
702 } else if (result == 0) {
703 /*
704 * if nothing was transferred, we try
705 * again. It's a workaround for some USB
706 * devices.
707 */
708 if (resid == try_inquiry_len)
709 continue;
710 }
711 break;
712 }
713
714 if (result == 0) {
715 scsi_sanitize_inquiry_string(&inq_result[8], 8);
716 scsi_sanitize_inquiry_string(&inq_result[16], 16);
717 scsi_sanitize_inquiry_string(&inq_result[32], 4);
718
719 response_len = inq_result[4] + 5;
720 if (response_len > 255)
721 response_len = first_inquiry_len; /* sanity */
722
723 /*
724 * Get any flags for this device.
725 *
726 * XXX add a bflags to scsi_device, and replace the
727 * corresponding bit fields in scsi_device, so bflags
728 * need not be passed as an argument.
729 */
730 *bflags = scsi_get_device_flags(sdev, &inq_result[8],
731 &inq_result[16]);
732
733 /* When the first pass succeeds we gain information about
734 * what larger transfer lengths might work. */
735 if (pass == 1) {
736 if (BLIST_INQUIRY_36 & *bflags)
737 next_inquiry_len = 36;
738 /*
739 * LLD specified a maximum sdev->inquiry_len
740 * but device claims it has more data. Capping
741 * the length only makes sense for legacy
742 * devices. If a device supports SPC-4 (2014)
743 * or newer, assume that it is safe to ask for
744 * as much as the device says it supports.
745 */
746 else if (sdev->inquiry_len &&
747 response_len > sdev->inquiry_len &&
748 (inq_result[2] & 0x7) < 6) /* SPC-4 */
749 next_inquiry_len = sdev->inquiry_len;
750 else
751 next_inquiry_len = response_len;
752
753 /* If more data is available perform the second pass */
754 if (next_inquiry_len > try_inquiry_len) {
755 try_inquiry_len = next_inquiry_len;
756 pass = 2;
757 goto next_pass;
758 }
759 }
760
761 } else if (pass == 2) {
762 sdev_printk(KERN_INFO, sdev,
763 "scsi scan: %d byte inquiry failed. "
764 "Consider BLIST_INQUIRY_36 for this device\n",
765 try_inquiry_len);
766
767 /* If this pass failed, the third pass goes back and transfers
768 * the same amount as we successfully got in the first pass. */
769 try_inquiry_len = first_inquiry_len;
770 pass = 3;
771 goto next_pass;
772 }
773
774 /* If the last transfer attempt got an error, assume the
775 * peripheral doesn't exist or is dead. */
776 if (result)
777 return -EIO;
778
779 /* Don't report any more data than the device says is valid */
780 sdev->inquiry_len = min(try_inquiry_len, response_len);
781
782 /*
783 * XXX Abort if the response length is less than 36? If less than
784 * 32, the lookup of the device flags (above) could be invalid,
785 * and it would be possible to take an incorrect action - we do
786 * not want to hang because of a short INQUIRY. On the flip side,
787 * if the device is spun down or becoming ready (and so it gives a
788 * short INQUIRY), an abort here prevents any further use of the
789 * device, including spin up.
790 *
791 * On the whole, the best approach seems to be to assume the first
792 * 36 bytes are valid no matter what the device says. That's
793 * better than copying < 36 bytes to the inquiry-result buffer
794 * and displaying garbage for the Vendor, Product, or Revision
795 * strings.
796 */
797 if (sdev->inquiry_len < 36) {
798 if (!sdev->host->short_inquiry) {
799 shost_printk(KERN_INFO, sdev->host,
800 "scsi scan: INQUIRY result too short (%d),"
801 " using 36\n", sdev->inquiry_len);
802 sdev->host->short_inquiry = 1;
803 }
804 sdev->inquiry_len = 36;
805 }
806
807 /*
808 * Related to the above issue:
809 *
810 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
811 * and if not ready, sent a START_STOP to start (maybe spin up) and
812 * then send the INQUIRY again, since the INQUIRY can change after
813 * a device is initialized.
814 *
815 * Ideally, start a device if explicitly asked to do so. This
816 * assumes that a device is spun up on power on, spun down on
817 * request, and then spun up on request.
818 */
819
820 /*
821 * The scanning code needs to know the scsi_level, even if no
822 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
823 * non-zero LUNs can be scanned.
824 */
825 sdev->scsi_level = inq_result[2] & 0x0f;
826 if (sdev->scsi_level >= 2 ||
827 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
828 sdev->scsi_level++;
829 sdev->sdev_target->scsi_level = sdev->scsi_level;
830
831 /*
832 * If SCSI-2 or lower, and if the transport requires it,
833 * store the LUN value in CDB[1].
834 */
835 sdev->lun_in_cdb = 0;
836 if (sdev->scsi_level <= SCSI_2 &&
837 sdev->scsi_level != SCSI_UNKNOWN &&
838 !sdev->host->no_scsi2_lun_in_cdb)
839 sdev->lun_in_cdb = 1;
840
841 return 0;
842}
843
844/**
845 * scsi_add_lun - allocate and fully initialize a scsi_device
846 * @sdev: holds information to be stored in the new scsi_device
847 * @inq_result: holds the result of a previous INQUIRY to the LUN
848 * @bflags: black/white list flag
849 * @async: 1 if this device is being scanned asynchronously
850 *
851 * Description:
852 * Initialize the scsi_device @sdev. Optionally set fields based
853 * on values in *@bflags.
854 *
855 * Return:
856 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
857 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
858 **/
859static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
860 blist_flags_t *bflags, int async)
861{
862 int ret;
863
864 /*
865 * XXX do not save the inquiry, since it can change underneath us,
866 * save just vendor/model/rev.
867 *
868 * Rather than save it and have an ioctl that retrieves the saved
869 * value, have an ioctl that executes the same INQUIRY code used
870 * in scsi_probe_lun, let user level programs doing INQUIRY
871 * scanning run at their own risk, or supply a user level program
872 * that can correctly scan.
873 */
874
875 /*
876 * Copy at least 36 bytes of INQUIRY data, so that we don't
877 * dereference unallocated memory when accessing the Vendor,
878 * Product, and Revision strings. Badly behaved devices may set
879 * the INQUIRY Additional Length byte to a small value, indicating
880 * these strings are invalid, but often they contain plausible data
881 * nonetheless. It doesn't matter if the device sent < 36 bytes
882 * total, since scsi_probe_lun() initializes inq_result with 0s.
883 */
884 sdev->inquiry = kmemdup(inq_result,
885 max_t(size_t, sdev->inquiry_len, 36),
886 GFP_KERNEL);
887 if (sdev->inquiry == NULL)
888 return SCSI_SCAN_NO_RESPONSE;
889
890 sdev->vendor = (char *) (sdev->inquiry + 8);
891 sdev->model = (char *) (sdev->inquiry + 16);
892 sdev->rev = (char *) (sdev->inquiry + 32);
893
894 if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
895 /*
896 * sata emulation layer device. This is a hack to work around
897 * the SATL power management specifications which state that
898 * when the SATL detects the device has gone into standby
899 * mode, it shall respond with NOT READY.
900 */
901 sdev->allow_restart = 1;
902 }
903
904 if (*bflags & BLIST_ISROM) {
905 sdev->type = TYPE_ROM;
906 sdev->removable = 1;
907 } else {
908 sdev->type = (inq_result[0] & 0x1f);
909 sdev->removable = (inq_result[1] & 0x80) >> 7;
910
911 /*
912 * some devices may respond with wrong type for
913 * well-known logical units. Force well-known type
914 * to enumerate them correctly.
915 */
916 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
917 sdev_printk(KERN_WARNING, sdev,
918 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
919 __func__, sdev->type, (unsigned int)sdev->lun);
920 sdev->type = TYPE_WLUN;
921 }
922
923 }
924
925 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
926 /* RBC and MMC devices can return SCSI-3 compliance and yet
927 * still not support REPORT LUNS, so make them act as
928 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
929 * specifically set */
930 if ((*bflags & BLIST_REPORTLUN2) == 0)
931 *bflags |= BLIST_NOREPORTLUN;
932 }
933
934 /*
935 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
936 * spec says: The device server is capable of supporting the
937 * specified peripheral device type on this logical unit. However,
938 * the physical device is not currently connected to this logical
939 * unit.
940 *
941 * The above is vague, as it implies that we could treat 001 and
942 * 011 the same. Stay compatible with previous code, and create a
943 * scsi_device for a PQ of 1
944 *
945 * Don't set the device offline here; rather let the upper
946 * level drivers eval the PQ to decide whether they should
947 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
948 */
949
950 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
951 sdev->lockable = sdev->removable;
952 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
953
954 if (sdev->scsi_level >= SCSI_3 ||
955 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
956 sdev->ppr = 1;
957 if (inq_result[7] & 0x60)
958 sdev->wdtr = 1;
959 if (inq_result[7] & 0x10)
960 sdev->sdtr = 1;
961
962 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
963 "ANSI: %d%s\n", scsi_device_type(sdev->type),
964 sdev->vendor, sdev->model, sdev->rev,
965 sdev->inq_periph_qual, inq_result[2] & 0x07,
966 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
967
968 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
969 !(*bflags & BLIST_NOTQ)) {
970 sdev->tagged_supported = 1;
971 sdev->simple_tags = 1;
972 }
973
974 /*
975 * Some devices (Texel CD ROM drives) have handshaking problems
976 * when used with the Seagate controllers. borken is initialized
977 * to 1, and then set to 0 here.
978 */
979 if ((*bflags & BLIST_BORKEN) == 0)
980 sdev->borken = 0;
981
982 if (*bflags & BLIST_NO_ULD_ATTACH)
983 sdev->no_uld_attach = 1;
984
985 /*
986 * Apparently some really broken devices (contrary to the SCSI
987 * standards) need to be selected without asserting ATN
988 */
989 if (*bflags & BLIST_SELECT_NO_ATN)
990 sdev->select_no_atn = 1;
991
992 /*
993 * Maximum 512 sector transfer length for the
994 * broken RA4x00 Compaq Disk Array.
995 */
996 if (*bflags & BLIST_MAX_512)
997 blk_queue_max_hw_sectors(sdev->request_queue, 512);
998 /*
999 * Max 1024 sector transfer length for targets that report incorrect
1000 * max/optimal lengths and relied on the old block layer safe default
1001 */
1002 else if (*bflags & BLIST_MAX_1024)
1003 blk_queue_max_hw_sectors(sdev->request_queue, 1024);
1004
1005 /*
1006 * Some devices may not want to have a start command automatically
1007 * issued when a device is added.
1008 */
1009 if (*bflags & BLIST_NOSTARTONADD)
1010 sdev->no_start_on_add = 1;
1011
1012 if (*bflags & BLIST_SINGLELUN)
1013 scsi_target(sdev)->single_lun = 1;
1014
1015 sdev->use_10_for_rw = 1;
1016
1017 /* some devices don't like REPORT SUPPORTED OPERATION CODES
1018 * and will simply time out, causing sd_mod init to take a very
1019 * very long time */
1020 if (*bflags & BLIST_NO_RSOC)
1021 sdev->no_report_opcodes = 1;
1022
1023 /* set the device running here so that slave configure
1024 * may do I/O */
1025 mutex_lock(&sdev->state_mutex);
1026 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1027 if (ret)
1028 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1029 mutex_unlock(&sdev->state_mutex);
1030
1031 if (ret) {
1032 sdev_printk(KERN_ERR, sdev,
1033 "in wrong state %s to complete scan\n",
1034 scsi_device_state_name(sdev->sdev_state));
1035 return SCSI_SCAN_NO_RESPONSE;
1036 }
1037
1038 if (*bflags & BLIST_NOT_LOCKABLE)
1039 sdev->lockable = 0;
1040
1041 if (*bflags & BLIST_RETRY_HWERROR)
1042 sdev->retry_hwerror = 1;
1043
1044 if (*bflags & BLIST_NO_DIF)
1045 sdev->no_dif = 1;
1046
1047 if (*bflags & BLIST_UNMAP_LIMIT_WS)
1048 sdev->unmap_limit_for_ws = 1;
1049
1050 if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1051 sdev->ignore_media_change = 1;
1052
1053 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1054
1055 if (*bflags & BLIST_TRY_VPD_PAGES)
1056 sdev->try_vpd_pages = 1;
1057 else if (*bflags & BLIST_SKIP_VPD_PAGES)
1058 sdev->skip_vpd_pages = 1;
1059
1060 if (*bflags & BLIST_NO_VPD_SIZE)
1061 sdev->no_vpd_size = 1;
1062
1063 transport_configure_device(&sdev->sdev_gendev);
1064
1065 if (sdev->host->hostt->slave_configure) {
1066 ret = sdev->host->hostt->slave_configure(sdev);
1067 if (ret) {
1068 /*
1069 * if LLDD reports slave not present, don't clutter
1070 * console with alloc failure messages
1071 */
1072 if (ret != -ENXIO) {
1073 sdev_printk(KERN_ERR, sdev,
1074 "failed to configure device\n");
1075 }
1076 return SCSI_SCAN_NO_RESPONSE;
1077 }
1078
1079 /*
1080 * The queue_depth is often changed in ->slave_configure.
1081 * Set up budget map again since memory consumption of
1082 * the map depends on actual queue depth.
1083 */
1084 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1085 }
1086
1087 if (sdev->scsi_level >= SCSI_3)
1088 scsi_attach_vpd(sdev);
1089
1090 scsi_cdl_check(sdev);
1091
1092 sdev->max_queue_depth = sdev->queue_depth;
1093 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1094 sdev->sdev_bflags = *bflags;
1095
1096 /*
1097 * Ok, the device is now all set up, we can
1098 * register it and tell the rest of the kernel
1099 * about it.
1100 */
1101 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1102 return SCSI_SCAN_NO_RESPONSE;
1103
1104 return SCSI_SCAN_LUN_PRESENT;
1105}
1106
1107#ifdef CONFIG_SCSI_LOGGING
1108/**
1109 * scsi_inq_str - copy INQUIRY data from index @first up to @end, strip trailing whitespace
1110 * @buf: Output buffer with at least end-first+1 bytes of space
1111 * @inq: Inquiry buffer (input)
1112 * @first: Offset of string into inq
1113 * @end: Index after last character in inq
1114 */
1115static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1116 unsigned first, unsigned end)
1117{
1118 unsigned term = 0, idx;
1119
1120 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
1121 if (inq[idx+first] > ' ') {
1122 buf[idx] = inq[idx+first];
1123 term = idx+1;
1124 } else {
1125 buf[idx] = ' ';
1126 }
1127 }
1128 buf[term] = 0;
1129 return buf;
1130}
1131#endif
1132
1133/**
1134 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1135 * @starget: pointer to target device structure
1136 * @lun: LUN of target device
1137 * @bflagsp: store bflags here if not NULL
1138 * @sdevp: if non-NULL, the scsi_device found or allocated is returned here
1139 * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only
1140 * needed on first scan
1141 * @hostdata: passed to scsi_alloc_sdev()
1142 *
1143 * Description:
1144 * Call scsi_probe_lun, if a LUN with an attached device is found,
1145 * allocate and set it up by calling scsi_add_lun.
1146 *
1147 * Return:
1148 *
1149 * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1150 * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1151 * attached at the LUN
1152 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1153 **/
1154static int scsi_probe_and_add_lun(struct scsi_target *starget,
1155 u64 lun, blist_flags_t *bflagsp,
1156 struct scsi_device **sdevp,
1157 enum scsi_scan_mode rescan,
1158 void *hostdata)
1159{
1160 struct scsi_device *sdev;
1161 unsigned char *result;
1162 blist_flags_t bflags;
1163 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1164 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1165
1166 /*
1167 * The rescan flag is used as an optimization: the first scan of a
1168 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
1169 */
1170 sdev = scsi_device_lookup_by_target(starget, lun);
1171 if (sdev) {
1172 if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1173 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1174 "scsi scan: device exists on %s\n",
1175 dev_name(&sdev->sdev_gendev)));
1176 if (sdevp)
1177 *sdevp = sdev;
1178 else
1179 scsi_device_put(sdev);
1180
1181 if (bflagsp)
1182 *bflagsp = scsi_get_device_flags(sdev,
1183 sdev->vendor,
1184 sdev->model);
1185 return SCSI_SCAN_LUN_PRESENT;
1186 }
1187 scsi_device_put(sdev);
1188 } else
1189 sdev = scsi_alloc_sdev(starget, lun, hostdata);
1190 if (!sdev)
1191 goto out;
1192
1193 result = kmalloc(result_len, GFP_KERNEL);
1194 if (!result)
1195 goto out_free_sdev;
1196
1197 if (scsi_probe_lun(sdev, result, result_len, &bflags))
1198 goto out_free_result;
1199
1200 if (bflagsp)
1201 *bflagsp = bflags;
1202 /*
1203 * result contains valid SCSI INQUIRY data.
1204 */
1205 if ((result[0] >> 5) == 3) {
1206 /*
1207 * For a Peripheral qualifier 3 (011b), the SCSI
1208 * spec says: The device server is not capable of
1209 * supporting a physical device on this logical
1210 * unit.
1211 *
1212 * For disks, this implies that there is no
1213 * logical disk configured at sdev->lun, but there
1214 * is a target id responding.
1215 */
1216 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1217 " peripheral qualifier of 3, device not"
1218 " added\n"));
1219 if (lun == 0) {
1220 SCSI_LOG_SCAN_BUS(1, {
1221 unsigned char vend[9];
1222 unsigned char mod[17];
1223
1224 sdev_printk(KERN_INFO, sdev,
1225 "scsi scan: consider passing scsi_mod."
1226 "dev_flags=%s:%s:0x240 or 0x1000240\n",
1227 scsi_inq_str(vend, result, 8, 16),
1228 scsi_inq_str(mod, result, 16, 32));
1229 });
1230
1231 }
1232
1233 res = SCSI_SCAN_TARGET_PRESENT;
1234 goto out_free_result;
1235 }
1236
1237 /*
1238 * Some targets may set slight variations of PQ and PDT to signal
1239 * that no LUN is present, so don't add sdev in these cases.
1240 * Two specific examples are:
1241 * 1) NetApp targets: return PQ=1, PDT=0x1f
1242 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1243 * in the UFI 1.0 spec (we cannot rely on reserved bits).
1244 *
1245 * References:
1246 * 1) SCSI SPC-3, pp. 145-146
1247 * PQ=1: "A peripheral device having the specified peripheral
1248 * device type is not connected to this logical unit. However, the
1249 * device server is capable of supporting the specified peripheral
1250 * device type on this logical unit."
1251 * PDT=0x1f: "Unknown or no device type"
1252 * 2) USB UFI 1.0, p. 20
1253 * PDT=00h Direct-access device (floppy)
1254 * PDT=1Fh none (no FDD connected to the requested logical unit)
1255 */
1256 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1257 (result[0] & 0x1f) == 0x1f &&
1258 !scsi_is_wlun(lun)) {
1259 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1260 "scsi scan: peripheral device type"
1261 " of 31, no device added\n"));
1262 res = SCSI_SCAN_TARGET_PRESENT;
1263 goto out_free_result;
1264 }
1265
1266 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1267 if (res == SCSI_SCAN_LUN_PRESENT) {
1268 if (bflags & BLIST_KEY) {
1269 sdev->lockable = 0;
1270 scsi_unlock_floptical(sdev, result);
1271 }
1272 }
1273
1274 out_free_result:
1275 kfree(result);
1276 out_free_sdev:
1277 if (res == SCSI_SCAN_LUN_PRESENT) {
1278 if (sdevp) {
1279 if (scsi_device_get(sdev) == 0) {
1280 *sdevp = sdev;
1281 } else {
1282 __scsi_remove_device(sdev);
1283 res = SCSI_SCAN_NO_RESPONSE;
1284 }
1285 }
1286 } else
1287 __scsi_remove_device(sdev);
1288 out:
1289 return res;
1290}
1291
1292/**
1293 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1294 * @starget: pointer to target structure to scan
1295 * @bflags: black/white list flag for LUN 0
1296 * @scsi_level: Which version of the standard this device adheres to
1297 * @rescan: passed to scsi_probe_and_add_lun()
1298 *
1299 * Description:
1300 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1301 * scanned) to some maximum lun until a LUN is found with no device
1302 * attached. Use the bflags to figure out any oddities.
1303 *
1305 **/
1306static void scsi_sequential_lun_scan(struct scsi_target *starget,
1307 blist_flags_t bflags, int scsi_level,
1308 enum scsi_scan_mode rescan)
1309{
1310 uint max_dev_lun;
1311 u64 sparse_lun, lun;
1312 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1313
1314 SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1315 "scsi scan: Sequential scan\n"));
1316
1317 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1318 /*
1319 * If this device is known to support sparse multiple units,
1320 * override the other settings, and scan all of them. Normally,
1321 * SCSI-3 devices should be scanned via the REPORT LUNS.
1322 */
1323 if (bflags & BLIST_SPARSELUN) {
1324 max_dev_lun = shost->max_lun;
1325 sparse_lun = 1;
1326 } else
1327 sparse_lun = 0;
1328
1329 /*
1330 * If less than SCSI_1_CCS, and no special lun scanning, stop
1331 * scanning; this matches 2.4 behaviour, but could just be a bug
1332 * (to continue scanning a SCSI_1_CCS device).
1333 *
1334 * This test is broken. We might not have any device on lun0 for
1335 * a sparselun device, and if that's the case then how would we
1336 * know the real scsi_level, eh? It might make sense to just not
1337 * scan any SCSI_1 device for non-0 luns, but that check would best
1338 * go into scsi_alloc_sdev() and just have it return null when asked
1339 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1340 *
1341 if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1342 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1343 == 0))
1344 return;
1345 */
1346 /*
1347 * If this device is known to support multiple units, override
1348 * the other settings, and scan all of them.
1349 */
1350 if (bflags & BLIST_FORCELUN)
1351 max_dev_lun = shost->max_lun;
1352 /*
1353 * REGAL CDC-4X: avoid hang after LUN 4
1354 */
1355 if (bflags & BLIST_MAX5LUN)
1356 max_dev_lun = min(5U, max_dev_lun);
1357 /*
1358 * Do not scan SCSI-2 or lower device past LUN 7, unless
1359 * Do not scan SCSI-2 or lower devices past LUN 7, unless
1360 */
1361 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1362 max_dev_lun = min(8U, max_dev_lun);
1363 else
1364 max_dev_lun = min(256U, max_dev_lun);
1365
1366 /*
1367 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1368 * until we reach the max, or no LUN is found and we are not
1369 * sparse_lun.
1370 */
1371 for (lun = 1; lun < max_dev_lun; ++lun)
1372 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1373 NULL) != SCSI_SCAN_LUN_PRESENT) &&
1374 !sparse_lun)
1375 return;
1376}
1377
1378/**
1379 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1380 * @starget: which target
1381 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1382 * @rescan: nonzero if we can skip code only needed on first scan
1383 *
1384 * Description:
1385 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUNS command.
1386 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1387 *
1388 * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1389 * LUNs even if it's older than SCSI-3.
1390 * If BLIST_NOREPORTLUN is set, return 1 always.
1391 * If BLIST_NOLUN is set, return 0 always.
1392 * If starget->no_report_luns is set, return 1 always.
1393 *
1394 * Return:
1395 * 0: scan completed (or no memory, so further scanning is futile)
1396 * 1: could not scan with REPORT LUN
1397 **/
1398static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1399 enum scsi_scan_mode rescan)
1400{
1401 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1402 unsigned int length;
1403 u64 lun;
1404 unsigned int num_luns;
1405 unsigned int retries;
1406 int result;
1407 struct scsi_lun *lunp, *lun_data;
1408 struct scsi_sense_hdr sshdr;
1409 struct scsi_device *sdev;
1410 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1411 const struct scsi_exec_args exec_args = {
1412 .sshdr = &sshdr,
1413 };
1414 int ret = 0;
1415
1416 /*
1417 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1418 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
1419 * support more than 8 LUNs.
1420 * Don't attempt if the target doesn't support REPORT LUNS.
1421 */
1422 if (bflags & BLIST_NOREPORTLUN)
1423 return 1;
1424 if (starget->scsi_level < SCSI_2 &&
1425 starget->scsi_level != SCSI_UNKNOWN)
1426 return 1;
1427 if (starget->scsi_level < SCSI_3 &&
1428 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1429 return 1;
1430 if (bflags & BLIST_NOLUN)
1431 return 0;
1432 if (starget->no_report_luns)
1433 return 1;
1434
1435 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1436 sdev = scsi_alloc_sdev(starget, 0, NULL);
1437 if (!sdev)
1438 return 0;
1439 if (scsi_device_get(sdev)) {
1440 __scsi_remove_device(sdev);
1441 return 0;
1442 }
1443 }
1444
1445 /*
1446 * Allocate enough to hold the header (the same size as one scsi_lun)
1447 * plus the number of luns we are requesting. 511 was the default
1448 * value of the now removed max_report_luns parameter.
1449 */
1450 length = (511 + 1) * sizeof(struct scsi_lun);
1451retry:
1452 lun_data = kmalloc(length, GFP_KERNEL);
1453 if (!lun_data) {
1454 printk(ALLOC_FAILURE_MSG, __func__);
1455 goto out;
1456 }
1457
1458 scsi_cmd[0] = REPORT_LUNS;
1459
1460 /*
1461 * bytes 1 - 5: reserved, set to zero.
1462 */
1463 memset(&scsi_cmd[1], 0, 5);
1464
1465 /*
1466 * bytes 6 - 9: allocation length, i.e. the size of the buffer we provide.
1467 */
1468 put_unaligned_be32(length, &scsi_cmd[6]);
1469
1470 scsi_cmd[10] = 0; /* reserved */
1471 scsi_cmd[11] = 0; /* control */
1472
1473 /*
1474 * We can get a UNIT ATTENTION, for example a power on/reset, so
1475 * retry a few times (like sd.c does for TEST UNIT READY).
1476 * Experience shows some combinations of adapter/devices get at
1477 * least two power on/resets.
1478 *
1479 * Illegal requests (for devices that do not support REPORT LUNS)
1480 * should come through as a check condition, and will not generate
1481 * a retry.
1482 */
1483 for (retries = 0; retries < 3; retries++) {
1484 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1485 "scsi scan: Sending REPORT LUNS to (try %d)\n",
1486 retries));
1487
1488 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
1489 lun_data, length,
1490 SCSI_REPORT_LUNS_TIMEOUT, 3,
1491 &exec_args);
1492
1493 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1494 "scsi scan: REPORT LUNS"
1495 " %s (try %d) result 0x%x\n",
1496 result ? "failed" : "successful",
1497 retries, result));
1498 if (result == 0)
1499 break;
1500 else if (scsi_sense_valid(&sshdr)) {
1501 if (sshdr.sense_key != UNIT_ATTENTION)
1502 break;
1503 }
1504 }
1505
1506 if (result) {
1507 /*
1508 * The device probably does not support a REPORT LUN command
1509 */
1510 ret = 1;
1511 goto out_err;
1512 }
1513
1514 /*
1515 * Get the length from the first four bytes of lun_data.
1516 */
1517 if (get_unaligned_be32(lun_data->scsi_lun) +
1518 sizeof(struct scsi_lun) > length) {
1519 length = get_unaligned_be32(lun_data->scsi_lun) +
1520 sizeof(struct scsi_lun);
1521 kfree(lun_data);
1522 goto retry;
1523 }
1524 length = get_unaligned_be32(lun_data->scsi_lun);
1525
1526 num_luns = (length / sizeof(struct scsi_lun));
1527
1528 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1529 "scsi scan: REPORT LUN scan\n"));
1530
1531 /*
1532 * Scan the luns in lun_data. The entry at offset 0 is really
1533 * the header, so start at 1 and go up to and including num_luns.
1534 */
1535 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1536 lun = scsilun_to_int(lunp);
1537
1538 if (lun > sdev->host->max_lun) {
1539 sdev_printk(KERN_WARNING, sdev,
1540 "lun%llu has a LUN larger than"
1541 " allowed by the host adapter\n", lun);
1542 } else {
1543 int res;
1544
1545 res = scsi_probe_and_add_lun(starget,
1546 lun, NULL, NULL, rescan, NULL);
1547 if (res == SCSI_SCAN_NO_RESPONSE) {
1548 /*
1549 * Got some results, but now none, abort.
1550 */
1551 sdev_printk(KERN_ERR, sdev,
1552 "Unexpected response"
1553 " from lun %llu while scanning, scan"
1554 " aborted\n", (unsigned long long)lun);
1555 break;
1556 }
1557 }
1558 }
1559
1560 out_err:
1561 kfree(lun_data);
1562 out:
1563 if (scsi_device_created(sdev))
1564 /*
1565 * the sdev we used didn't appear in the report luns scan
1566 */
1567 __scsi_remove_device(sdev);
1568 scsi_device_put(sdev);
1569 return ret;
1570}
1571
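/*
 * __scsi_add_device - allocate a target and probe a single LUN on it
 *
 * Used by LLDs that discover devices themselves rather than relying on a
 * full host scan. Returns the scsi_device with a reference held, or an
 * ERR_PTR() on failure.
 */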
1572struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1573 uint id, u64 lun, void *hostdata)
1574{
1575 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1576 struct device *parent = &shost->shost_gendev;
1577 struct scsi_target *starget;
1578
1579 if (strncmp(scsi_scan_type, "none", 4) == 0)
1580 return ERR_PTR(-ENODEV);
1581
1582 starget = scsi_alloc_target(parent, channel, id);
1583 if (!starget)
1584 return ERR_PTR(-ENOMEM);
1585 scsi_autopm_get_target(starget);
1586
1587 mutex_lock(&shost->scan_mutex);
1588 if (!shost->async_scan)
1589 scsi_complete_async_scans();
1590
1591 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1592 scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
1593 SCSI_SCAN_RESCAN, hostdata);
1594 scsi_autopm_put_host(shost);
1595 }
1596 mutex_unlock(&shost->scan_mutex);
1597 scsi_autopm_put_target(starget);
1598 /*
1599 * paired with scsi_alloc_target(). Target will be destroyed unless
1600 * scsi_probe_and_add_lun made an underlying device visible
1601 */
1602 scsi_target_reap(starget);
1603 put_device(&starget->dev);
1604
1605 return sdev;
1606}
1607EXPORT_SYMBOL(__scsi_add_device);
1608
1609int scsi_add_device(struct Scsi_Host *host, uint channel,
1610 uint target, u64 lun)
1611{
1612 struct scsi_device *sdev =
1613 __scsi_add_device(host, channel, target, lun, NULL);
1614 if (IS_ERR(sdev))
1615 return PTR_ERR(sdev);
1616
1617 scsi_device_put(sdev);
1618 return 0;
1619}
1620EXPORT_SYMBOL(scsi_add_device);
1621
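/**
 * scsi_rescan_device - re-read VPD/CDL data and notify the attached driver
 * @sdev: the device to rescan
 *
 * Returns 0 on success or -EWOULDBLOCK if the device or its queue is not
 * running, in which case nothing is done.
 */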
1622int scsi_rescan_device(struct scsi_device *sdev)
1623{
1624 struct device *dev = &sdev->sdev_gendev;
1625 int ret = 0;
1626
1627 device_lock(dev);
1628
1629 /*
1630 * Bail out if the device or its queue are not running. Otherwise,
1631 * the rescan may block waiting for commands to be executed, with us
1632 * holding the device lock. This can result in a potential deadlock
1633 * in the power management core code when system resume is on-going.
1634 */
1635 if (sdev->sdev_state != SDEV_RUNNING ||
1636 blk_queue_pm_only(sdev->request_queue)) {
1637 ret = -EWOULDBLOCK;
1638 goto unlock;
1639 }
1640
1641 scsi_attach_vpd(sdev);
1642 scsi_cdl_check(sdev);
1643
1644 if (sdev->handler && sdev->handler->rescan)
1645 sdev->handler->rescan(sdev);
1646
1647 if (dev->driver && try_module_get(dev->driver->owner)) {
1648 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1649
1650 if (drv->rescan)
1651 drv->rescan(dev);
1652 module_put(dev->driver->owner);
1653 }
1654
1655unlock:
1656 device_unlock(dev);
1657
1658 return ret;
1659}
1660EXPORT_SYMBOL(scsi_rescan_device);
1661
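/*
 * __scsi_scan_target - scan a single target id
 *
 * Probe the requested LUN or, for SCAN_WILD_CARD, probe LUN 0 and then
 * scan the remaining LUNs via REPORT LUNS or a sequential scan. The host
 * adapter's own id is never scanned.
 */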
1662static void __scsi_scan_target(struct device *parent, unsigned int channel,
1663 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1664{
1665 struct Scsi_Host *shost = dev_to_shost(parent);
1666 blist_flags_t bflags = 0;
1667 int res;
1668 struct scsi_target *starget;
1669
1670 if (shost->this_id == id)
1671 /*
1672 * Don't scan the host adapter
1673 */
1674 return;
1675
1676 starget = scsi_alloc_target(parent, channel, id);
1677 if (!starget)
1678 return;
1679 scsi_autopm_get_target(starget);
1680
1681 if (lun != SCAN_WILD_CARD) {
1682 /*
1683 * Scan for a specific host/chan/id/lun.
1684 */
1685 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1686 goto out_reap;
1687 }
1688
1689 /*
1690 * Scan LUN 0; if there is some response, scan further. Ideally, we
1691 * would not configure LUN 0 until all LUNs are scanned.
1692 */
1693 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1694 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1695 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1696 /*
1697 * The REPORT LUN did not scan the target,
1698 * do a sequential scan.
1699 */
1700 scsi_sequential_lun_scan(starget, bflags,
1701 starget->scsi_level, rescan);
1702 }
1703
1704 out_reap:
1705 scsi_autopm_put_target(starget);
1706 /*
1707 * paired with scsi_alloc_target(): determine if the target has
1708 * any children at all and if not, nuke it
1709 */
1710 scsi_target_reap(starget);
1711
1712 put_device(&starget->dev);
1713}
1714
1715/**
1716 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1717 * @parent: host to scan
1718 * @channel: channel to scan
1719 * @id: target id to scan
1720 * @lun: Specific LUN to scan or SCAN_WILD_CARD
1721 * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1722 * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1723 * and SCSI_SCAN_MANUAL to force scanning even if
1724 * 'scan=manual' is set.
1725 *
1726 * Description:
1727 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1728 * and possibly all LUNs on the target id.
1729 *
1730 * First try a REPORT LUN scan, if that does not scan the target, do a
1731 * sequential scan of LUNs on the target id.
1732 **/
1733void scsi_scan_target(struct device *parent, unsigned int channel,
1734 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1735{
1736 struct Scsi_Host *shost = dev_to_shost(parent);
1737
1738 if (strncmp(scsi_scan_type, "none", 4) == 0)
1739 return;
1740
1741 if (rescan != SCSI_SCAN_MANUAL &&
1742 strncmp(scsi_scan_type, "manual", 6) == 0)
1743 return;
1744
1745 mutex_lock(&shost->scan_mutex);
1746 if (!shost->async_scan)
1747 scsi_complete_async_scans();
1748
1749 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1750 __scsi_scan_target(parent, channel, id, lun, rescan);
1751 scsi_autopm_put_host(shost);
1752 }
1753 mutex_unlock(&shost->scan_mutex);
1754}
1755EXPORT_SYMBOL(scsi_scan_target);
1756
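/*
 * scsi_scan_channel - scan one or all target ids on a channel
 *
 * For SCAN_WILD_CARD, walk every id up to shost->max_id (in reverse when
 * shost->reverse_ordering is set); otherwise scan only the given id.
 */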
1757static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1758 unsigned int id, u64 lun,
1759 enum scsi_scan_mode rescan)
1760{
1761 uint order_id;
1762
1763 if (id == SCAN_WILD_CARD)
1764 for (id = 0; id < shost->max_id; ++id) {
1765 /*
1766 * XXX adapter drivers when possible (FCP, iSCSI)
1767 * could modify max_id to match the current max,
1768 * not the absolute max.
1769 *
1770 * XXX add a shost id iterator, so for example,
1771 * the FC ID can be the same as a target id
1772 * without a huge overhead of sparse id's.
1773 */
1774 if (shost->reverse_ordering)
1775 /*
1776 * Scan from high to low id.
1777 */
1778 order_id = shost->max_id - id - 1;
1779 else
1780 order_id = id;
1781 __scsi_scan_target(&shost->shost_gendev, channel,
1782 order_id, lun, rescan);
1783 }
1784 else
1785 __scsi_scan_target(&shost->shost_gendev, channel,
1786 id, lun, rescan);
1787}
1788
1789int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1790 unsigned int id, u64 lun,
1791 enum scsi_scan_mode rescan)
1792{
1793 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1794 "%s: <%u:%u:%llu>\n",
1795 __func__, channel, id, lun));
1796
1797 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1798 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1799 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1800 return -EINVAL;
1801
1802 mutex_lock(&shost->scan_mutex);
1803 if (!shost->async_scan)
1804 scsi_complete_async_scans();
1805
1806 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1807 if (channel == SCAN_WILD_CARD)
1808 for (channel = 0; channel <= shost->max_channel;
1809 channel++)
1810 scsi_scan_channel(shost, channel, id, lun,
1811 rescan);
1812 else
1813 scsi_scan_channel(shost, channel, id, lun, rescan);
1814 scsi_autopm_put_host(shost);
1815 }
1816 mutex_unlock(&shost->scan_mutex);
1817
1818 return 0;
1819}
1820
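/*
 * Make the devices found during an asynchronous scan visible in sysfs.
 * Devices that were deleted in the meantime or that are already visible
 * are skipped; devices that cannot be added are removed again.
 */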
1821static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1822{
1823 struct scsi_device *sdev;
1824 shost_for_each_device(sdev, shost) {
1825 /* target removed before the device could be added */
1826 if (sdev->sdev_state == SDEV_DEL)
1827 continue;
1828 /* If device is already visible, skip adding it to sysfs */
1829 if (sdev->is_visible)
1830 continue;
1831 if (!scsi_host_scan_allowed(shost) ||
1832 scsi_sysfs_add_sdev(sdev) != 0)
1833 __scsi_remove_device(sdev);
1834 }
1835}
1836
1837/**
1838 * scsi_prep_async_scan - prepare for an async scan
1839 * @shost: the host which will be scanned
1840 * Returns: a cookie to be passed to scsi_finish_async_scan()
1841 *
1842 * Tells the midlayer this host is going to do an asynchronous scan.
1843 * It reserves the host's position in the scanning list and ensures
1844 * that other asynchronous scans started after this one won't affect the
1845 * ordering of the discovered devices.
1846 */
1847static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1848{
1849 struct async_scan_data *data = NULL;
1850 unsigned long flags;
1851
1852 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1853 return NULL;
1854
1855 mutex_lock(&shost->scan_mutex);
1856 if (shost->async_scan) {
1857 shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1858 goto err;
1859 }
1860
1861 data = kmalloc(sizeof(*data), GFP_KERNEL);
1862 if (!data)
1863 goto err;
1864 data->shost = scsi_host_get(shost);
1865 if (!data->shost)
1866 goto err;
1867 init_completion(&data->prev_finished);
1868
1869 spin_lock_irqsave(shost->host_lock, flags);
1870 shost->async_scan = 1;
1871 spin_unlock_irqrestore(shost->host_lock, flags);
1872 mutex_unlock(&shost->scan_mutex);
1873
1874 spin_lock(&async_scan_lock);
1875 if (list_empty(&scanning_hosts))
1876 complete(&data->prev_finished);
1877 list_add_tail(&data->list, &scanning_hosts);
1878 spin_unlock(&async_scan_lock);
1879
1880 return data;
1881
1882 err:
1883 mutex_unlock(&shost->scan_mutex);
1884 kfree(data);
1885 return NULL;
1886}
1887
1888/**
1889 * scsi_finish_async_scan - asynchronous scan has finished
1890 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1891 *
1892 * All the devices currently attached to this host have been found.
1893 * This function announces all the devices it has found to the rest
1894 * of the system.
1895 */
1896static void scsi_finish_async_scan(struct async_scan_data *data)
1897{
1898 struct Scsi_Host *shost;
1899 unsigned long flags;
1900
1901 if (!data)
1902 return;
1903
1904 shost = data->shost;
1905
1906 mutex_lock(&shost->scan_mutex);
1907
1908 if (!shost->async_scan) {
1909 shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1910 dump_stack();
1911 mutex_unlock(&shost->scan_mutex);
1912 return;
1913 }
1914
1915 wait_for_completion(&data->prev_finished);
1916
1917 scsi_sysfs_add_devices(shost);
1918
1919 spin_lock_irqsave(shost->host_lock, flags);
1920 shost->async_scan = 0;
1921 spin_unlock_irqrestore(shost->host_lock, flags);
1922
1923 mutex_unlock(&shost->scan_mutex);
1924
1925 spin_lock(&async_scan_lock);
1926 list_del(&data->list);
1927 if (!list_empty(&scanning_hosts)) {
1928 struct async_scan_data *next = list_entry(scanning_hosts.next,
1929 struct async_scan_data, list);
1930 complete(&next->prev_finished);
1931 }
1932 spin_unlock(&async_scan_lock);
1933
1934 scsi_autopm_put_host(shost);
1935 scsi_host_put(shost);
1936 kfree(data);
1937}
1938
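/*
 * Perform the actual scan for one host: if the LLD implements its own
 * scan_start()/scan_finished() pair, kick it off and poll for completion;
 * otherwise fall back to a full wildcard scan of the host.
 */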
1939static void do_scsi_scan_host(struct Scsi_Host *shost)
1940{
1941 if (shost->hostt->scan_finished) {
1942 unsigned long start = jiffies;
1943 if (shost->hostt->scan_start)
1944 shost->hostt->scan_start(shost);
1945
1946 while (!shost->hostt->scan_finished(shost, jiffies - start))
1947 msleep(10);
1948 } else {
1949 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
1950 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
1951 }
1952}
1953
1954static void do_scan_async(void *_data, async_cookie_t c)
1955{
1956 struct async_scan_data *data = _data;
1957 struct Scsi_Host *shost = data->shost;
1958
1959 do_scsi_scan_host(shost);
1960 scsi_finish_async_scan(data);
1961}
1962
1963/**
1964 * scsi_scan_host - scan the given adapter
1965 * @shost: adapter to scan
1966 **/
1967void scsi_scan_host(struct Scsi_Host *shost)
1968{
1969 struct async_scan_data *data;
1970
1971 if (strncmp(scsi_scan_type, "none", 4) == 0 ||
1972 strncmp(scsi_scan_type, "manual", 6) == 0)
1973 return;
1974 if (scsi_autopm_get_host(shost) < 0)
1975 return;
1976
1977 data = scsi_prep_async_scan(shost);
1978 if (!data) {
1979 do_scsi_scan_host(shost);
1980 scsi_autopm_put_host(shost);
1981 return;
1982 }
1983
1984 /* register with the async subsystem so wait_for_device_probe()
1985 * will flush this work
1986 */
1987 async_schedule(do_scan_async, data);
1988
1989 /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
1990}
1991EXPORT_SYMBOL(scsi_scan_host);
1992
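/*
 * scsi_forget_host - remove all devices from a host
 *
 * Called during host removal. The host lock is dropped for each
 * __scsi_remove_device() call, so the list walk is restarted from the
 * beginning after every removal.
 */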
1993void scsi_forget_host(struct Scsi_Host *shost)
1994{
1995 struct scsi_device *sdev;
1996 unsigned long flags;
1997
1998 restart:
1999 spin_lock_irqsave(shost->host_lock, flags);
2000 list_for_each_entry(sdev, &shost->__devices, siblings) {
2001 if (sdev->sdev_state == SDEV_DEL)
2002 continue;
2003 spin_unlock_irqrestore(shost->host_lock, flags);
2004 __scsi_remove_device(sdev);
2005 goto restart;
2006 }
2007 spin_unlock_irqrestore(shost->host_lock, flags);
2008}
2009
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * scsi_scan.c
4 *
5 * Copyright (C) 2000 Eric Youngdale,
6 * Copyright (C) 2002 Patrick Mansfield
7 *
8 * The general scanning/probing algorithm is as follows, exceptions are
9 * made to it depending on device specific flags, compilation options, and
10 * global variable (boot or module load time) settings.
11 *
12 * A specific LUN is scanned via an INQUIRY command; if the LUN has a
13 * device attached, a scsi_device is allocated and setup for it.
14 *
15 * For every id of every channel on the given host:
16 *
17 * Scan LUN 0; if the target responds to LUN 0 (even if there is no
18 * device or storage attached to LUN 0):
19 *
20 * If LUN 0 has a device attached, allocate and setup a
21 * scsi_device for it.
22 *
23 * If target is SCSI-3 or up, issue a REPORT LUN, and scan
24 * all of the LUNs returned by the REPORT LUN; else,
25 * sequentially scan LUNs up until some maximum is reached,
26 * or a LUN is seen that cannot have a device attached to it.
27 */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/init.h>
32#include <linux/blkdev.h>
33#include <linux/delay.h>
34#include <linux/kthread.h>
35#include <linux/spinlock.h>
36#include <linux/async.h>
37#include <linux/slab.h>
38#include <linux/unaligned.h>
39
40#include <scsi/scsi.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_driver.h>
44#include <scsi/scsi_devinfo.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_transport.h>
47#include <scsi/scsi_dh.h>
48#include <scsi/scsi_eh.h>
49
50#include "scsi_priv.h"
51#include "scsi_logging.h"
52
53#define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
54 " SCSI scanning, some SCSI devices might not be configured\n"
55
56/*
57 * Default timeout
58 */
59#define SCSI_TIMEOUT (2*HZ)
60#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61
62/*
63 * Prefix values for the SCSI id's (stored in sysfs name field)
64 */
65#define SCSI_UID_SER_NUM 'S'
66#define SCSI_UID_UNKNOWN 'Z'
67
68/*
69 * Return values of some of the scanning functions.
70 *
71 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
72 * includes allocation or general failures preventing IO from being sent.
73 *
74 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
75 * on the given LUN.
76 *
77 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
78 * given LUN.
79 */
80#define SCSI_SCAN_NO_RESPONSE 0
81#define SCSI_SCAN_TARGET_PRESENT 1
82#define SCSI_SCAN_LUN_PRESENT 2
83
84static const char *scsi_null_device_strs = "nullnullnullnull";
85
86#define MAX_SCSI_LUNS 512
87
88static u64 max_scsi_luns = MAX_SCSI_LUNS;
89
90module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91MODULE_PARM_DESC(max_luns,
92 "last scsi LUN (should be between 1 and 2^64-1)");
93
94#ifdef CONFIG_SCSI_SCAN_ASYNC
95#define SCSI_SCAN_TYPE_DEFAULT "async"
96#else
97#define SCSI_SCAN_TYPE_DEFAULT "sync"
98#endif
99
100static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101
102module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103 S_IRUGO|S_IWUSR);
104MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105 "Setting to 'manual' disables automatic scanning, but allows "
106 "for manual device scan via the 'scan' sysfs attribute.");
107
108static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109
110module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111MODULE_PARM_DESC(inq_timeout,
112 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113 " Default is 20. Some devices may need more; most need less.");
114
115/* This lock protects only this list */
116static DEFINE_SPINLOCK(async_scan_lock);
117static LIST_HEAD(scanning_hosts);
118
119struct async_scan_data {
120 struct list_head list;
121 struct Scsi_Host *shost;
122 struct completion prev_finished;
123};
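/*
 * Ordering protocol for asynchronous scans: every host that starts an
 * async scan appends an async_scan_data entry to scanning_hosts. The
 * entry at the head of the list has prev_finished completed; each
 * finishing scan completes prev_finished of the next entry, so devices
 * are announced in the order the scans were started.
 */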
124
125/*
126 * scsi_enable_async_suspend - Enable async suspend and resume
127 */
128void scsi_enable_async_suspend(struct device *dev)
129{
130 /*
131 * If a user has disabled async probing a likely reason is due to a
132 * storage enclosure that does not inject staggered spin-ups. For
133 * safety, make resume synchronous as well in that case.
134 */
135 if (strncmp(scsi_scan_type, "async", 5) != 0)
136 return;
137 /* Enable asynchronous suspend and resume. */
138 device_enable_async_suspend(dev);
139}
140
141/**
142 * scsi_complete_async_scans - Wait for asynchronous scans to complete
143 *
144 * When this function returns, any host which started scanning before
145 * this function was called will have finished its scan. Hosts which
146 * started scanning after this function was called may or may not have
147 * finished.
148 */
149int scsi_complete_async_scans(void)
150{
151 struct async_scan_data *data;
152
153 do {
154 if (list_empty(&scanning_hosts))
155 return 0;
156 /* If we can't get memory immediately, that's OK. Just
157 * sleep a little. Even if we never get memory, the async
158 * scans will finish eventually.
159 */
160 data = kmalloc(sizeof(*data), GFP_KERNEL);
161 if (!data)
162 msleep(1);
163 } while (!data);
164
165 data->shost = NULL;
166 init_completion(&data->prev_finished);
167
168 spin_lock(&async_scan_lock);
169 /* Check that there's still somebody else on the list */
170 if (list_empty(&scanning_hosts))
171 goto done;
172 list_add_tail(&data->list, &scanning_hosts);
173 spin_unlock(&async_scan_lock);
174
175 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
176 wait_for_completion(&data->prev_finished);
177
178 spin_lock(&async_scan_lock);
179 list_del(&data->list);
180 if (!list_empty(&scanning_hosts)) {
181 struct async_scan_data *next = list_entry(scanning_hosts.next,
182 struct async_scan_data, list);
183 complete(&next->prev_finished);
184 }
185 done:
186 spin_unlock(&async_scan_lock);
187
188 kfree(data);
189 return 0;
190}
191
192/**
193 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
194 * @sdev: scsi device to send command to
195 * @result: area to store the result of the MODE SENSE
196 *
197 * Description:
198 * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
199 * Called for BLIST_KEY devices.
200 **/
201static void scsi_unlock_floptical(struct scsi_device *sdev,
202 unsigned char *result)
203{
204 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
205
206 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
207 scsi_cmd[0] = MODE_SENSE;
208 scsi_cmd[1] = 0;
209 scsi_cmd[2] = 0x2e;
210 scsi_cmd[3] = 0;
211 scsi_cmd[4] = 0x2a; /* size */
212 scsi_cmd[5] = 0;
213 scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
214 SCSI_TIMEOUT, 3, NULL);
215}
216
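/*
 * The budget map is a per-device sbitmap that limits the number of
 * in-flight commands to the device's queue depth. It is (re)allocated
 * whenever the depth, and hence the sbitmap shift, changes, e.g. after
 * ->device_configure picks a new default queue depth.
 */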
217static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
218 unsigned int depth)
219{
220 int new_shift = sbitmap_calculate_shift(depth);
221 bool need_alloc = !sdev->budget_map.map;
222 bool need_free = false;
223 int ret;
224 struct sbitmap sb_backup;
225
226 depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
227
228 /*
229	 * Reallocate if a new shift was calculated, which happens when a new
230	 * default queue depth is set up after calling ->device_configure
231 */
232 if (!need_alloc && new_shift != sdev->budget_map.shift)
233 need_alloc = need_free = true;
234
235 if (!need_alloc)
236 return 0;
237
238 /*
239	 * The request queue has to be frozen while the budget map is
240	 * reallocated; the disk has not been added yet, so freezing is fast
241 */
242 if (need_free) {
243 blk_mq_freeze_queue(sdev->request_queue);
244 sb_backup = sdev->budget_map;
245 }
246 ret = sbitmap_init_node(&sdev->budget_map,
247 scsi_device_max_queue_depth(sdev),
248 new_shift, GFP_KERNEL,
249 sdev->request_queue->node, false, true);
250 if (!ret)
251 sbitmap_resize(&sdev->budget_map, depth);
252
253 if (need_free) {
254 if (ret)
255 sdev->budget_map = sb_backup;
256 else
257 sbitmap_free(&sb_backup);
258 ret = 0;
259 blk_mq_unfreeze_queue(sdev->request_queue);
260 }
261 return ret;
262}
263
264/**
265 * scsi_alloc_sdev - allocate and setup a scsi_device
266 * @starget: which target to allocate a &scsi_device for
267 * @lun: which lun
268 * @hostdata: usually NULL and set by ->slave_alloc instead
269 *
270 * Description:
271 *     Allocate, initialize for io, and return a pointer to a scsi_device.
272 *     Stores the host, channel, id and @lun in the scsi_device, and
273 *     adds the scsi_device to the appropriate list.
274 *
275 * Return value:
276 *     scsi_device pointer, or NULL on failure.
277 **/
278static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
279 u64 lun, void *hostdata)
280{
281 unsigned int depth;
282 struct scsi_device *sdev;
283 struct request_queue *q;
284 int display_failure_msg = 1, ret;
285 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
286 struct queue_limits lim;
287
288 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
289 GFP_KERNEL);
290 if (!sdev)
291 goto out;
292
293 sdev->vendor = scsi_null_device_strs;
294 sdev->model = scsi_null_device_strs;
295 sdev->rev = scsi_null_device_strs;
296 sdev->host = shost;
297 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
298 sdev->id = starget->id;
299 sdev->lun = lun;
300 sdev->channel = starget->channel;
301 mutex_init(&sdev->state_mutex);
302 sdev->sdev_state = SDEV_CREATED;
303 INIT_LIST_HEAD(&sdev->siblings);
304 INIT_LIST_HEAD(&sdev->same_target_siblings);
305 INIT_LIST_HEAD(&sdev->starved_entry);
306 INIT_LIST_HEAD(&sdev->event_list);
307 spin_lock_init(&sdev->list_lock);
308 mutex_init(&sdev->inquiry_mutex);
309 INIT_WORK(&sdev->event_work, scsi_evt_thread);
310 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
311
312 sdev->sdev_gendev.parent = get_device(&starget->dev);
313 sdev->sdev_target = starget;
314
315 /* usually NULL and set by ->slave_alloc instead */
316 sdev->hostdata = hostdata;
317
318 /* if the device needs this changing, it may do so in the
319 * slave_configure function */
320 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
321
322 /*
323 * Some low level driver could use device->type
324 */
325 sdev->type = -1;
326
327 /*
328 * Assume that the device will have handshaking problems,
329 * and then fix this field later if it turns out it
330 * doesn't
331 */
332 sdev->borken = 1;
333
334 sdev->sg_reserved_size = INT_MAX;
335
336 scsi_init_limits(shost, &lim);
337 q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
338 if (IS_ERR(q)) {
339		/* release fn is set up in scsi_sysfs_device_initialize(), so
340 * have to free and put manually here */
341 put_device(&starget->dev);
342 kfree(sdev);
343 goto out;
344 }
345 kref_get(&sdev->host->tagset_refcnt);
346 sdev->request_queue = q;
347
348 depth = sdev->host->cmd_per_lun ?: 1;
349
350 /*
351 * Use .can_queue as budget map's depth because we have to
352	 * support adjusting queue depth from sysfs. Meanwhile, use the
353	 * default device queue depth to figure out the sbitmap shift,
354	 * since that is the queue depth used most of the time.
355 */
356 if (scsi_realloc_sdev_budget_map(sdev, depth)) {
357 put_device(&starget->dev);
358 kfree(sdev);
359 goto out;
360 }
361
362 scsi_change_queue_depth(sdev, depth);
363
364 scsi_sysfs_device_initialize(sdev);
365
366 if (shost->hostt->slave_alloc) {
367 ret = shost->hostt->slave_alloc(sdev);
368 if (ret) {
369 /*
370 * if LLDD reports slave not present, don't clutter
371 * console with alloc failure messages
372 */
373 if (ret == -ENXIO)
374 display_failure_msg = 0;
375 goto out_device_destroy;
376 }
377 }
378
379 return sdev;
380
381out_device_destroy:
382 __scsi_remove_device(sdev);
383out:
384 if (display_failure_msg)
385 printk(ALLOC_FAILURE_MSG, __func__);
386 return NULL;
387}
388
389static void scsi_target_destroy(struct scsi_target *starget)
390{
391 struct device *dev = &starget->dev;
392 struct Scsi_Host *shost = dev_to_shost(dev->parent);
393 unsigned long flags;
394
395 BUG_ON(starget->state == STARGET_DEL);
396 starget->state = STARGET_DEL;
397 transport_destroy_device(dev);
398 spin_lock_irqsave(shost->host_lock, flags);
399 if (shost->hostt->target_destroy)
400 shost->hostt->target_destroy(starget);
401 list_del_init(&starget->siblings);
402 spin_unlock_irqrestore(shost->host_lock, flags);
403 put_device(dev);
404}
405
406static void scsi_target_dev_release(struct device *dev)
407{
408 struct device *parent = dev->parent;
409 struct scsi_target *starget = to_scsi_target(dev);
410
411 kfree(starget);
412 put_device(parent);
413}
414
415static const struct device_type scsi_target_type = {
416 .name = "scsi_target",
417 .release = scsi_target_dev_release,
418};
419
420int scsi_is_target_device(const struct device *dev)
421{
422 return dev->type == &scsi_target_type;
423}
424EXPORT_SYMBOL(scsi_is_target_device);
425
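/*
 * Find an existing target on @parent matching @channel and @id. The
 * caller is expected to hold the host lock, since this walks
 * shost->__targets. Returns the target with an extra device reference,
 * or NULL if no matching target exists.
 */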
426static struct scsi_target *__scsi_find_target(struct device *parent,
427 int channel, uint id)
428{
429 struct scsi_target *starget, *found_starget = NULL;
430 struct Scsi_Host *shost = dev_to_shost(parent);
431 /*
432 * Search for an existing target for this sdev.
433 */
434 list_for_each_entry(starget, &shost->__targets, siblings) {
435 if (starget->id == id &&
436 starget->channel == channel) {
437 found_starget = starget;
438 break;
439 }
440 }
441 if (found_starget)
442 get_device(&found_starget->dev);
443
444 return found_starget;
445}
446
447/**
448 * scsi_target_reap_ref_release - remove target from visibility
449 * @kref: the reap_ref in the target being released
450 *
451 * Called on last put of reap_ref, which is the indication that no device
452 * under this target is visible anymore, so render the target invisible in
453 * sysfs. Note: we have to be in user context here because target reaping
454 * should be done in the places where scsi device visibility is removed.
455 */
456static void scsi_target_reap_ref_release(struct kref *kref)
457{
458 struct scsi_target *starget
459 = container_of(kref, struct scsi_target, reap_ref);
460
461 /*
462 * if we get here and the target is still in a CREATED state that
463 * means it was allocated but never made visible (because a scan
464 * turned up no LUNs), so don't call device_del() on it.
465 */
466 if ((starget->state != STARGET_CREATED) &&
467 (starget->state != STARGET_CREATED_REMOVE)) {
468 transport_remove_device(&starget->dev);
469 device_del(&starget->dev);
470 }
471 scsi_target_destroy(starget);
472}
473
474static void scsi_target_reap_ref_put(struct scsi_target *starget)
475{
476 kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
477}
478
479/**
480 * scsi_alloc_target - allocate a new or find an existing target
481 * @parent: parent of the target (need not be a scsi host)
482 * @channel: target channel number (zero if no channels)
483 * @id: target id number
484 *
485 * Return an existing target if one exists, provided it hasn't already
486 * gone into STARGET_DEL state, otherwise allocate a new target.
487 *
488 * The target is returned with an incremented reference, so the caller
489 * is responsible for both reaping and doing a last put
490 */
491static struct scsi_target *scsi_alloc_target(struct device *parent,
492 int channel, uint id)
493{
494 struct Scsi_Host *shost = dev_to_shost(parent);
495 struct device *dev = NULL;
496 unsigned long flags;
497 const int size = sizeof(struct scsi_target)
498 + shost->transportt->target_size;
499 struct scsi_target *starget;
500 struct scsi_target *found_target;
501 int error, ref_got;
502
503 starget = kzalloc(size, GFP_KERNEL);
504 if (!starget) {
505 printk(KERN_ERR "%s: allocation failure\n", __func__);
506 return NULL;
507 }
508 dev = &starget->dev;
509 device_initialize(dev);
510 kref_init(&starget->reap_ref);
511 dev->parent = get_device(parent);
512 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
513 dev->bus = &scsi_bus_type;
514 dev->type = &scsi_target_type;
515 scsi_enable_async_suspend(dev);
516 starget->id = id;
517 starget->channel = channel;
518 starget->can_queue = 0;
519 INIT_LIST_HEAD(&starget->siblings);
520 INIT_LIST_HEAD(&starget->devices);
521 starget->state = STARGET_CREATED;
522 starget->scsi_level = SCSI_2;
523 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
524 retry:
525 spin_lock_irqsave(shost->host_lock, flags);
526
527 found_target = __scsi_find_target(parent, channel, id);
528 if (found_target)
529 goto found;
530
531 list_add_tail(&starget->siblings, &shost->__targets);
532 spin_unlock_irqrestore(shost->host_lock, flags);
533 /* allocate and add */
534 transport_setup_device(dev);
535 if (shost->hostt->target_alloc) {
536 error = shost->hostt->target_alloc(starget);
537
538		if (error) {
539 if (error != -ENXIO)
540 dev_err(dev, "target allocation failed, error %d\n", error);
541 /* don't want scsi_target_reap to do the final
542 * put because it will be under the host lock */
543 scsi_target_destroy(starget);
544 return NULL;
545 }
546 }
547 get_device(dev);
548
549 return starget;
550
551 found:
552 /*
553 * release routine already fired if kref is zero, so if we can still
554 * take the reference, the target must be alive. If we can't, it must
555 * be dying and we need to wait for a new target
556 */
557 ref_got = kref_get_unless_zero(&found_target->reap_ref);
558
559 spin_unlock_irqrestore(shost->host_lock, flags);
560 if (ref_got) {
561 put_device(dev);
562 return found_target;
563 }
564 /*
565 * Unfortunately, we found a dying target; need to wait until it's
566 * dead before we can get a new one. There is an anomaly here. We
567 * *should* call scsi_target_reap() to balance the kref_get() of the
568	 * reap_ref above. However, since the target is being released, it's
569 * already invisible and the reap_ref is irrelevant. If we call
570 * scsi_target_reap() we might spuriously do another device_del() on
571 * an already invisible target.
572 */
573 put_device(&found_target->dev);
574 /*
575 * length of time is irrelevant here, we just want to yield the CPU
576 * for a tick to avoid busy waiting for the target to die.
577 */
578 msleep(1);
579 goto retry;
580}
581
582/**
583 * scsi_target_reap - check to see if target is in use and destroy if not
584 * @starget: target to be checked
585 *
586 * This is used after removing a LUN or doing a last put of the target
587 * it checks atomically that nothing is using the target and removes
588 * it if so.
589 */
590void scsi_target_reap(struct scsi_target *starget)
591{
592 /*
593	 * serious problem if this triggers: STARGET_DEL is only set if
594 * the reap_ref drops to zero, so we're trying to do another final put
595 * on an already released kref
596 */
597 BUG_ON(starget->state == STARGET_DEL);
598 scsi_target_reap_ref_put(starget);
599}
600
601/**
602 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
603 * INQUIRY result string
604 * @s: INQUIRY result string to sanitize
605 * @len: length of the string
606 *
607 * Description:
608 * The SCSI spec says that INQUIRY vendor, product, and revision
609 * strings must consist entirely of graphic ASCII characters,
610 * padded on the right with spaces. Since not all devices obey
611 * this rule, we will replace non-graphic or non-ASCII characters
612 * with spaces. Exception: a NUL character is interpreted as a
613 * string terminator, so all the following characters are set to
614 * spaces.
615 **/
616void scsi_sanitize_inquiry_string(unsigned char *s, int len)
617{
618 int terminated = 0;
619
620 for (; len > 0; (--len, ++s)) {
621 if (*s == 0)
622 terminated = 1;
623 if (terminated || *s < 0x20 || *s > 0x7e)
624 *s = ' ';
625 }
626}
627EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
628
629
630/**
631 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
632 * @sdev: scsi_device to probe
633 * @inq_result: area to store the INQUIRY result
634 * @result_len: len of inq_result
635 * @bflags: store any bflags found here
636 *
637 * Description:
638 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY;
639 *
640 * If the INQUIRY is successful, zero is returned and the
641 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
642 *     are copied to the scsi_device; any flags value is stored in *@bflags.
643 **/
644static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
645 int result_len, blist_flags_t *bflags)
646{
647 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
648 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
649 int response_len = 0;
650 int pass, count, result, resid;
651 struct scsi_failure failure_defs[] = {
652 /*
653 * not-ready to ready transition [asc/ascq=0x28/0x0] or
654 * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
655 * should not yield UNIT_ATTENTION but many buggy devices do
656 * so anyway.
657 */
658 {
659 .sense = UNIT_ATTENTION,
660 .asc = 0x28,
661 .result = SAM_STAT_CHECK_CONDITION,
662 },
663 {
664 .sense = UNIT_ATTENTION,
665 .asc = 0x29,
666 .result = SAM_STAT_CHECK_CONDITION,
667 },
668 {
669 .allowed = 1,
670 .result = DID_TIME_OUT << 16,
671 },
672 {}
673 };
674 struct scsi_failures failures = {
675 .total_allowed = 3,
676 .failure_definitions = failure_defs,
677 };
678 const struct scsi_exec_args exec_args = {
679 .resid = &resid,
680 .failures = &failures,
681 };
682
683 *bflags = 0;
684
685 /* Perform up to 3 passes. The first pass uses a conservative
686 * transfer length of 36 unless sdev->inquiry_len specifies a
687 * different value. */
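	/*
	 * Pass 1 asks for that conservative length. If it succeeds and the
	 * ADDITIONAL LENGTH field indicates more data is available, pass 2
	 * repeats the INQUIRY with the larger length; if pass 2 fails,
	 * pass 3 falls back to the length that worked in pass 1.
	 */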
688 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
689 try_inquiry_len = first_inquiry_len;
690 pass = 1;
691
692 next_pass:
693 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
694 "scsi scan: INQUIRY pass %d length %d\n",
695 pass, try_inquiry_len));
696
697 /* Each pass gets up to three chances to ignore Unit Attention */
698 scsi_failures_reset_retries(&failures);
699
700 for (count = 0; count < 3; ++count) {
701 memset(scsi_cmd, 0, 6);
702 scsi_cmd[0] = INQUIRY;
703 scsi_cmd[4] = (unsigned char) try_inquiry_len;
704
705 memset(inq_result, 0, try_inquiry_len);
706
707 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
708 inq_result, try_inquiry_len,
709 HZ / 2 + HZ * scsi_inq_timeout, 3,
710 &exec_args);
711
712 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
713 "scsi scan: INQUIRY %s with code 0x%x\n",
714 result ? "failed" : "successful", result));
715
716 if (result == 0) {
717 /*
718 * if nothing was transferred, we try
719 * again. It's a workaround for some USB
720 * devices.
721 */
722 if (resid == try_inquiry_len)
723 continue;
724 }
725 break;
726 }
727
728 if (result == 0) {
729 scsi_sanitize_inquiry_string(&inq_result[8], 8);
730 scsi_sanitize_inquiry_string(&inq_result[16], 16);
731 scsi_sanitize_inquiry_string(&inq_result[32], 4);
732
733 response_len = inq_result[4] + 5;
734 if (response_len > 255)
735 response_len = first_inquiry_len; /* sanity */
736
737 /*
738 * Get any flags for this device.
739 *
740 * XXX add a bflags to scsi_device, and replace the
741 * corresponding bit fields in scsi_device, so bflags
742 * need not be passed as an argument.
743 */
744 *bflags = scsi_get_device_flags(sdev, &inq_result[8],
745 &inq_result[16]);
746
747 /* When the first pass succeeds we gain information about
748 * what larger transfer lengths might work. */
749 if (pass == 1) {
750 if (BLIST_INQUIRY_36 & *bflags)
751 next_inquiry_len = 36;
752 /*
753 * LLD specified a maximum sdev->inquiry_len
754 * but device claims it has more data. Capping
755 * the length only makes sense for legacy
756 * devices. If a device supports SPC-4 (2014)
757 * or newer, assume that it is safe to ask for
758 * as much as the device says it supports.
759 */
760 else if (sdev->inquiry_len &&
761 response_len > sdev->inquiry_len &&
762 (inq_result[2] & 0x7) < 6) /* SPC-4 */
763 next_inquiry_len = sdev->inquiry_len;
764 else
765 next_inquiry_len = response_len;
766
767 /* If more data is available perform the second pass */
768 if (next_inquiry_len > try_inquiry_len) {
769 try_inquiry_len = next_inquiry_len;
770 pass = 2;
771 goto next_pass;
772 }
773 }
774
775 } else if (pass == 2) {
776 sdev_printk(KERN_INFO, sdev,
777 "scsi scan: %d byte inquiry failed. "
778 "Consider BLIST_INQUIRY_36 for this device\n",
779 try_inquiry_len);
780
781 /* If this pass failed, the third pass goes back and transfers
782 * the same amount as we successfully got in the first pass. */
783 try_inquiry_len = first_inquiry_len;
784 pass = 3;
785 goto next_pass;
786 }
787
788 /* If the last transfer attempt got an error, assume the
789 * peripheral doesn't exist or is dead. */
790 if (result)
791 return -EIO;
792
793 /* Don't report any more data than the device says is valid */
794 sdev->inquiry_len = min(try_inquiry_len, response_len);
795
796 /*
797 * XXX Abort if the response length is less than 36? If less than
798 * 32, the lookup of the device flags (above) could be invalid,
799 * and it would be possible to take an incorrect action - we do
800 * not want to hang because of a short INQUIRY. On the flip side,
801 * if the device is spun down or becoming ready (and so it gives a
802 * short INQUIRY), an abort here prevents any further use of the
803 * device, including spin up.
804 *
805 * On the whole, the best approach seems to be to assume the first
806 * 36 bytes are valid no matter what the device says. That's
807 * better than copying < 36 bytes to the inquiry-result buffer
808 * and displaying garbage for the Vendor, Product, or Revision
809 * strings.
810 */
811 if (sdev->inquiry_len < 36) {
812 if (!sdev->host->short_inquiry) {
813 shost_printk(KERN_INFO, sdev->host,
814 "scsi scan: INQUIRY result too short (%d),"
815 " using 36\n", sdev->inquiry_len);
816 sdev->host->short_inquiry = 1;
817 }
818 sdev->inquiry_len = 36;
819 }
820
821 /*
822 * Related to the above issue:
823 *
824 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
825 * and if not ready, sent a START_STOP to start (maybe spin up) and
826 * then send the INQUIRY again, since the INQUIRY can change after
827 * a device is initialized.
828 *
829 * Ideally, start a device if explicitly asked to do so. This
830 * assumes that a device is spun up on power on, spun down on
831 * request, and then spun up on request.
832 */
833
834 /*
835 * The scanning code needs to know the scsi_level, even if no
836 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
837 * non-zero LUNs can be scanned.
838 */
839 sdev->scsi_level = inq_result[2] & 0x0f;
840 if (sdev->scsi_level >= 2 ||
841 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
842 sdev->scsi_level++;
843 sdev->sdev_target->scsi_level = sdev->scsi_level;
844
845 /*
846 * If SCSI-2 or lower, and if the transport requires it,
847 * store the LUN value in CDB[1].
848 */
849 sdev->lun_in_cdb = 0;
850 if (sdev->scsi_level <= SCSI_2 &&
851 sdev->scsi_level != SCSI_UNKNOWN &&
852 !sdev->host->no_scsi2_lun_in_cdb)
853 sdev->lun_in_cdb = 1;
854
855 return 0;
856}
857
858/**
859 *	scsi_add_lun - allocate and fully initialize a scsi_device
860 * @sdev: holds information to be stored in the new scsi_device
861 * @inq_result: holds the result of a previous INQUIRY to the LUN
862 * @bflags: black/white list flag
863 * @async: 1 if this device is being scanned asynchronously
864 *
865 * Description:
866 * Initialize the scsi_device @sdev. Optionally set fields based
867 * on values in *@bflags.
868 *
869 * Return:
870 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
871 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
872 **/
873static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
874 blist_flags_t *bflags, int async)
875{
876 const struct scsi_host_template *hostt = sdev->host->hostt;
877 struct queue_limits lim;
878 int ret;
879
880 /*
881 * XXX do not save the inquiry, since it can change underneath us,
882 * save just vendor/model/rev.
883 *
884 * Rather than save it and have an ioctl that retrieves the saved
885 * value, have an ioctl that executes the same INQUIRY code used
886 * in scsi_probe_lun, let user level programs doing INQUIRY
887 * scanning run at their own risk, or supply a user level program
888 * that can correctly scan.
889 */
890
891 /*
892 * Copy at least 36 bytes of INQUIRY data, so that we don't
893 * dereference unallocated memory when accessing the Vendor,
894 * Product, and Revision strings. Badly behaved devices may set
895 * the INQUIRY Additional Length byte to a small value, indicating
896 * these strings are invalid, but often they contain plausible data
897 * nonetheless. It doesn't matter if the device sent < 36 bytes
898 * total, since scsi_probe_lun() initializes inq_result with 0s.
899 */
900 sdev->inquiry = kmemdup(inq_result,
901 max_t(size_t, sdev->inquiry_len, 36),
902 GFP_KERNEL);
903 if (sdev->inquiry == NULL)
904 return SCSI_SCAN_NO_RESPONSE;
905
906 sdev->vendor = (char *) (sdev->inquiry + 8);
907 sdev->model = (char *) (sdev->inquiry + 16);
908 sdev->rev = (char *) (sdev->inquiry + 32);
909
910	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
911 /*
912 * sata emulation layer device. This is a hack to work around
913 * the SATL power management specifications which state that
914 * when the SATL detects the device has gone into standby
915 * mode, it shall respond with NOT READY.
916 */
917 sdev->allow_restart = 1;
918 }
919
920 if (*bflags & BLIST_ISROM) {
921 sdev->type = TYPE_ROM;
922 sdev->removable = 1;
923 } else {
924 sdev->type = (inq_result[0] & 0x1f);
925 sdev->removable = (inq_result[1] & 0x80) >> 7;
926
927 /*
928 * some devices may respond with wrong type for
929 * well-known logical units. Force well-known type
930 * to enumerate them correctly.
931 */
932 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
933 sdev_printk(KERN_WARNING, sdev,
934 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
935 __func__, sdev->type, (unsigned int)sdev->lun);
936 sdev->type = TYPE_WLUN;
937 }
938
939 }
940
941 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
942 /* RBC and MMC devices can return SCSI-3 compliance and yet
943 * still not support REPORT LUNS, so make them act as
944 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
945 * specifically set */
946 if ((*bflags & BLIST_REPORTLUN2) == 0)
947 *bflags |= BLIST_NOREPORTLUN;
948 }
949
950 /*
951 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
952 * spec says: The device server is capable of supporting the
953 * specified peripheral device type on this logical unit. However,
954 * the physical device is not currently connected to this logical
955 * unit.
956 *
957 * The above is vague, as it implies that we could treat 001 and
958 * 011 the same. Stay compatible with previous code, and create a
959 * scsi_device for a PQ of 1
960 *
961 * Don't set the device offline here; rather let the upper
962 * level drivers eval the PQ to decide whether they should
963 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
964 */
965
966 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
967 sdev->lockable = sdev->removable;
968 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
969
970 if (sdev->scsi_level >= SCSI_3 ||
971 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
972 sdev->ppr = 1;
973 if (inq_result[7] & 0x60)
974 sdev->wdtr = 1;
975 if (inq_result[7] & 0x10)
976 sdev->sdtr = 1;
977
978 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
979 "ANSI: %d%s\n", scsi_device_type(sdev->type),
980 sdev->vendor, sdev->model, sdev->rev,
981 sdev->inq_periph_qual, inq_result[2] & 0x07,
982 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
983
984 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
985 !(*bflags & BLIST_NOTQ)) {
986 sdev->tagged_supported = 1;
987 sdev->simple_tags = 1;
988 }
989
990 /*
991 * Some devices (Texel CD ROM drives) have handshaking problems
992 * when used with the Seagate controllers. borken is initialized
993	 * to 1, and then set to 0 here.
994 */
995 if ((*bflags & BLIST_BORKEN) == 0)
996 sdev->borken = 0;
997
998 if (*bflags & BLIST_NO_ULD_ATTACH)
999 sdev->no_uld_attach = 1;
1000
1001 /*
1002 * Apparently some really broken devices (contrary to the SCSI
1003 * standards) need to be selected without asserting ATN
1004 */
1005 if (*bflags & BLIST_SELECT_NO_ATN)
1006 sdev->select_no_atn = 1;
1007
1008 /*
1009 * Some devices may not want to have a start command automatically
1010 * issued when a device is added.
1011 */
1012 if (*bflags & BLIST_NOSTARTONADD)
1013 sdev->no_start_on_add = 1;
1014
1015 if (*bflags & BLIST_SINGLELUN)
1016 scsi_target(sdev)->single_lun = 1;
1017
1018 sdev->use_10_for_rw = 1;
1019
1020 /* some devices don't like REPORT SUPPORTED OPERATION CODES
1021	 * and will simply time out, causing sd_mod init to take a very,
1022	 * very long time */
1023 if (*bflags & BLIST_NO_RSOC)
1024 sdev->no_report_opcodes = 1;
1025
1026 /* set the device running here so that slave configure
1027 * may do I/O */
1028 mutex_lock(&sdev->state_mutex);
1029 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1030 if (ret)
1031 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1032 mutex_unlock(&sdev->state_mutex);
1033
1034 if (ret) {
1035 sdev_printk(KERN_ERR, sdev,
1036 "in wrong state %s to complete scan\n",
1037 scsi_device_state_name(sdev->sdev_state));
1038 return SCSI_SCAN_NO_RESPONSE;
1039 }
1040
1041 if (*bflags & BLIST_NOT_LOCKABLE)
1042 sdev->lockable = 0;
1043
1044 if (*bflags & BLIST_RETRY_HWERROR)
1045 sdev->retry_hwerror = 1;
1046
1047 if (*bflags & BLIST_NO_DIF)
1048 sdev->no_dif = 1;
1049
1050 if (*bflags & BLIST_UNMAP_LIMIT_WS)
1051 sdev->unmap_limit_for_ws = 1;
1052
1053 if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1054 sdev->ignore_media_change = 1;
1055
1056 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1057
1058 if (*bflags & BLIST_TRY_VPD_PAGES)
1059 sdev->try_vpd_pages = 1;
1060 else if (*bflags & BLIST_SKIP_VPD_PAGES)
1061 sdev->skip_vpd_pages = 1;
1062
1063 if (*bflags & BLIST_NO_VPD_SIZE)
1064 sdev->no_vpd_size = 1;
1065
1066 transport_configure_device(&sdev->sdev_gendev);
1067
1068 /*
1069	 * No need to freeze the queue as it isn't reachable by anyone else yet.
1070 */
1071 lim = queue_limits_start_update(sdev->request_queue);
1072 if (*bflags & BLIST_MAX_512)
1073 lim.max_hw_sectors = 512;
1074 else if (*bflags & BLIST_MAX_1024)
1075 lim.max_hw_sectors = 1024;
1076
1077 if (hostt->device_configure)
1078 ret = hostt->device_configure(sdev, &lim);
1079 else if (hostt->slave_configure)
1080 ret = hostt->slave_configure(sdev);
1081 if (ret) {
1082 queue_limits_cancel_update(sdev->request_queue);
1083 /*
1084 * If the LLDD reports device not present, don't clutter the
1085 * console with failure messages.
1086 */
1087 if (ret != -ENXIO)
1088 sdev_printk(KERN_ERR, sdev,
1089 "failed to configure device\n");
1090 return SCSI_SCAN_NO_RESPONSE;
1091 }
1092
1093 ret = queue_limits_commit_update(sdev->request_queue, &lim);
1094 if (ret) {
1095 sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
1096 return SCSI_SCAN_NO_RESPONSE;
1097 }
1098
1099 /*
1100 * The queue_depth is often changed in ->device_configure.
1101 *
1102 * Set up budget map again since memory consumption of the map depends
1103 * on actual queue depth.
1104 */
1105 if (hostt->device_configure || hostt->slave_configure)
1106 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1107
1108 if (sdev->scsi_level >= SCSI_3)
1109 scsi_attach_vpd(sdev);
1110
1111 scsi_cdl_check(sdev);
1112
1113 sdev->max_queue_depth = sdev->queue_depth;
1114 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1115 sdev->sdev_bflags = *bflags;
1116
1117 /*
1118 * Ok, the device is now all set up, we can
1119 * register it and tell the rest of the kernel
1120 * about it.
1121 */
1122 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1123 return SCSI_SCAN_NO_RESPONSE;
1124
1125 return SCSI_SCAN_LUN_PRESENT;
1126}
1127
1128#ifdef CONFIG_SCSI_LOGGING
1129/**
1130 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
1131 * @buf: Output buffer with at least end-first+1 bytes of space
1132 * @inq: Inquiry buffer (input)
1133 * @first: Offset of string into inq
1134 * @end: Index after last character in inq
1135 */
1136static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1137 unsigned first, unsigned end)
1138{
1139 unsigned term = 0, idx;
1140
1141 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
1142 if (inq[idx+first] > ' ') {
1143 buf[idx] = inq[idx+first];
1144 term = idx+1;
1145 } else {
1146 buf[idx] = ' ';
1147 }
1148 }
1149 buf[term] = 0;
1150 return buf;
1151}
1152#endif
1153
1154/**
1155 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1156 * @starget: pointer to target device structure
1157 * @lun: LUN of target device
1158 * @bflagsp: store bflags here if not NULL
1159 * @sdevp: probe the LUN corresponding to this scsi_device
1160 * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only
1161 * needed on first scan
1162 * @hostdata: passed to scsi_alloc_sdev()
1163 *
1164 * Description:
1165 * Call scsi_probe_lun, if a LUN with an attached device is found,
1166 * allocate and set it up by calling scsi_add_lun.
1167 *
1168 * Return:
1169 *
1170 * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1171 * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1172 * attached at the LUN
1173 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1174 **/
1175static int scsi_probe_and_add_lun(struct scsi_target *starget,
1176 u64 lun, blist_flags_t *bflagsp,
1177 struct scsi_device **sdevp,
1178 enum scsi_scan_mode rescan,
1179 void *hostdata)
1180{
1181 struct scsi_device *sdev;
1182 unsigned char *result;
1183 blist_flags_t bflags;
1184 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1185 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1186
1187 /*
1188	 * The rescan flag is used as an optimization; the first scan of a
1189	 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
1190 */
1191 sdev = scsi_device_lookup_by_target(starget, lun);
1192 if (sdev) {
1193 if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1194 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1195 "scsi scan: device exists on %s\n",
1196 dev_name(&sdev->sdev_gendev)));
1197 if (sdevp)
1198 *sdevp = sdev;
1199 else
1200 scsi_device_put(sdev);
1201
1202 if (bflagsp)
1203 *bflagsp = scsi_get_device_flags(sdev,
1204 sdev->vendor,
1205 sdev->model);
1206 return SCSI_SCAN_LUN_PRESENT;
1207 }
1208 scsi_device_put(sdev);
1209 } else
1210 sdev = scsi_alloc_sdev(starget, lun, hostdata);
1211 if (!sdev)
1212 goto out;
1213
1214 result = kmalloc(result_len, GFP_KERNEL);
1215 if (!result)
1216 goto out_free_sdev;
1217
1218 if (scsi_probe_lun(sdev, result, result_len, &bflags))
1219 goto out_free_result;
1220
1221 if (bflagsp)
1222 *bflagsp = bflags;
1223 /*
1224 * result contains valid SCSI INQUIRY data.
1225 */
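	/*
	 * Standard INQUIRY bytes used below: byte 0 holds the peripheral
	 * qualifier (bits 7-5) and peripheral device type (bits 4-0),
	 * byte 1 bit 7 is RMB (removable), byte 2 is the ANSI version,
	 * byte 4 is ADDITIONAL LENGTH, and bytes 8-15, 16-31 and 32-35
	 * carry the vendor, product and revision strings.
	 */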
1226 if ((result[0] >> 5) == 3) {
1227 /*
1228 * For a Peripheral qualifier 3 (011b), the SCSI
1229 * spec says: The device server is not capable of
1230 * supporting a physical device on this logical
1231 * unit.
1232 *
1233 * For disks, this implies that there is no
1234 * logical disk configured at sdev->lun, but there
1235 * is a target id responding.
1236 */
1237 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1238 " peripheral qualifier of 3, device not"
1239 " added\n"))
1240 if (lun == 0) {
1241 SCSI_LOG_SCAN_BUS(1, {
1242 unsigned char vend[9];
1243 unsigned char mod[17];
1244
1245 sdev_printk(KERN_INFO, sdev,
1246 "scsi scan: consider passing scsi_mod."
1247 "dev_flags=%s:%s:0x240 or 0x1000240\n",
1248 scsi_inq_str(vend, result, 8, 16),
1249 scsi_inq_str(mod, result, 16, 32));
1250 });
1251
1252 }
1253
1254 res = SCSI_SCAN_TARGET_PRESENT;
1255 goto out_free_result;
1256 }
1257
1258 /*
1259 * Some targets may set slight variations of PQ and PDT to signal
1260 * that no LUN is present, so don't add sdev in these cases.
1261 * Two specific examples are:
1262 * 1) NetApp targets: return PQ=1, PDT=0x1f
1263 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1264 * in the UFI 1.0 spec (we cannot rely on reserved bits).
1265 *
1266 * References:
1267 * 1) SCSI SPC-3, pp. 145-146
1268 * PQ=1: "A peripheral device having the specified peripheral
1269 * device type is not connected to this logical unit. However, the
1270 * device server is capable of supporting the specified peripheral
1271 * device type on this logical unit."
1272 * PDT=0x1f: "Unknown or no device type"
1273 * 2) USB UFI 1.0, p. 20
1274 * PDT=00h Direct-access device (floppy)
1275 * PDT=1Fh none (no FDD connected to the requested logical unit)
1276 */
1277 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1278 (result[0] & 0x1f) == 0x1f &&
1279 !scsi_is_wlun(lun)) {
1280 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1281 "scsi scan: peripheral device type"
1282 " of 31, no device added\n"));
1283 res = SCSI_SCAN_TARGET_PRESENT;
1284 goto out_free_result;
1285 }
1286
1287 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1288 if (res == SCSI_SCAN_LUN_PRESENT) {
1289 if (bflags & BLIST_KEY) {
1290 sdev->lockable = 0;
1291 scsi_unlock_floptical(sdev, result);
1292 }
1293 }
1294
1295 out_free_result:
1296 kfree(result);
1297 out_free_sdev:
1298 if (res == SCSI_SCAN_LUN_PRESENT) {
1299 if (sdevp) {
1300 if (scsi_device_get(sdev) == 0) {
1301 *sdevp = sdev;
1302 } else {
1303 __scsi_remove_device(sdev);
1304 res = SCSI_SCAN_NO_RESPONSE;
1305 }
1306 }
1307 } else
1308 __scsi_remove_device(sdev);
1309 out:
1310 return res;
1311}
1312
1313/**
1314 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1315 * @starget: pointer to target structure to scan
1316 * @bflags: black/white list flag for LUN 0
1317 * @scsi_level: Which version of the standard does this device adhere to
1318 * @rescan: passed to scsi_probe_add_lun()
1319 *
1320 * Description:
1321 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1322 * scanned) to some maximum lun until a LUN is found with no device
1323 * attached. Use the bflags to figure out any oddities.
1326 **/
1327static void scsi_sequential_lun_scan(struct scsi_target *starget,
1328 blist_flags_t bflags, int scsi_level,
1329 enum scsi_scan_mode rescan)
1330{
1331 uint max_dev_lun;
1332 u64 sparse_lun, lun;
1333 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1334
1335 SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1336 "scsi scan: Sequential scan\n"));
1337
1338 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1339 /*
1340 * If this device is known to support sparse multiple units,
1341 * override the other settings, and scan all of them. Normally,
1342 * SCSI-3 devices should be scanned via the REPORT LUNS.
1343 */
1344 if (bflags & BLIST_SPARSELUN) {
1345 max_dev_lun = shost->max_lun;
1346 sparse_lun = 1;
1347 } else
1348 sparse_lun = 0;
1349
1350 /*
1351 * If less than SCSI_1_CCS, and no special lun scanning, stop
1352 * scanning; this matches 2.4 behaviour, but could just be a bug
1353 * (to continue scanning a SCSI_1_CCS device).
1354 *
1355 * This test is broken. We might not have any device on lun0 for
1356 * a sparselun device, and if that's the case then how would we
1357 * know the real scsi_level, eh? It might make sense to just not
1358 * scan any SCSI_1 device for non-0 luns, but that check would best
1359 * go into scsi_alloc_sdev() and just have it return null when asked
1360 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1361 *
1362 if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1363 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1364 == 0))
1365 return;
1366 */
1367 /*
1368 * If this device is known to support multiple units, override
1369 * the other settings, and scan all of them.
1370 */
1371 if (bflags & BLIST_FORCELUN)
1372 max_dev_lun = shost->max_lun;
1373 /*
1374 * REGAL CDC-4X: avoid hang after LUN 4
1375 */
1376 if (bflags & BLIST_MAX5LUN)
1377 max_dev_lun = min(5U, max_dev_lun);
1378 /*
1379	 * Do not scan SCSI-2 or lower devices past LUN 7, unless
1380 * BLIST_LARGELUN.
1381 */
1382 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1383 max_dev_lun = min(8U, max_dev_lun);
1384 else
1385 max_dev_lun = min(256U, max_dev_lun);
1386
1387 /*
1388 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1389 * until we reach the max, or no LUN is found and we are not
1390 * sparse_lun.
1391 */
1392 for (lun = 1; lun < max_dev_lun; ++lun)
1393 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1394 NULL) != SCSI_SCAN_LUN_PRESENT) &&
1395 !sparse_lun)
1396 return;
1397}
1398
1399/**
1400 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1401 * @starget: which target
1402 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1403 * @rescan: nonzero if we can skip code only needed on first scan
1404 *
1405 * Description:
1406 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1407 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1408 *
1409 * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1410 * LUNs even if it's older than SCSI-3.
1411 * If BLIST_NOREPORTLUN is set, return 1 always.
1412 * If BLIST_NOLUN is set, return 0 always.
1413 * If starget->no_report_luns is set, return 1 always.
1414 *
1415 * Return:
1416 * 0: scan completed (or no memory, so further scanning is futile)
1417 * 1: could not scan with REPORT LUN
1418 **/
1419static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1420 enum scsi_scan_mode rescan)
1421{
1422 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1423 unsigned int length;
1424 u64 lun;
1425 unsigned int num_luns;
1426 int result;
1427 struct scsi_lun *lunp, *lun_data;
1428 struct scsi_device *sdev;
1429 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1430 struct scsi_failure failure_defs[] = {
1431 {
1432 .sense = UNIT_ATTENTION,
1433 .asc = SCMD_FAILURE_ASC_ANY,
1434 .ascq = SCMD_FAILURE_ASCQ_ANY,
1435 .result = SAM_STAT_CHECK_CONDITION,
1436 },
1437 /* Fail all CCs except the UA above */
1438 {
1439 .sense = SCMD_FAILURE_SENSE_ANY,
1440 .result = SAM_STAT_CHECK_CONDITION,
1441 },
1442 /* Retry any other errors not listed above */
1443 {
1444 .result = SCMD_FAILURE_RESULT_ANY,
1445 },
1446 {}
1447 };
1448 struct scsi_failures failures = {
1449 .total_allowed = 3,
1450 .failure_definitions = failure_defs,
1451 };
1452 const struct scsi_exec_args exec_args = {
1453 .failures = &failures,
1454 };
1455 int ret = 0;
1456
1457 /*
1458 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1459	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and the host adapter
1460	 * supports more than 8 LUNs.
1461 * Don't attempt if the target doesn't support REPORT LUNS.
1462 */
1463 if (bflags & BLIST_NOREPORTLUN)
1464 return 1;
1465 if (starget->scsi_level < SCSI_2 &&
1466 starget->scsi_level != SCSI_UNKNOWN)
1467 return 1;
1468 if (starget->scsi_level < SCSI_3 &&
1469 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1470 return 1;
1471 if (bflags & BLIST_NOLUN)
1472 return 0;
1473 if (starget->no_report_luns)
1474 return 1;
1475
1476 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1477 sdev = scsi_alloc_sdev(starget, 0, NULL);
1478 if (!sdev)
1479 return 0;
1480 if (scsi_device_get(sdev)) {
1481 __scsi_remove_device(sdev);
1482 return 0;
1483 }
1484 }
1485
1486 /*
1487 * Allocate enough to hold the header (the same size as one scsi_lun)
1488 * plus the number of luns we are requesting. 511 was the default
1489 * value of the now removed max_report_luns parameter.
1490 */
1491 length = (511 + 1) * sizeof(struct scsi_lun);
1492retry:
1493 lun_data = kmalloc(length, GFP_KERNEL);
1494 if (!lun_data) {
1495 printk(ALLOC_FAILURE_MSG, __func__);
1496 goto out;
1497 }
1498
1499 scsi_cmd[0] = REPORT_LUNS;
1500
1501 /*
1502 * bytes 1 - 5: reserved, set to zero.
1503 */
1504 memset(&scsi_cmd[1], 0, 5);
1505
1506 /*
1507 * bytes 6 - 9: length of the command.
1508 */
1509 put_unaligned_be32(length, &scsi_cmd[6]);
1510
1511 scsi_cmd[10] = 0; /* reserved */
1512 scsi_cmd[11] = 0; /* control */
1513
1514 /*
1515 * We can get a UNIT ATTENTION, for example a power on/reset, so
1516 * retry a few times (like sd.c does for TEST UNIT READY).
1517 * Experience shows some combinations of adapter/devices get at
1518 * least two power on/resets.
1519 *
1520 * Illegal requests (for devices that do not support REPORT LUNS)
1521 * should come through as a check condition, and will not generate
1522 * a retry.
1523 */
1524 scsi_failures_reset_retries(&failures);
1525
1526 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1527 "scsi scan: Sending REPORT LUNS\n"));
1528
1529 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
1530 length, SCSI_REPORT_LUNS_TIMEOUT, 3,
1531 &exec_args);
1532
1533 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1534 "scsi scan: REPORT LUNS %s result 0x%x\n",
1535 result ? "failed" : "successful", result));
1536 if (result) {
1537 /*
1538 * The device probably does not support a REPORT LUN command
1539 */
1540 ret = 1;
1541 goto out_err;
1542 }
1543
1544 /*
1545 * Get the length from the first four bytes of lun_data.
1546 */
1547 if (get_unaligned_be32(lun_data->scsi_lun) +
1548 sizeof(struct scsi_lun) > length) {
1549 length = get_unaligned_be32(lun_data->scsi_lun) +
1550 sizeof(struct scsi_lun);
1551 kfree(lun_data);
1552 goto retry;
1553 }
1554 length = get_unaligned_be32(lun_data->scsi_lun);
1555
1556 num_luns = (length / sizeof(struct scsi_lun));
1557
1558 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1559 "scsi scan: REPORT LUN scan\n"));
1560
1561 /*
1562 * Scan the luns in lun_data. The entry at offset 0 is really
1563 * the header, so start at 1 and go up to and including num_luns.
1564 */
1565 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1566 lun = scsilun_to_int(lunp);
1567
1568 if (lun > sdev->host->max_lun) {
1569 sdev_printk(KERN_WARNING, sdev,
1570 "lun%llu has a LUN larger than"
1571 " allowed by the host adapter\n", lun);
1572 } else {
1573 int res;
1574
1575 res = scsi_probe_and_add_lun(starget,
1576 lun, NULL, NULL, rescan, NULL);
1577 if (res == SCSI_SCAN_NO_RESPONSE) {
1578 /*
1579 * Got some results, but now none, abort.
1580 */
1581 sdev_printk(KERN_ERR, sdev,
1582 "Unexpected response"
1583 " from lun %llu while scanning, scan"
1584 " aborted\n", (unsigned long long)lun);
1585 break;
1586 }
1587 }
1588 }
1589
1590 out_err:
1591 kfree(lun_data);
1592 out:
1593 if (scsi_device_created(sdev))
1594 /*
1595 * the sdev we used didn't appear in the report luns scan
1596 */
1597 __scsi_remove_device(sdev);
1598 scsi_device_put(sdev);
1599 return ret;
1600}
1601
1602struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1603 uint id, u64 lun, void *hostdata)
1604{
1605 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1606 struct device *parent = &shost->shost_gendev;
1607 struct scsi_target *starget;
1608
1609 if (strncmp(scsi_scan_type, "none", 4) == 0)
1610 return ERR_PTR(-ENODEV);
1611
1612 starget = scsi_alloc_target(parent, channel, id);
1613 if (!starget)
1614 return ERR_PTR(-ENOMEM);
1615 scsi_autopm_get_target(starget);
1616
1617 mutex_lock(&shost->scan_mutex);
1618 if (!shost->async_scan)
1619 scsi_complete_async_scans();
1620
1621 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1622 scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
1623 SCSI_SCAN_RESCAN, hostdata);
1624 scsi_autopm_put_host(shost);
1625 }
1626 mutex_unlock(&shost->scan_mutex);
1627 scsi_autopm_put_target(starget);
1628 /*
1629 * paired with scsi_alloc_target(). Target will be destroyed unless
1630 * scsi_probe_and_add_lun made an underlying device visible
1631 */
1632 scsi_target_reap(starget);
1633 put_device(&starget->dev);
1634
1635 return sdev;
1636}
1637EXPORT_SYMBOL(__scsi_add_device);
1638
1639int scsi_add_device(struct Scsi_Host *host, uint channel,
1640 uint target, u64 lun)
1641{
1642 struct scsi_device *sdev =
1643 __scsi_add_device(host, channel, target, lun, NULL);
1644 if (IS_ERR(sdev))
1645 return PTR_ERR(sdev);
1646
1647 scsi_device_put(sdev);
1648 return 0;
1649}
1650EXPORT_SYMBOL(scsi_add_device);
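
/*
 * Illustrative sketch (not compiled): how a low-level driver might use
 * scsi_add_device() from its hotplug handling.  The function name and the
 * 0:0:0:1 address are hypothetical.
 */
#if 0
static void example_hotplug_add(struct Scsi_Host *shost)
{
	int ret;

	/* Probe 0:0:0:1 and, if the INQUIRY succeeds, register the sdev. */
	ret = scsi_add_device(shost, 0, 0, 1);
	if (ret)
		shost_printk(KERN_WARNING, shost,
			     "example: no device at 0:0:0:1 (%d)\n", ret);
}
#endif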
1651
1652int scsi_resume_device(struct scsi_device *sdev)
1653{
1654 struct device *dev = &sdev->sdev_gendev;
1655 int ret = 0;
1656
1657 device_lock(dev);
1658
1659 /*
1660 * Bail out if the device or its queue are not running. Otherwise,
1661 * the rescan may block waiting for commands to be executed, with us
1662 * holding the device lock. This can result in a potential deadlock
1663 * in the power management core code when system resume is on-going.
1664 */
1665 if (sdev->sdev_state != SDEV_RUNNING ||
1666 blk_queue_pm_only(sdev->request_queue)) {
1667 ret = -EWOULDBLOCK;
1668 goto unlock;
1669 }
1670
1671 if (dev->driver && try_module_get(dev->driver->owner)) {
1672 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1673
1674 if (drv->resume)
1675 ret = drv->resume(dev);
1676 module_put(dev->driver->owner);
1677 }
1678
1679unlock:
1680 device_unlock(dev);
1681
1682 return ret;
1683}
1684EXPORT_SYMBOL(scsi_resume_device);
1685
1686int scsi_rescan_device(struct scsi_device *sdev)
1687{
1688 struct device *dev = &sdev->sdev_gendev;
1689 int ret = 0;
1690
1691 device_lock(dev);
1692
1693 /*
1694 * Bail out if the device or its queue are not running. Otherwise,
1695 * the rescan may block waiting for commands to be executed, with us
1696 * holding the device lock. This can result in a potential deadlock
1697 * in the power management core code when system resume is on-going.
1698 */
1699 if (sdev->sdev_state != SDEV_RUNNING ||
1700 blk_queue_pm_only(sdev->request_queue)) {
1701 ret = -EWOULDBLOCK;
1702 goto unlock;
1703 }
1704
1705 scsi_attach_vpd(sdev);
1706 scsi_cdl_check(sdev);
1707
1708 if (sdev->handler && sdev->handler->rescan)
1709 sdev->handler->rescan(sdev);
1710
1711 if (dev->driver && try_module_get(dev->driver->owner)) {
1712 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1713
1714 if (drv->rescan)
1715 drv->rescan(dev);
1716 module_put(dev->driver->owner);
1717 }
1718
1719unlock:
1720 device_unlock(dev);
1721
1722 return ret;
1723}
1724EXPORT_SYMBOL(scsi_rescan_device);
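
/*
 * Illustrative sketch (not compiled): a management or transport path that
 * wants an already-known device re-probed (for example after a capacity
 * change) can look it up and call scsi_rescan_device().  The function name
 * and the 0:0:0:0 address are hypothetical.
 */
#if 0
static void example_rescan(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	sdev = scsi_device_lookup(shost, 0, 0, 0);
	if (!sdev)
		return;

	/* Re-reads VPD pages and asks the attached ULD (e.g. sd) to revalidate. */
	if (scsi_rescan_device(sdev))
		sdev_printk(KERN_INFO, sdev, "example: rescan deferred\n");

	scsi_device_put(sdev);
}
#endif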
1725
1726static void __scsi_scan_target(struct device *parent, unsigned int channel,
1727 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1728{
1729 struct Scsi_Host *shost = dev_to_shost(parent);
1730 blist_flags_t bflags = 0;
1731 int res;
1732 struct scsi_target *starget;
1733
1734 if (shost->this_id == id)
1735 /*
1736 * Don't scan the host adapter
1737 */
1738 return;
1739
1740 starget = scsi_alloc_target(parent, channel, id);
1741 if (!starget)
1742 return;
1743 scsi_autopm_get_target(starget);
1744
1745 if (lun != SCAN_WILD_CARD) {
1746 /*
1747 * Scan for a specific host/chan/id/lun.
1748 */
1749 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1750 goto out_reap;
1751 }
1752
1753 /*
1754 * Scan LUN 0; if there is some response, scan further. Ideally, we
1755 * would not configure LUN 0 until all LUNs are scanned.
1756 */
1757 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1758 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1759 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1760 /*
1761 * The REPORT LUNS scan did not cover the target,
1762 * so fall back to a sequential scan.
1763 */
1764 scsi_sequential_lun_scan(starget, bflags,
1765 starget->scsi_level, rescan);
1766 }
1767
1768 out_reap:
1769 scsi_autopm_put_target(starget);
1770 /*
1771 * paired with scsi_alloc_target(): determine if the target has
1772 * any children at all and if not, nuke it
1773 */
1774 scsi_target_reap(starget);
1775
1776 put_device(&starget->dev);
1777}
1778
1779/**
1780 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1781 * @parent: host to scan
1782 * @channel: channel to scan
1783 * @id: target id to scan
1784 * @lun: Specific LUN to scan or SCAN_WILD_CARD
1785 * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1786 * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1787 * and SCSI_SCAN_MANUAL to force scanning even if
1788 * 'scan=manual' is set.
1789 *
1790 * Description:
1791 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1792 * and possibly all LUNs on the target id.
1793 *
1794 * First try a REPORT LUNS scan; if that does not scan the target, do a
1795 * sequential scan of LUNs on the target id.
1796 **/
1797void scsi_scan_target(struct device *parent, unsigned int channel,
1798 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1799{
1800 struct Scsi_Host *shost = dev_to_shost(parent);
1801
1802 if (strncmp(scsi_scan_type, "none", 4) == 0)
1803 return;
1804
1805 if (rescan != SCSI_SCAN_MANUAL &&
1806 strncmp(scsi_scan_type, "manual", 6) == 0)
1807 return;
1808
1809 mutex_lock(&shost->scan_mutex);
1810 if (!shost->async_scan)
1811 scsi_complete_async_scans();
1812
1813 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1814 __scsi_scan_target(parent, channel, id, lun, rescan);
1815 scsi_autopm_put_host(shost);
1816 }
1817 mutex_unlock(&shost->scan_mutex);
1818}
1819EXPORT_SYMBOL(scsi_scan_target);
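
/*
 * Illustrative sketch (not compiled): transport classes typically call
 * scsi_scan_target() with the remote-port device as the parent once a port
 * comes online.  The rport fields below follow the FC transport's usage, but
 * the function itself is hypothetical and shown only as an example.
 */
#if 0
static void example_scan_remote_port(struct fc_rport *rport)
{
	scsi_scan_target(&rport->dev, rport->channel, rport->scsi_target_id,
			 SCAN_WILD_CARD, SCSI_SCAN_RESCAN);
}
#endif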
1820
1821static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1822 unsigned int id, u64 lun,
1823 enum scsi_scan_mode rescan)
1824{
1825 uint order_id;
1826
1827 if (id == SCAN_WILD_CARD)
1828 for (id = 0; id < shost->max_id; ++id) {
1829 /*
1830 * XXX adapter drivers when possible (FCP, iSCSI)
1831 * could modify max_id to match the current max,
1832 * not the absolute max.
1833 *
1834 * XXX add a shost id iterator, so for example,
1835 * the FC ID can be the same as a target id
1836 * without a huge overhead of sparse id's.
1837 */
1838 if (shost->reverse_ordering)
1839 /*
1840 * Scan from high to low id.
1841 */
1842 order_id = shost->max_id - id - 1;
1843 else
1844 order_id = id;
1845 __scsi_scan_target(&shost->shost_gendev, channel,
1846 order_id, lun, rescan);
1847 }
1848 else
1849 __scsi_scan_target(&shost->shost_gendev, channel,
1850 id, lun, rescan);
1851}
1852
1853int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1854 unsigned int id, u64 lun,
1855 enum scsi_scan_mode rescan)
1856{
1857 SCSI_LOG_SCAN_BUS(3, shost_printk(KERN_INFO, shost,
1858 "%s: <%u:%u:%llu>\n",
1859 __func__, channel, id, lun));
1860
1861 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1862 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1863 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1864 return -EINVAL;
1865
1866 mutex_lock(&shost->scan_mutex);
1867 if (!shost->async_scan)
1868 scsi_complete_async_scans();
1869
1870 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1871 if (channel == SCAN_WILD_CARD)
1872 for (channel = 0; channel <= shost->max_channel;
1873 channel++)
1874 scsi_scan_channel(shost, channel, id, lun,
1875 rescan);
1876 else
1877 scsi_scan_channel(shost, channel, id, lun, rescan);
1878 scsi_autopm_put_host(shost);
1879 }
1880 mutex_unlock(&shost->scan_mutex);
1881
1882 return 0;
1883}
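
/*
 * Illustrative sketch (not compiled): the sysfs "scan" attribute ends up in
 * scsi_scan_host_selected(); a full wildcard scan of every channel/id/lun on
 * a host would look like the call below (SCSI_SCAN_MANUAL so it also works
 * when scan=manual).  The wrapper name is hypothetical.
 */
#if 0
static int example_scan_everything(struct Scsi_Host *shost)
{
	return scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
				       SCAN_WILD_CARD, SCSI_SCAN_MANUAL);
}
#endif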
1884
1885static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1886{
1887 struct scsi_device *sdev;
1888 shost_for_each_device(sdev, shost) {
1889 /* target removed before the device could be added */
1890 if (sdev->sdev_state == SDEV_DEL)
1891 continue;
1892 /* If device is already visible, skip adding it to sysfs */
1893 if (sdev->is_visible)
1894 continue;
1895 if (!scsi_host_scan_allowed(shost) ||
1896 scsi_sysfs_add_sdev(sdev) != 0)
1897 __scsi_remove_device(sdev);
1898 }
1899}
1900
1901/**
1902 * scsi_prep_async_scan - prepare for an async scan
1903 * @shost: the host which will be scanned
1904 * Returns: a cookie to be passed to scsi_finish_async_scan()
1905 *
1906 * Tells the midlayer this host is going to do an asynchronous scan.
1907 * It reserves the host's position in the scanning list and ensures
1908 * that other asynchronous scans started after this one won't affect the
1909 * ordering of the discovered devices.
1910 */
1911static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1912{
1913 struct async_scan_data *data = NULL;
1914 unsigned long flags;
1915
1916 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1917 return NULL;
1918
1919 mutex_lock(&shost->scan_mutex);
1920 if (shost->async_scan) {
1921 shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1922 goto err;
1923 }
1924
1925 data = kmalloc(sizeof(*data), GFP_KERNEL);
1926 if (!data)
1927 goto err;
1928 data->shost = scsi_host_get(shost);
1929 if (!data->shost)
1930 goto err;
1931 init_completion(&data->prev_finished);
1932
1933 spin_lock_irqsave(shost->host_lock, flags);
1934 shost->async_scan = 1;
1935 spin_unlock_irqrestore(shost->host_lock, flags);
1936 mutex_unlock(&shost->scan_mutex);
1937
1938 spin_lock(&async_scan_lock);
1939 if (list_empty(&scanning_hosts))
1940 complete(&data->prev_finished);
1941 list_add_tail(&data->list, &scanning_hosts);
1942 spin_unlock(&async_scan_lock);
1943
1944 return data;
1945
1946 err:
1947 mutex_unlock(&shost->scan_mutex);
1948 kfree(data);
1949 return NULL;
1950}
1951
1952/**
1953 * scsi_finish_async_scan - asynchronous scan has finished
1954 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1955 *
1956 * All the devices currently attached to this host have been found.
1957 * This function announces all the devices it has found to the rest
1958 * of the system.
1959 */
1960static void scsi_finish_async_scan(struct async_scan_data *data)
1961{
1962 struct Scsi_Host *shost;
1963 unsigned long flags;
1964
1965 if (!data)
1966 return;
1967
1968 shost = data->shost;
1969
1970 mutex_lock(&shost->scan_mutex);
1971
1972 if (!shost->async_scan) {
1973 shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1974 dump_stack();
1975 mutex_unlock(&shost->scan_mutex);
1976 return;
1977 }
1978
1979 wait_for_completion(&data->prev_finished);
1980
1981 scsi_sysfs_add_devices(shost);
1982
1983 spin_lock_irqsave(shost->host_lock, flags);
1984 shost->async_scan = 0;
1985 spin_unlock_irqrestore(shost->host_lock, flags);
1986
1987 mutex_unlock(&shost->scan_mutex);
1988
1989 spin_lock(&async_scan_lock);
1990 list_del(&data->list);
1991 if (!list_empty(&scanning_hosts)) {
1992 struct async_scan_data *next = list_entry(scanning_hosts.next,
1993 struct async_scan_data, list);
1994 complete(&next->prev_finished);
1995 }
1996 spin_unlock(&async_scan_lock);
1997
1998 scsi_autopm_put_host(shost);
1999 scsi_host_put(shost);
2000 kfree(data);
2001}
2002
2003static void do_scsi_scan_host(struct Scsi_Host *shost)
2004{
2005 if (shost->hostt->scan_finished) {
2006 unsigned long start = jiffies;
2007 if (shost->hostt->scan_start)
2008 shost->hostt->scan_start(shost);
2009
2010 while (!shost->hostt->scan_finished(shost, jiffies - start))
2011 msleep(10);
2012 } else {
2013 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
2014 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2015 }
2016}
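
/*
 * Illustrative sketch (not compiled): a host template that drives its own
 * discovery provides scan_start()/scan_finished(); do_scsi_scan_host() then
 * polls scan_finished() instead of running the generic wildcard scan.  The
 * names, the example_discovery_done() helper and the 10 second budget are
 * all hypothetical.
 */
#if 0
static void example_scan_start(struct Scsi_Host *shost)
{
	/* Kick off firmware/fabric discovery here. */
}

static int example_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
	/* Report completion, or give up after roughly 10 seconds. */
	return example_discovery_done(shost) || elapsed > 10 * HZ;
}

static struct scsi_host_template example_sht = {
	.scan_start	= example_scan_start,
	.scan_finished	= example_scan_finished,
};
#endif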
2017
2018static void do_scan_async(void *_data, async_cookie_t c)
2019{
2020 struct async_scan_data *data = _data;
2021 struct Scsi_Host *shost = data->shost;
2022
2023 do_scsi_scan_host(shost);
2024 scsi_finish_async_scan(data);
2025}
2026
2027/**
2028 * scsi_scan_host - scan the given adapter
2029 * @shost: adapter to scan
2030 **/
2031void scsi_scan_host(struct Scsi_Host *shost)
2032{
2033 struct async_scan_data *data;
2034
2035 if (strncmp(scsi_scan_type, "none", 4) == 0 ||
2036 strncmp(scsi_scan_type, "manual", 6) == 0)
2037 return;
2038 if (scsi_autopm_get_host(shost) < 0)
2039 return;
2040
2041 data = scsi_prep_async_scan(shost);
2042 if (!data) {
2043 do_scsi_scan_host(shost);
2044 scsi_autopm_put_host(shost);
2045 return;
2046 }
2047
2048 /* register with the async subsystem so wait_for_device_probe()
2049 * will flush this work
2050 */
2051 async_schedule(do_scan_async, data);
2052
2053 /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
2054}
2055EXPORT_SYMBOL(scsi_scan_host);
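
/*
 * Illustrative sketch (not compiled): the usual low-level driver probe()
 * sequence registers the host and then lets the midlayer discover devices.
 * The template, private-data structure and function name are hypothetical.
 */
#if 0
static int example_probe(struct device *dev)
{
	struct Scsi_Host *shost;
	int err;

	shost = scsi_host_alloc(&example_sht, sizeof(struct example_hba));
	if (!shost)
		return -ENOMEM;

	err = scsi_add_host(shost, dev);
	if (err) {
		scsi_host_put(shost);
		return err;
	}

	/* Sync, async or deferred according to the "scan" module parameter. */
	scsi_scan_host(shost);
	return 0;
}
#endif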
2056
2057void scsi_forget_host(struct Scsi_Host *shost)
2058{
2059 struct scsi_device *sdev;
2060 unsigned long flags;
2061
2062 restart:
2063 spin_lock_irqsave(shost->host_lock, flags);
2064 list_for_each_entry(sdev, &shost->__devices, siblings) {
2065 if (sdev->sdev_state == SDEV_DEL)
2066 continue;
2067 spin_unlock_irqrestore(shost->host_lock, flags);
2068 __scsi_remove_device(sdev);
2069 goto restart;
2070 }
2071 spin_unlock_irqrestore(shost->host_lock, flags);
2072}
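
/*
 * Illustrative sketch (not compiled): scsi_forget_host() runs as part of
 * scsi_remove_host(), so a low-level driver's remove path only needs the
 * calls below.  The function name is hypothetical.
 */
#if 0
static void example_remove(struct Scsi_Host *shost)
{
	scsi_remove_host(shost);	/* tears down every sdev via scsi_forget_host() */
	scsi_host_put(shost);
}
#endif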
2073