v3.15
 
  1#ifndef _SCSI_SCSI_HOST_H
  2#define _SCSI_SCSI_HOST_H
  3
  4#include <linux/device.h>
  5#include <linux/list.h>
  6#include <linux/types.h>
  7#include <linux/workqueue.h>
  8#include <linux/mutex.h>
  9#include <linux/seq_file.h>
 
 10#include <scsi/scsi.h>
 11
 12struct request_queue;
 13struct block_device;
 14struct completion;
 15struct module;
 16struct scsi_cmnd;
 17struct scsi_device;
 18struct scsi_host_cmd_pool;
 19struct scsi_target;
 20struct Scsi_Host;
 21struct scsi_host_cmd_pool;
 22struct scsi_transport_template;
 23struct blk_queue_tags;
 24
 25
 26/*
 27 * The various choices mean:
 28 * NONE: Self evident.	Host adapter is not capable of scatter-gather.
 29 * ALL:	 Means that the host adapter module can do scatter-gather,
 30 *	 and that there is no limit to the size of the table to which
 31 *	 we scatter/gather data.  The value we set here is the maximum
 32 *	 single element sglist.  To use chained sglists, the adapter
 33 *	 has to set a value beyond ALL (and correctly use the chain
 34 *	 handling API).
 35 * Anything else:  Indicates the maximum number of chains that can be
 36 *	 used in one scatter-gather request.
 37 */
 38#define SG_NONE 0
 39#define SG_ALL	SCSI_MAX_SG_SEGMENTS
 40
 41#define MODE_UNKNOWN 0x00
 42#define MODE_INITIATOR 0x01
 43#define MODE_TARGET 0x02
 44
 45#define DISABLE_CLUSTERING 0
 46#define ENABLE_CLUSTERING 1
 47
 48enum {
 49	SCSI_QDEPTH_DEFAULT,	/* default requested change, e.g. from sysfs */
 50	SCSI_QDEPTH_QFULL,	/* scsi-ml requested due to queue full */
 51	SCSI_QDEPTH_RAMP_UP,	/* scsi-ml requested due to threshold event */
 52};
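/*
 * Illustrative sketch (not part of this header): how a low-level driver of
 * this era might consume the SCSI_QDEPTH_* reason codes in its
 * change_queue_depth() method.  The example_ name is hypothetical;
 * scsi_adjust_queue_depth(), scsi_track_queue_full() and scsi_get_tag_type()
 * are helpers declared in <scsi/scsi_device.h> and <scsi/scsi_tcq.h>.
 */
static int example_change_queue_depth(struct scsi_device *sdev,
				      int qdepth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:	/* e.g. a request from sysfs */
	case SCSI_QDEPTH_RAMP_UP:	/* midlayer ramping back up */
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:		/* device returned QUEUE FULL */
		scsi_track_queue_full(sdev, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}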
 53
 54struct scsi_host_template {
 55	struct module *module;
 56	const char *name;
 57
 58	/*
 59	 * Used to initialize old-style drivers.  For new-style drivers
 60	 * just perform all work in your module initialization function.
 61	 *
 62	 * Status:  OBSOLETE
 63	 */
 64	int (* detect)(struct scsi_host_template *);
 65
 66	/*
 67	 * Used as unload callback for hosts with old-style drivers.
 68	 *
 69	 * Status: OBSOLETE
 70	 */
 71	int (* release)(struct Scsi_Host *);
 72
 73	/*
 74	 * The info function will return whatever useful information the
 75	 * developer sees fit.  If not provided, then the name field will
 76	 * be used instead.
 77	 *
 78	 * Status: OPTIONAL
 79	 */
 80	const char *(* info)(struct Scsi_Host *);
 81
 82	/*
 83	 * Ioctl interface
 84	 *
 85	 * Status: OPTIONAL
 86	 */
 87	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
 88
 89
 90#ifdef CONFIG_COMPAT
 91	/* 
 92	 * Compat handler. Handle 32bit ABI.
 93	 * When unknown ioctl is passed return -ENOIOCTLCMD.
 94	 *
 95	 * Status: OPTIONAL
 96	 */
 97	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
 98#endif
 99
100	/*
101	 * The queuecommand function is used to queue up a scsi
102	 * command block to the LLDD.  When the driver finished
103	 * processing the command the done callback is invoked.
104	 *
105	 * If queuecommand returns 0, then the HBA has accepted the
106	 * command.  The done() function must be called on the command
107	 * when the driver has finished with it. (you may call done on the
108	 * command before queuecommand returns, but in this case you
109	 * *must* return 0 from queuecommand).
110	 *
111	 * Queuecommand may also reject the command, in which case it may
112	 * not touch the command and must not call done() for it.
113	 *
114	 * There are two possible rejection returns:
115	 *
116	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
117	 *   allow commands to other devices serviced by this host.
118	 *
119	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
120	 *   host temporarily.
121	 *
122         * For compatibility, any other non-zero return is treated the
123         * same as SCSI_MLQUEUE_HOST_BUSY.
124	 *
125	 * NOTE: "temporarily" means either until the next command for
126	 * this device/host completes, or a period of time determined by
127	 * I/O pressure in the system if there are no other outstanding
128	 * commands.
129	 *
130	 * STATUS: REQUIRED
131	 */
132	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
133
134	/*
135	 * The transfer functions are used to queue a scsi command to
136	 * the LLD. When the driver is finished processing the command
137	 * the done callback is invoked.
138	 *
139	 * This is called to inform the LLD to transfer
140	 * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) specifies the
141	 * number of scatterlist entries in the command and
142	 * scsi_sglist(cmd) returns the scatterlist.
143	 *
144	 * return values: see queuecommand
145	 *
146	 * If the LLD accepts the cmd, it should set the result to an
147	 * appropriate value when completed before calling the done function.
148	 *
149	 * STATUS: REQUIRED FOR TARGET DRIVERS
150	 */
151	/* TODO: rename */
152	int (* transfer_response)(struct scsi_cmnd *,
153				  void (*done)(struct scsi_cmnd *));
154
155	/*
156	 * This is an error handling strategy routine.  You don't need to
157	 * define one of these if you don't want to - there is a default
158	 * routine that is present that should work in most cases.  For those
159	 * driver authors that have the inclination and ability to write their
160	 * own strategy routine, this is where it is specified.  Note - the
161	 * strategy routine is *ALWAYS* run in the context of the kernel eh
162	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
163	 * handler when you execute this, and you are also guaranteed to
164	 * *NOT* have any other commands being queued while you are in the
165	 * strategy routine. When you return from this function, operations
166	 * return to normal.
167	 *
168	 * See scsi_error.c scsi_unjam_host for additional comments about
169	 * what this function should and should not be attempting to do.
170	 *
171	 * Status: REQUIRED	(at least one of them)
172	 */
173	int (* eh_abort_handler)(struct scsi_cmnd *);
174	int (* eh_device_reset_handler)(struct scsi_cmnd *);
175	int (* eh_target_reset_handler)(struct scsi_cmnd *);
176	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
177	int (* eh_host_reset_handler)(struct scsi_cmnd *);
178
179	/*
180	 * Before the mid layer attempts to scan for a new device where none
181	 * currently exists, it will call this entry in your driver.  Should
182	 * your driver need to allocate any structs or perform any other init
183	 * items in order to send commands to a currently unused target/lun
184	 * combo, then this is where you can perform those allocations.  This
185	 * is specifically so that drivers won't have to perform any kind of
186	 * "is this a new device" checks in their queuecommand routine,
187	 * thereby making the hot path a bit quicker.
188	 *
189	 * Return values: 0 on success, non-0 on failure
190	 *
191	 * Deallocation:  If we didn't find any devices at this ID, you will
192	 * get an immediate call to slave_destroy().  If we find something
193	 * here then you will get a call to slave_configure(), then the
194	 * device will be used for however long it is kept around, then when
195	 * the device is removed from the system (or * possibly at reboot
196	 * time), you will then get a call to slave_destroy().  This is
197	 * assuming you implement slave_configure and slave_destroy.
198	 * However, if you allocate memory and hang it off the device struct,
199	 * then you must implement the slave_destroy() routine at a minimum
200	 * in order to avoid leaking memory
201	 * each time a device is torn down.
202	 *
203	 * Status: OPTIONAL
204	 */
205	int (* slave_alloc)(struct scsi_device *);
206
207	/*
208	 * Once the device has responded to an INQUIRY and we know the
209	 * device is online, we call into the low level driver with the
210	 * struct scsi_device *.  If the low level device driver implements
211	 * this function, it *must* perform the task of setting the queue
212	 * depth on the device.  All other tasks are optional and depend
213	 * on what the driver supports and various implementation details.
214	 * 
215	 * Things currently recommended to be handled at this time include:
216	 *
217	 * 1.  Setting the device queue depth.  Proper setting of this is
218	 *     described in the comments for scsi_adjust_queue_depth.
219	 * 2.  Determining if the device supports the various synchronous
220	 *     negotiation protocols.  The device struct will already have
221	 *     responded to INQUIRY and the results of the standard items
222	 *     will have been shoved into the various device flag bits, eg.
223	 *     device->sdtr will be true if the device supports SDTR messages.
224	 * 3.  Allocating command structs that the device will need.
225	 * 4.  Setting the default timeout on this device (if needed).
226	 * 5.  Anything else the low level driver might want to do on a device
227	 *     specific setup basis...
228	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
229	 *     as offline on error so that no access will occur.  If you return
230	 *     non-0, your slave_destroy routine will never get called for this
231	 *     device, so don't leave any loose memory hanging around, clean
232	 *     up after yourself before returning non-0
233	 *
234	 * Status: OPTIONAL
235	 */
236	int (* slave_configure)(struct scsi_device *);
237
238	/*
239	 * Immediately prior to deallocating the device and after all activity
240	 * has ceased the mid layer calls this point so that the low level
241	 * driver may completely detach itself from the scsi device and vice
242	 * versa.  The low level driver is responsible for freeing any memory
243	 * it allocated in the slave_alloc or slave_configure calls. 
244	 *
245	 * Status: OPTIONAL
246	 */
247	void (* slave_destroy)(struct scsi_device *);
248
249	/*
250	 * Before the mid layer attempts to scan for a new device attached
251	 * to a target where no target currently exists, it will call this
252	 * entry in your driver.  Should your driver need to allocate any
253	 * structs or perform any other init items in order to send commands
254	 * to a currently unused target, then this is where you can perform
255	 * those allocations.
256	 *
257	 * Return values: 0 on success, non-0 on failure
258	 *
259	 * Status: OPTIONAL
260	 */
261	int (* target_alloc)(struct scsi_target *);
262
263	/*
264	 * Immediately prior to deallocating the target structure, and
265	 * after all activity to attached scsi devices has ceased, the
266	 * midlayer calls this point so that the driver may deallocate
267	 * and terminate any references to the target.
268	 *
269	 * Status: OPTIONAL
270	 */
271	void (* target_destroy)(struct scsi_target *);
272
273	/*
274	 * If a host has the ability to discover targets on its own instead
275	 * of scanning the entire bus, it can fill in this function and
276	 * call scsi_scan_host().  This function will be called periodically
277	 * until it returns 1 with the scsi_host and the elapsed time of
278	 * the scan in jiffies.
279	 *
280	 * Status: OPTIONAL
281	 */
282	int (* scan_finished)(struct Scsi_Host *, unsigned long);
283
284	/*
285	 * If the host wants to be called before the scan starts, but
286	 * after the midlayer has set up ready for the scan, it can fill
287	 * in this function.
288	 *
289	 * Status: OPTIONAL
290	 */
291	void (* scan_start)(struct Scsi_Host *);
292
293	/*
294	 * Fill in this function to allow the queue depth of this host
295	 * to be changeable (on a per device basis).  Returns either
296	 * the current queue depth setting (may be different from what
297	 * was passed in) or an error.  An error should only be
298	 * returned if the requested depth is legal but the driver was
299	 * unable to set it.  If the requested depth is illegal, the
300	 * driver should set and return the closest legal queue depth.
301	 *
302	 * Status: OPTIONAL
303	 */
304	int (* change_queue_depth)(struct scsi_device *, int, int);
305
306	/*
307	 * Fill in this function to allow the changing of tag types
308	 * (this also allows the enabling/disabling of tag command
309	 * queueing).  An error should only be returned if something
310	 * went wrong in the driver while trying to set the tag type.
311	 * If the driver doesn't support the requested tag type, then
312	 * it should set the closest type it does support without
313	 * returning an error.  Returns the actual tag type set.
314	 *
315	 * Status: OPTIONAL
316	 */
317	int (* change_queue_type)(struct scsi_device *, int);
318
319	/*
320	 * This function determines the BIOS parameters for a given
321	 * harddisk.  These tend to be numbers that are made up by
322	 * the host adapter.  Parameters:
323	 * size, device, list (heads, sectors, cylinders)
324	 *
325	 * Status: OPTIONAL
326	 */
327	int (* bios_param)(struct scsi_device *, struct block_device *,
328			sector_t, int []);
329
330	/*
331	 * This function is called when one or more partitions on the
332	 * device reach beyond the end of the device.
333	 *
334	 * Status: OPTIONAL
335	 */
336	void (*unlock_native_capacity)(struct scsi_device *);
337
338	/*
339	 * Can be used to export driver statistics and other info to the
340	 * world outside the kernel, i.e. userspace, and it also provides an
341	 * interface to feed the driver with information.
342	 *
343	 * Status: OBSOLETE
344	 */
345	int (*show_info)(struct seq_file *, struct Scsi_Host *);
346	int (*write_info)(struct Scsi_Host *, char *, int);
347
348	/*
349	 * This is an optional routine that allows the transport to become
350	 * involved when a scsi io timer fires. The return value tells the
351	 * timer routine how to finish the io timeout handling:
352	 * EH_HANDLED:		I fixed the error, please complete the command
353	 * EH_RESET_TIMER:	I need more time, reset the timer and
354	 *			begin counting again
355	 * EH_NOT_HANDLED:	Begin normal error recovery
356	 *
357	 * Status: OPTIONAL
358	 */
359	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
360
361	/* This is an optional routine that allows transport to initiate
362	 * LLD adapter or firmware reset using sysfs attribute.
363	 *
364	 * Return values: 0 on success, -ve value on failure.
365	 *
366	 * Status: OPTIONAL
367	 */
368
369	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
370#define SCSI_ADAPTER_RESET	1
371#define SCSI_FIRMWARE_RESET	2
372
373
374	/*
375	 * Name of proc directory
376	 */
377	const char *proc_name;
378
379	/*
380	 * Used to store the procfs directory if a driver implements the
381	 * show_info method.
382	 */
383	struct proc_dir_entry *proc_dir;
384
385	/*
386	 * This determines if we will use a non-interrupt driven
387	 * or an interrupt driven scheme.  It is set to the maximum number
388	 * of simultaneous commands a given host adapter will accept.
389	 */
390	int can_queue;
391
392	/*
393	 * In many instances, especially where disconnect / reconnect are
394	 * supported, our host also has an ID on the SCSI bus.  If this is
395	 * the case, then it must be reserved.  Please set this_id to -1 if
396	 * your setup is in single initiator mode, and the host lacks an
397	 * ID.
398	 */
399	int this_id;
400
401	/*
402	 * This determines the degree to which the host adapter is capable
403	 * of scatter-gather.
404	 */
405	unsigned short sg_tablesize;
406	unsigned short sg_prot_tablesize;
407
408	/*
409	 * Set this if the host adapter has limitations beside segment count.
410	 */
411	unsigned short max_sectors;
412
413	/*
414	 * DMA scatter gather segment boundary limit. A segment crossing this
415	 * boundary will be split in two.
416	 */
417	unsigned long dma_boundary;
418
419	/*
420	 * This specifies "machine infinity" for host templates which don't
421	 * limit the transfer size.  Note this limit represents an absolute
422	 * maximum, and may be over the transfer limits allowed for
423	 * individual devices (e.g. 256 for SCSI-1).
424	 */
425#define SCSI_DEFAULT_MAX_SECTORS	1024
426
427	/*
428	 * True if this host adapter can make good use of linked commands.
429	 * This will allow more than one command to be queued to a given
430	 * unit on a given host.  Set this to the maximum number of command
431	 * blocks to be provided for each device.  Set this to 1 for one
432	 * command block per lun, 2 for two, etc.  Do not set this to 0.
433	 * You should make sure that the host adapter will do the right thing
434	 * before you try setting this above 1.
435	 */
436	short cmd_per_lun;
437
438	/*
439	 * present contains a counter indicating how many boards of this
440	 * type were found when we did the scan.
441	 */
442	unsigned char present;
443
444	/*
445	 * This specifies the mode that a LLD supports.
446	 */
447	unsigned supported_mode:2;
448
449	/*
450	 * True if this host adapter uses unchecked DMA onto an ISA bus.
451	 */
452	unsigned unchecked_isa_dma:1;
453
454	/*
455	 * True if this host adapter can make good use of clustering.
456	 * I originally thought that if the tablesize was large that it
457	 * was a waste of CPU cycles to prepare a cluster list, but
458	 * it works out that the Buslogic is faster if you use a smaller
459	 * number of segments (i.e. use clustering).  I guess it is
460	 * inefficient.
461	 */
462	unsigned use_clustering:1;
463
464	/*
465	 * True for emulated SCSI host adapters (e.g. ATAPI).
466	 */
467	unsigned emulated:1;
468
469	/*
470	 * True if the low-level driver performs its own reset-settle delays.
471	 */
472	unsigned skip_settle_delay:1;
473
474	/*
475	 * True if we are using ordered write support.
476	 */
477	unsigned ordered_tag:1;
478
479	/* True if the controller does not support WRITE SAME */
480	unsigned no_write_same:1;
481
482	/*
483	 * True if asynchronous aborts are not supported
484	 */
485	unsigned no_async_abort:1;
486
487	/*
488	 * Countdown for host blocking with no commands outstanding.
489	 */
490	unsigned int max_host_blocked;
491
492	/*
493	 * Default value for the blocking.  If the queue is empty,
494	 * host_blocked counts down in the request_fn until it restarts
495	 * host operations as zero is reached.  
496	 *
497	 * FIXME: This should probably be a value in the template
498	 */
499#define SCSI_DEFAULT_HOST_BLOCKED	7
500
501	/*
502	 * Pointer to the sysfs class properties for this host, NULL terminated.
503	 */
504	struct device_attribute **shost_attrs;
505
506	/*
507	 * Pointer to the SCSI device properties for this host, NULL terminated.
508	 */
509	struct device_attribute **sdev_attrs;
510
511	/*
512	 * List of hosts per template.
513	 *
514	 * This is only for use by scsi_module.c for legacy templates.
515	 * For these access to it is synchronized implicitly by
516	 * module_init/module_exit.
517	 */
518	struct list_head legacy_hosts;
519
520	/*
521	 * Vendor Identifier associated with the host
522	 *
523	 * Note: When specifying vendor_id, be sure to read the
524	 *   Vendor Type and ID formatting requirements specified in
525	 *   scsi_netlink.h
526	 */
527	u64 vendor_id;
528
529	/*
530	 * Additional per-command data allocated for the driver.
531	 */
532	unsigned int cmd_size;
533	struct scsi_host_cmd_pool *cmd_pool;
534};
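/*
 * Illustrative sketch (not part of this header): a minimal scsi_host_template
 * a low-level driver might fill in.  The example_ functions are hypothetical
 * and assumed to be defined elsewhere in the driver; the field names and the
 * SG_ALL/ENABLE_CLUSTERING constants are the ones documented above.
 */
static struct scsi_host_template example_driver_template = {
	.module			= THIS_MODULE,
	.name			= "example HBA",
	.proc_name		= "example",
	.queuecommand		= example_queuecommand,
	.eh_abort_handler	= example_eh_abort,
	.eh_host_reset_handler	= example_eh_host_reset,
	.slave_configure	= example_slave_configure,
	.can_queue		= 64,	/* max simultaneous commands */
	.this_id		= -1,	/* host itself has no SCSI ID */
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.cmd_per_lun		= 4,
	.use_clustering		= ENABLE_CLUSTERING,
};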
535
536/*
537 * Temporary #define for host lock push down. Can be removed when all
538 * drivers have been updated to take advantage of unlocked
539 * queuecommand.
540 *
541 */
542#define DEF_SCSI_QCMD(func_name) \
543	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
544	{								\
545		unsigned long irq_flags;				\
546		int rc;							\
547		spin_lock_irqsave(shost->host_lock, irq_flags);		\
548		scsi_cmd_get_serial(shost, cmd);			\
549		rc = func_name##_lck (cmd, cmd->scsi_done);			\
550		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
551		return rc;						\
552	}
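/*
 * Illustrative sketch (not part of this header): a driver that still relies
 * on the host lock keeps its old "_lck" routine and wraps it with
 * DEF_SCSI_QCMD, which takes the host lock and assigns a serial number
 * before calling it.  The example_ names are hypothetical.
 */
static int example_queuecommand_lck(struct scsi_cmnd *cmd,
				    void (*done)(struct scsi_cmnd *))
{
	/* ... hand cmd to the hardware and arrange for done(cmd) later ... */
	return 0;
}

static DEF_SCSI_QCMD(example_queuecommand)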
553
554
555/*
556 * shost state: If you alter this, you also need to alter scsi_sysfs.c
557 * (for the ascii descriptions) and the state model enforcer:
558 * scsi_host_set_state()
559 */
560enum scsi_host_state {
561	SHOST_CREATED = 1,
562	SHOST_RUNNING,
563	SHOST_CANCEL,
564	SHOST_DEL,
565	SHOST_RECOVERY,
566	SHOST_CANCEL_RECOVERY,
567	SHOST_DEL_RECOVERY,
568};
569
570struct Scsi_Host {
571	/*
572	 * __devices is protected by the host_lock, but you should
573	 * usually use scsi_device_lookup / shost_for_each_device
574	 * to access it and don't care about locking yourself.
575	 * In the rare case of being in irq context you can use
576	 * their __ prefixed variants with the lock held. NEVER
577	 * access this list directly from a driver.
578	 */
579	struct list_head	__devices;
580	struct list_head	__targets;
581	
582	struct scsi_host_cmd_pool *cmd_pool;
583	spinlock_t		free_list_lock;
584	struct list_head	free_list; /* backup store of cmd structs */
585	struct list_head	starved_list;
586
587	spinlock_t		default_lock;
588	spinlock_t		*host_lock;
589
590	struct mutex		scan_mutex;/* serialize scanning activity */
591
592	struct list_head	eh_cmd_q;
593	struct task_struct    * ehandler;  /* Error recovery thread. */
594	struct completion     * eh_action; /* Wait for specific actions on the
595					      host. */
596	wait_queue_head_t       host_wait;
597	struct scsi_host_template *hostt;
598	struct scsi_transport_template *transportt;
599
600	/*
601	 * Area to keep a shared tag map (if needed, will be
602	 * NULL if not).
603	 */
604	struct blk_queue_tag	*bqt;
605
606	/*
607	 * The following two fields are protected with host_lock;
608	 * however, eh routines can safely access during eh processing
609	 * without acquiring the lock.
610	 */
611	unsigned int host_busy;		   /* commands actually active on low-level */
612	unsigned int host_failed;	   /* commands that failed. */
613	unsigned int host_eh_scheduled;    /* EH scheduled without command */
614    
615	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
616
617	/* next two fields are used to bound the time spent in error handling */
618	int eh_deadline;
619	unsigned long last_reset;
620
621
622	/*
623	 * These three parameters can be used to allow for wide scsi,
624	 * and for host adapters that support multiple busses
625	 * The first two should be set to 1 more than the actual max id
626	 * or lun (i.e. 8 for normal systems).
627	 */
628	unsigned int max_id;
629	unsigned int max_lun;
630	unsigned int max_channel;
631
632	/*
633	 * This is a unique identifier that must be assigned so that we
634	 * have some way of identifying each detected host adapter properly
635	 * and uniquely.  For hosts that do not support more than one card
636	 * in the system at one time, this does not need to be set.  It is
637	 * initialized to 0 in scsi_register.
638	 */
639	unsigned int unique_id;
640
641	/*
642	 * The maximum length of SCSI commands that this host can accept.
643	 * Probably 12 for most host adapters, but could be 16 for others,
644	 * or 260 if the driver supports variable length cdbs.
645	 * For drivers that don't set this field, a value of 12 is
646	 * assumed.
647	 */
648	unsigned short max_cmd_len;
649
650	int this_id;
651	int can_queue;
652	short cmd_per_lun;
653	short unsigned int sg_tablesize;
654	short unsigned int sg_prot_tablesize;
655	short unsigned int max_sectors;
656	unsigned long dma_boundary;
657	/* 
658	 * Used to assign serial numbers to the cmds.
659	 * Protected by the host lock.
660	 */
661	unsigned long cmd_serial_number;
662	
663	unsigned active_mode:2;
664	unsigned unchecked_isa_dma:1;
665	unsigned use_clustering:1;
666	unsigned use_blk_tcq:1;
667
668	/*
669	 * Host has requested that no further requests come through for the
670	 * time being.
671	 */
672	unsigned host_self_blocked:1;
673    
674	/*
675	 * Host uses correct SCSI ordering not PC ordering. The bit is
676	 * set for the minority of drivers whose authors actually read
677	 * the spec ;).
678	 */
679	unsigned reverse_ordering:1;
680
681	/*
682	 * Ordered write support
683	 */
684	unsigned ordered_tag:1;
685
686	/* Task mgmt function in progress */
687	unsigned tmf_in_progress:1;
688
689	/* Asynchronous scan in progress */
690	unsigned async_scan:1;
691
692	/* Don't resume host in EH */
693	unsigned eh_noresume:1;
694
695	/* The controller does not support WRITE SAME */
696	unsigned no_write_same:1;
697
698	/*
699	 * Optional work queue to be utilized by the transport
700	 */
701	char work_q_name[20];
702	struct workqueue_struct *work_q;
703
704	/*
705	 * Task management function work queue
706	 */
707	struct workqueue_struct *tmf_work_q;
708
709	/*
710	 * Host has rejected a command because it was busy.
711	 */
712	unsigned int host_blocked;
713
714	/*
715	 * Value host_blocked counts down from
716	 */
717	unsigned int max_host_blocked;
718
719	/* Protection Information */
720	unsigned int prot_capabilities;
721	unsigned char prot_guard_type;
722
723	/*
724	 * q used for scsi_tgt msgs, async events or any other requests that
725	 * need to be processed in userspace
726	 */
727	struct request_queue *uspace_req_q;
728
729	/* legacy crap */
730	unsigned long base;
731	unsigned long io_port;
732	unsigned char n_io_port;
733	unsigned char dma_channel;
734	unsigned int  irq;
735	
736
737	enum scsi_host_state shost_state;
738
739	/* ldm bits */
740	struct device		shost_gendev, shost_dev;
741
742	/*
743	 * List of hosts per template.
744	 *
745	 * This is only for use by scsi_module.c for legacy templates.
746	 * For these access to it is synchronized implicitly by
747	 * module_init/module_exit.
748	 */
749	struct list_head sht_legacy_list;
750
751	/*
752	 * Points to the transport data (if any) which is allocated
753	 * separately
754	 */
755	void *shost_data;
756
757	/*
758	 * Points to the physical bus device we'd use to do DMA
759	 * Needed just in case we have virtual hosts.
760	 */
761	struct device *dma_dev;
762
763	/*
764	 * We should ensure that this is aligned, both for better performance
765	 * and also because some compilers (m68k) don't automatically force
766	 * alignment to a long boundary.
767	 */
768	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
769		__attribute__ ((aligned (sizeof(unsigned long))));
770};
771
772#define		class_to_shost(d)	\
773	container_of(d, struct Scsi_Host, shost_dev)
774
775#define shost_printk(prefix, shost, fmt, a...)	\
776	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
777
778static inline void *shost_priv(struct Scsi_Host *shost)
779{
780	return (void *)shost->hostdata;
781}
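/*
 * Illustrative sketch (not part of this header): shost_priv() returns the
 * driver-private hostdata[] area whose size was passed to scsi_host_alloc(),
 * and shost_printk() prefixes messages with the host's device name.  The
 * example_ structure and function are hypothetical.
 */
struct example_hba {
	unsigned int event_count;	/* driver-private state */
};

static void example_handle_event(struct Scsi_Host *shost)
{
	struct example_hba *hba = shost_priv(shost);

	hba->event_count++;
	shost_printk(KERN_DEBUG, shost, "event %u handled\n", hba->event_count);
}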
782
783int scsi_is_host_device(const struct device *);
784
785static inline struct Scsi_Host *dev_to_shost(struct device *dev)
786{
787	while (!scsi_is_host_device(dev)) {
788		if (!dev->parent)
789			return NULL;
790		dev = dev->parent;
791	}
792	return container_of(dev, struct Scsi_Host, shost_gendev);
793}
794
795static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
796{
797	return shost->shost_state == SHOST_RECOVERY ||
798		shost->shost_state == SHOST_CANCEL_RECOVERY ||
799		shost->shost_state == SHOST_DEL_RECOVERY ||
800		shost->tmf_in_progress;
801}
802
803extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
804extern void scsi_flush_work(struct Scsi_Host *);
805
806extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
807extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
808					       struct device *,
809					       struct device *);
810extern void scsi_scan_host(struct Scsi_Host *);
811extern void scsi_rescan_device(struct device *);
812extern void scsi_remove_host(struct Scsi_Host *);
813extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
814extern void scsi_host_put(struct Scsi_Host *t);
815extern struct Scsi_Host *scsi_host_lookup(unsigned short);
816extern const char *scsi_host_state_name(enum scsi_host_state);
817extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
818
819extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
820
821static inline int __must_check scsi_add_host(struct Scsi_Host *host,
822					     struct device *dev)
823{
824	return scsi_add_host_with_dma(host, dev, dev);
825}
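/*
 * Illustrative sketch (not part of this header): the usual registration
 * sequence built from the calls declared above.  The example_ function and
 * the limits chosen are hypothetical; the alloc/add/scan and the matching
 * remove/put on teardown are the documented lifecycle.
 */
static int example_register_host(struct device *dev,
				 struct scsi_host_template *sht)
{
	struct Scsi_Host *shost;
	int err;

	shost = scsi_host_alloc(sht, 0);	/* no extra hostdata here */
	if (!shost)
		return -ENOMEM;

	shost->max_id = 16;		/* one more than the highest ID */
	shost->max_lun = 8;
	shost->max_channel = 0;

	err = scsi_add_host(shost, dev);	/* DMA via the same device */
	if (err) {
		scsi_host_put(shost);
		return err;
	}

	scsi_scan_host(shost);		/* probe for attached devices */
	return 0;
}

/* Teardown later pairs this with scsi_remove_host() and scsi_host_put(). */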
826
827static inline struct device *scsi_get_device(struct Scsi_Host *shost)
828{
829        return shost->shost_gendev.parent;
830}
831
832/**
833 * scsi_host_scan_allowed - Is scanning of this host allowed
834 * @shost:	Pointer to Scsi_Host.
835 **/
836static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
837{
838	return shost->shost_state == SHOST_RUNNING ||
839	       shost->shost_state == SHOST_RECOVERY;
840}
841
842extern void scsi_unblock_requests(struct Scsi_Host *);
843extern void scsi_block_requests(struct Scsi_Host *);
844
845struct class_container;
846
847extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
848						void (*) (struct request_queue *));
849/*
850 * These two functions are used to allocate and free a pseudo device
851 * which will connect to the host adapter itself rather than any
852 * physical device.  You must deallocate when you are done with the
853 * thing.  This physical pseudo-device isn't real and won't be available
854 * from any high-level drivers.
855 */
856extern void scsi_free_host_dev(struct scsi_device *);
857extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
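/*
 * Illustrative sketch (not part of this header): talking to the adapter
 * itself through the pseudo device described above.  The actual command
 * submission is elided; the point is the get/free pairing.
 */
static int example_talk_to_adapter(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	sdev = scsi_get_host_dev(shost);	/* pseudo device for the HBA */
	if (!sdev)
		return -ENOMEM;

	/* ... issue internal commands through sdev here ... */

	scsi_free_host_dev(sdev);		/* must pair with the get */
	return 0;
}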
858
859/*
860 * DIF defines the exchange of protection information between
861 * initiator and SBC block device.
862 *
863 * DIX defines the exchange of protection information between OS and
864 * initiator.
865 */
866enum scsi_host_prot_capabilities {
867	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
868	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
869	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
870
871	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
872	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
873	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
874	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
875};
876
877/*
878 * SCSI hosts which support the Data Integrity Extensions must
879 * indicate their capabilities by setting the prot_capabilities using
880 * this call.
881 */
882static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
883{
884	shost->prot_capabilities = mask;
885}
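/*
 * Illustrative sketch (not part of this header): a DIX-capable driver
 * advertising T10 DIF Type 1 on the wire plus DIX between OS and HBA,
 * typically from its probe path.  The capability mix is only an example.
 */
static void example_setup_protection(struct Scsi_Host *shost)
{
	scsi_host_set_prot(shost,
			   SHOST_DIF_TYPE1_PROTECTION |
			   SHOST_DIX_TYPE0_PROTECTION |
			   SHOST_DIX_TYPE1_PROTECTION);
}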
886
887static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
888{
889	return shost->prot_capabilities;
890}
891
892static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
893{
894	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
895}
896
897static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
898{
899	static unsigned char cap[] = { 0,
900				       SHOST_DIF_TYPE1_PROTECTION,
901				       SHOST_DIF_TYPE2_PROTECTION,
902				       SHOST_DIF_TYPE3_PROTECTION };
903
904	if (target_type >= ARRAY_SIZE(cap))
905		return 0;
906
907	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
908}
909
910static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
911{
912#if defined(CONFIG_BLK_DEV_INTEGRITY)
913	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
914				       SHOST_DIX_TYPE1_PROTECTION,
915				       SHOST_DIX_TYPE2_PROTECTION,
916				       SHOST_DIX_TYPE3_PROTECTION };
917
918	if (target_type >= ARRAY_SIZE(cap))
919		return 0;
920
921	return shost->prot_capabilities & cap[target_type];
922#endif
923	return 0;
924}
925
926/*
927 * All DIX-capable initiators must support the T10-mandated CRC
928 * checksum.  Controllers can optionally implement the IP checksum
929 * scheme which has much lower impact on system performance.  Note
930 * that the main rationale for the checksum is to match integrity
931 * metadata with data.  Detecting bit errors are a job for ECC memory
932 * and buses.
933 */
934
935enum scsi_host_guard_type {
936	SHOST_DIX_GUARD_CRC = 1 << 0,
937	SHOST_DIX_GUARD_IP  = 1 << 1,
938};
939
940static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
941{
942	shost->prot_guard_type = type;
943}
944
945static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
946{
947	return shost->prot_guard_type;
948}
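/*
 * Illustrative sketch (not part of this header): selecting the guard tag
 * checksum the HBA can generate and verify.  A controller that can also
 * offload the lighter IP checksum advertises both guard types; the
 * hw_ip_checksum flag is a hypothetical driver capability.
 */
static void example_setup_guard(struct Scsi_Host *shost, bool hw_ip_checksum)
{
	unsigned char guard = SHOST_DIX_GUARD_CRC;

	if (hw_ip_checksum)
		guard |= SHOST_DIX_GUARD_IP;

	scsi_host_set_guard(shost, guard);
}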
949
950/* legacy interfaces */
951extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
952extern void scsi_unregister(struct Scsi_Host *);
953extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
954
955#endif /* _SCSI_SCSI_HOST_H */
v4.17
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _SCSI_SCSI_HOST_H
  3#define _SCSI_SCSI_HOST_H
  4
  5#include <linux/device.h>
  6#include <linux/list.h>
  7#include <linux/types.h>
  8#include <linux/workqueue.h>
  9#include <linux/mutex.h>
 10#include <linux/seq_file.h>
 11#include <linux/blk-mq.h>
 12#include <scsi/scsi.h>
 13
 14struct request_queue;
 15struct block_device;
 16struct completion;
 17struct module;
 18struct scsi_cmnd;
 19struct scsi_device;
 20struct scsi_host_cmd_pool;
 21struct scsi_target;
 22struct Scsi_Host;
 23struct scsi_host_cmd_pool;
 24struct scsi_transport_template;
 25struct blk_queue_tags;
 26
 27
 28/*
 29 * The various choices mean:
 30 * NONE: Self evident.	Host adapter is not capable of scatter-gather.
 31 * ALL:	 Means that the host adapter module can do scatter-gather,
 32 *	 and that there is no limit to the size of the table to which
 33 *	 we scatter/gather data.  The value we set here is the maximum
 34 *	 single element sglist.  To use chained sglists, the adapter
 35 *	 has to set a value beyond ALL (and correctly use the chain
 36 *	 handling API).
 37 * Anything else:  Indicates the maximum number of chains that can be
 38 *	 used in one scatter-gather request.
 39 */
 40#define SG_NONE 0
 41#define SG_ALL	SG_CHUNK_SIZE
 42
 43#define MODE_UNKNOWN 0x00
 44#define MODE_INITIATOR 0x01
 45#define MODE_TARGET 0x02
 46
 47#define DISABLE_CLUSTERING 0
 48#define ENABLE_CLUSTERING 1
 49
 50struct scsi_host_template {
 51	struct module *module;
 52	const char *name;
 53
 54	/*
 55	 * The info function will return whatever useful information the
 56	 * developer sees fit.  If not provided, then the name field will
 57	 * be used instead.
 58	 *
 59	 * Status: OPTIONAL
 60	 */
 61	const char *(* info)(struct Scsi_Host *);
 62
 63	/*
 64	 * Ioctl interface
 65	 *
 66	 * Status: OPTIONAL
 67	 */
 68	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
 69
 70
 71#ifdef CONFIG_COMPAT
 72	/* 
 73	 * Compat handler. Handle 32bit ABI.
 74	 * When unknown ioctl is passed return -ENOIOCTLCMD.
 75	 *
 76	 * Status: OPTIONAL
 77	 */
 78	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
 79#endif
 80
 81	/*
 82	 * The queuecommand function is used to queue up a scsi
 83	 * command block to the LLDD.  When the driver finished
 84	 * processing the command the done callback is invoked.
 85	 *
 86	 * If queuecommand returns 0, then the HBA has accepted the
 87	 * command.  The done() function must be called on the command
 88	 * when the driver has finished with it. (you may call done on the
 89	 * command before queuecommand returns, but in this case you
 90	 * *must* return 0 from queuecommand).
 91	 *
 92	 * Queuecommand may also reject the command, in which case it may
 93	 * not touch the command and must not call done() for it.
 94	 *
 95	 * There are two possible rejection returns:
 96	 *
 97	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
 98	 *   allow commands to other devices serviced by this host.
 99	 *
100	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
101	 *   host temporarily.
102	 *
103         * For compatibility, any other non-zero return is treated the
104         * same as SCSI_MLQUEUE_HOST_BUSY.
105	 *
106	 * NOTE: "temporarily" means either until the next command for
107	 * this device/host completes, or a period of time determined by
108	 * I/O pressure in the system if there are no other outstanding
109	 * commands.
110	 *
111	 * STATUS: REQUIRED
112	 */
113	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
114
115	/*
116	 * This is an error handling strategy routine.  You don't need to
117	 * define one of these if you don't want to - there is a default
118	 * routine that is present that should work in most cases.  For those
119	 * driver authors that have the inclination and ability to write their
120	 * own strategy routine, this is where it is specified.  Note - the
121	 * strategy routine is *ALWAYS* run in the context of the kernel eh
122	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
123	 * handler when you execute this, and you are also guaranteed to
124	 * *NOT* have any other commands being queued while you are in the
125	 * strategy routine. When you return from this function, operations
126	 * return to normal.
127	 *
128	 * See scsi_error.c scsi_unjam_host for additional comments about
129	 * what this function should and should not be attempting to do.
130	 *
131	 * Status: REQUIRED	(at least one of them)
132	 */
133	int (* eh_abort_handler)(struct scsi_cmnd *);
134	int (* eh_device_reset_handler)(struct scsi_cmnd *);
135	int (* eh_target_reset_handler)(struct scsi_cmnd *);
136	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
137	int (* eh_host_reset_handler)(struct scsi_cmnd *);
138
139	/*
140	 * Before the mid layer attempts to scan for a new device where none
141	 * currently exists, it will call this entry in your driver.  Should
142	 * your driver need to allocate any structs or perform any other init
143	 * items in order to send commands to a currently unused target/lun
144	 * combo, then this is where you can perform those allocations.  This
145	 * is specifically so that drivers won't have to perform any kind of
146	 * "is this a new device" checks in their queuecommand routine,
147	 * thereby making the hot path a bit quicker.
148	 *
149	 * Return values: 0 on success, non-0 on failure
150	 *
151	 * Deallocation:  If we didn't find any devices at this ID, you will
152	 * get an immediate call to slave_destroy().  If we find something
153	 * here then you will get a call to slave_configure(), then the
154	 * device will be used for however long it is kept around, then when
155	 * the device is removed from the system (or * possibly at reboot
156	 * time), you will then get a call to slave_destroy().  This is
157	 * assuming you implement slave_configure and slave_destroy.
158	 * However, if you allocate memory and hang it off the device struct,
159	 * then you must implement the slave_destroy() routine at a minimum
160	 * in order to avoid leaking memory
161	 * each time a device is torn down.
162	 *
163	 * Status: OPTIONAL
164	 */
165	int (* slave_alloc)(struct scsi_device *);
166
167	/*
168	 * Once the device has responded to an INQUIRY and we know the
169	 * device is online, we call into the low level driver with the
170	 * struct scsi_device *.  If the low level device driver implements
171	 * this function, it *must* perform the task of setting the queue
172	 * depth on the device.  All other tasks are optional and depend
173	 * on what the driver supports and various implementation details.
174	 * 
175	 * Things currently recommended to be handled at this time include:
176	 *
177	 * 1.  Setting the device queue depth.  Proper setting of this is
178	 *     described in the comments for scsi_change_queue_depth.
179	 * 2.  Determining if the device supports the various synchronous
180	 *     negotiation protocols.  The device struct will already have
181	 *     responded to INQUIRY and the results of the standard items
182	 *     will have been shoved into the various device flag bits, eg.
183	 *     device->sdtr will be true if the device supports SDTR messages.
184	 * 3.  Allocating command structs that the device will need.
185	 * 4.  Setting the default timeout on this device (if needed).
186	 * 5.  Anything else the low level driver might want to do on a device
187	 *     specific setup basis...
188	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
189	 *     as offline on error so that no access will occur.  If you return
190	 *     non-0, your slave_destroy routine will never get called for this
191	 *     device, so don't leave any loose memory hanging around, clean
192	 *     up after yourself before returning non-0
193	 *
194	 * Status: OPTIONAL
195	 */
196	int (* slave_configure)(struct scsi_device *);
197
198	/*
199	 * Immediately prior to deallocating the device and after all activity
200	 * has ceased the mid layer calls this point so that the low level
201	 * driver may completely detach itself from the scsi device and vice
202	 * versa.  The low level driver is responsible for freeing any memory
203	 * it allocated in the slave_alloc or slave_configure calls. 
204	 *
205	 * Status: OPTIONAL
206	 */
207	void (* slave_destroy)(struct scsi_device *);
208
209	/*
210	 * Before the mid layer attempts to scan for a new device attached
211	 * to a target where no target currently exists, it will call this
212	 * entry in your driver.  Should your driver need to allocate any
213	 * structs or perform any other init items in order to send commands
214	 * to a currently unused target, then this is where you can perform
215	 * those allocations.
216	 *
217	 * Return values: 0 on success, non-0 on failure
218	 *
219	 * Status: OPTIONAL
220	 */
221	int (* target_alloc)(struct scsi_target *);
222
223	/*
224	 * Immediately prior to deallocating the target structure, and
225	 * after all activity to attached scsi devices has ceased, the
226	 * midlayer calls this point so that the driver may deallocate
227	 * and terminate any references to the target.
228	 *
229	 * Status: OPTIONAL
230	 */
231	void (* target_destroy)(struct scsi_target *);
232
233	/*
234	 * If a host has the ability to discover targets on its own instead
235	 * of scanning the entire bus, it can fill in this function and
236	 * call scsi_scan_host().  This function will be called periodically
237	 * until it returns 1 with the scsi_host and the elapsed time of
238	 * the scan in jiffies.
239	 *
240	 * Status: OPTIONAL
241	 */
242	int (* scan_finished)(struct Scsi_Host *, unsigned long);
243
244	/*
245	 * If the host wants to be called before the scan starts, but
246	 * after the midlayer has set up ready for the scan, it can fill
247	 * in this function.
248	 *
249	 * Status: OPTIONAL
250	 */
251	void (* scan_start)(struct Scsi_Host *);
252
253	/*
254	 * Fill in this function to allow the queue depth of this host
255	 * to be changeable (on a per device basis).  Returns either
256	 * the current queue depth setting (may be different from what
257	 * was passed in) or an error.  An error should only be
258	 * returned if the requested depth is legal but the driver was
259	 * unable to set it.  If the requested depth is illegal, the
260	 * driver should set and return the closest legal queue depth.
261	 *
262	 * Status: OPTIONAL
263	 */
264	int (* change_queue_depth)(struct scsi_device *, int);
265
266	/*
267	 * This function lets the driver expose the queue mapping
268	 * to the block layer.
269	 *
270	 * Status: OPTIONAL
271	 */
272	int (* map_queues)(struct Scsi_Host *shost);
273
274	/*
275	 * This function determines the BIOS parameters for a given
276	 * harddisk.  These tend to be numbers that are made up by
277	 * the host adapter.  Parameters:
278	 * size, device, list (heads, sectors, cylinders)
279	 *
280	 * Status: OPTIONAL
281	 */
282	int (* bios_param)(struct scsi_device *, struct block_device *,
283			sector_t, int []);
284
285	/*
286	 * This function is called when one or more partitions on the
287	 * device reach beyond the end of the device.
288	 *
289	 * Status: OPTIONAL
290	 */
291	void (*unlock_native_capacity)(struct scsi_device *);
292
293	/*
294	 * Can be used to export driver statistics and other info to the
295	 * world outside the kernel, i.e. userspace, and it also provides an
296	 * interface to feed the driver with information.
297	 *
298	 * Status: OBSOLETE
299	 */
300	int (*show_info)(struct seq_file *, struct Scsi_Host *);
301	int (*write_info)(struct Scsi_Host *, char *, int);
302
303	/*
304	 * This is an optional routine that allows the transport to become
305	 * involved when a scsi io timer fires. The return value tells the
306	 * timer routine how to finish the io timeout handling:
307	 * EH_HANDLED:		I fixed the error, please complete the command
308	 * EH_RESET_TIMER:	I need more time, reset the timer and
309	 *			begin counting again
310	 * EH_NOT_HANDLED:	Begin normal error recovery
311	 *
312	 * Status: OPTIONAL
313	 */
314	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
315
316	/* This is an optional routine that allows transport to initiate
317	 * LLD adapter or firmware reset using sysfs attribute.
318	 *
319	 * Return values: 0 on success, -ve value on failure.
320	 *
321	 * Status: OPTIONAL
322	 */
323
324	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
325#define SCSI_ADAPTER_RESET	1
326#define SCSI_FIRMWARE_RESET	2
327
328
329	/*
330	 * Name of proc directory
331	 */
332	const char *proc_name;
333
334	/*
335	 * Used to store the procfs directory if a driver implements the
336	 * show_info method.
337	 */
338	struct proc_dir_entry *proc_dir;
339
340	/*
341	 * This determines if we will use a non-interrupt driven
342	 * or an interrupt driven scheme.  It is set to the maximum number
343	 * of simultaneous commands a given host adapter will accept.
344	 */
345	int can_queue;
346
347	/*
348	 * In many instances, especially where disconnect / reconnect are
349	 * supported, our host also has an ID on the SCSI bus.  If this is
350	 * the case, then it must be reserved.  Please set this_id to -1 if
351	 * your setup is in single initiator mode, and the host lacks an
352	 * ID.
353	 */
354	int this_id;
355
356	/*
357	 * This determines the degree to which the host adapter is capable
358	 * of scatter-gather.
359	 */
360	unsigned short sg_tablesize;
361	unsigned short sg_prot_tablesize;
362
363	/*
364	 * Set this if the host adapter has limitations beside segment count.
365	 */
366	unsigned int max_sectors;
367
368	/*
369	 * DMA scatter gather segment boundary limit. A segment crossing this
370	 * boundary will be split in two.
371	 */
372	unsigned long dma_boundary;
373
374	/*
375	 * This specifies "machine infinity" for host templates which don't
376	 * limit the transfer size.  Note this limit represents an absolute
377	 * maximum, and may be over the transfer limits allowed for
378	 * individual devices (e.g. 256 for SCSI-1).
379	 */
380#define SCSI_DEFAULT_MAX_SECTORS	1024
381
382	/*
383	 * True if this host adapter can make good use of linked commands.
384	 * This will allow more than one command to be queued to a given
385	 * unit on a given host.  Set this to the maximum number of command
386	 * blocks to be provided for each device.  Set this to 1 for one
387	 * command block per lun, 2 for two, etc.  Do not set this to 0.
388	 * You should make sure that the host adapter will do the right thing
389	 * before you try setting this above 1.
390	 */
391	short cmd_per_lun;
392
393	/*
394	 * present contains a counter indicating how many boards of this
395	 * type were found when we did the scan.
396	 */
397	unsigned char present;
398
399	/* If the block layer manages tags, this is the tag allocation policy */
400	int tag_alloc_policy;
401
402	/*
403	 * Track QUEUE_FULL events and reduce queue depth on demand.
404	 */
405	unsigned track_queue_depth:1;
406
407	/*
408	 * This specifies the mode that a LLD supports.
409	 */
410	unsigned supported_mode:2;
411
412	/*
413	 * True if this host adapter uses unchecked DMA onto an ISA bus.
414	 */
415	unsigned unchecked_isa_dma:1;
416
417	/*
418	 * True if this host adapter can make good use of clustering.
419	 * I originally thought that if the tablesize was large that it
420	 * was a waste of CPU cycles to prepare a cluster list, but
421	 * it works out that the Buslogic is faster if you use a smaller
422	 * number of segments (i.e. use clustering).  I guess it is
423	 * inefficient.
424	 */
425	unsigned use_clustering:1;
426
427	/*
428	 * True for emulated SCSI host adapters (e.g. ATAPI).
429	 */
430	unsigned emulated:1;
431
432	/*
433	 * True if the low-level driver performs its own reset-settle delays.
434	 */
435	unsigned skip_settle_delay:1;
436
437	/* True if the controller does not support WRITE SAME */
438	unsigned no_write_same:1;
439
440	/* True if the low-level driver supports blk-mq only */
441	unsigned force_blk_mq:1;
442
443	/*
444	 * Countdown for host blocking with no commands outstanding.
445	 */
446	unsigned int max_host_blocked;
447
448	/*
449	 * Default value for the blocking.  If the queue is empty,
450	 * host_blocked counts down in the request_fn until it restarts
451	 * host operations as zero is reached.  
452	 *
453	 * FIXME: This should probably be a value in the template
454	 */
455#define SCSI_DEFAULT_HOST_BLOCKED	7
456
457	/*
458	 * Pointer to the sysfs class properties for this host, NULL terminated.
459	 */
460	struct device_attribute **shost_attrs;
461
462	/*
463	 * Pointer to the SCSI device properties for this host, NULL terminated.
464	 */
465	struct device_attribute **sdev_attrs;
466
467	/*
468	 * Pointer to the SCSI device attribute groups for this host,
469	 * NULL terminated.
470	 */
471	const struct attribute_group **sdev_groups;
472
473	/*
474	 * Vendor Identifier associated with the host
475	 *
476	 * Note: When specifying vendor_id, be sure to read the
477	 *   Vendor Type and ID formatting requirements specified in
478	 *   scsi_netlink.h
479	 */
480	u64 vendor_id;
481
482	/*
483	 * Additional per-command data allocated for the driver.
484	 */
485	unsigned int cmd_size;
486	struct scsi_host_cmd_pool *cmd_pool;
487};
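/*
 * Illustrative sketch (not part of this header): the same kind of minimal
 * template as in the v3.15 listing, adjusted to the v4.17 interface:
 * change_queue_depth() now takes only the requested depth (the generic
 * scsi_change_queue_depth() helper from <scsi/scsi_device.h> can be used
 * directly), and a blk-mq-only driver sets force_blk_mq and may provide
 * map_queues().  The example_ names are hypothetical.
 */
static struct scsi_host_template example_mq_template = {
	.module			= THIS_MODULE,
	.name			= "example blk-mq HBA",
	.proc_name		= "example_mq",
	.queuecommand		= example_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.map_queues		= example_map_queues,
	.eh_abort_handler	= example_eh_abort,
	.eh_host_reset_handler	= example_eh_host_reset,
	.can_queue		= 128,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.cmd_per_lun		= 8,
	.use_clustering		= ENABLE_CLUSTERING,
	.track_queue_depth	= 1,
	.force_blk_mq		= 1,
};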
488
489/*
490 * Temporary #define for host lock push down. Can be removed when all
491 * drivers have been updated to take advantage of unlocked
492 * queuecommand.
493 *
494 */
495#define DEF_SCSI_QCMD(func_name) \
496	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
497	{								\
498		unsigned long irq_flags;				\
499		int rc;							\
500		spin_lock_irqsave(shost->host_lock, irq_flags);		\
501		scsi_cmd_get_serial(shost, cmd);			\
502		rc = func_name##_lck (cmd, cmd->scsi_done);			\
503		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
504		return rc;						\
505	}
506
507
508/*
509 * shost state: If you alter this, you also need to alter scsi_sysfs.c
510 * (for the ascii descriptions) and the state model enforcer:
511 * scsi_host_set_state()
512 */
513enum scsi_host_state {
514	SHOST_CREATED = 1,
515	SHOST_RUNNING,
516	SHOST_CANCEL,
517	SHOST_DEL,
518	SHOST_RECOVERY,
519	SHOST_CANCEL_RECOVERY,
520	SHOST_DEL_RECOVERY,
521};
522
523struct Scsi_Host {
524	/*
525	 * __devices is protected by the host_lock, but you should
526	 * usually use scsi_device_lookup / shost_for_each_device
527	 * to access it and don't care about locking yourself.
528	 * In the rare case of being in irq context you can use
529	 * their __ prefixed variants with the lock held. NEVER
530	 * access this list directly from a driver.
531	 */
532	struct list_head	__devices;
533	struct list_head	__targets;
534	
535	struct list_head	starved_list;
536
537	spinlock_t		default_lock;
538	spinlock_t		*host_lock;
539
540	struct mutex		scan_mutex;/* serialize scanning activity */
541
542	struct list_head	eh_cmd_q;
543	struct task_struct    * ehandler;  /* Error recovery thread. */
544	struct completion     * eh_action; /* Wait for specific actions on the
545					      host. */
546	wait_queue_head_t       host_wait;
547	struct scsi_host_template *hostt;
548	struct scsi_transport_template *transportt;
549
550	/*
551	 * Area to keep a shared tag map (if needed, will be
552	 * NULL if not).
553	 */
554	union {
555		struct blk_queue_tag	*bqt;
556		struct blk_mq_tag_set	tag_set;
557	};
558
559	atomic_t host_busy;		   /* commands actually active on low-level */
560	atomic_t host_blocked;
561
562	unsigned int host_failed;	   /* commands that failed.
563					      protected by host_lock */
564	unsigned int host_eh_scheduled;    /* EH scheduled without command */
565    
566	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
567
568	/* next two fields are used to bound the time spent in error handling */
569	int eh_deadline;
570	unsigned long last_reset;
571
572
573	/*
574	 * These three parameters can be used to allow for wide scsi,
575	 * and for host adapters that support multiple busses
576	 * The last two should be set to 1 more than the actual max id
577	 * or lun (e.g. 8 for SCSI parallel systems).
578	 */
579	unsigned int max_channel;
580	unsigned int max_id;
581	u64 max_lun;
582
583	/*
584	 * This is a unique identifier that must be assigned so that we
585	 * have some way of identifying each detected host adapter properly
586	 * and uniquely.  For hosts that do not support more than one card
587	 * in the system at one time, this does not need to be set.  It is
588	 * initialized to 0 in scsi_register.
589	 */
590	unsigned int unique_id;
591
592	/*
593	 * The maximum length of SCSI commands that this host can accept.
594	 * Probably 12 for most host adapters, but could be 16 for others,
595	 * or 260 if the driver supports variable length cdbs.
596	 * For drivers that don't set this field, a value of 12 is
597	 * assumed.
598	 */
599	unsigned short max_cmd_len;
600
601	int this_id;
602	int can_queue;
603	short cmd_per_lun;
604	short unsigned int sg_tablesize;
605	short unsigned int sg_prot_tablesize;
606	unsigned int max_sectors;
607	unsigned long dma_boundary;
608	/*
609	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
610	 *
611	 * Note: it is assumed that each hardware queue has a queue depth of
612	 * can_queue. In other words, the total queue depth per host
613	 * is nr_hw_queues * can_queue.
614	 */
615	unsigned nr_hw_queues;
616	/* 
617	 * Used to assign serial numbers to the cmds.
618	 * Protected by the host lock.
619	 */
620	unsigned long cmd_serial_number;
621	
622	unsigned active_mode:2;
623	unsigned unchecked_isa_dma:1;
624	unsigned use_clustering:1;
 
625
626	/*
627	 * Host has requested that no further requests come through for the
628	 * time being.
629	 */
630	unsigned host_self_blocked:1;
631    
632	/*
633	 * Host uses correct SCSI ordering not PC ordering. The bit is
634	 * set for the minority of drivers whose authors actually read
635	 * the spec ;).
636	 */
637	unsigned reverse_ordering:1;
638
639	/* Task mgmt function in progress */
640	unsigned tmf_in_progress:1;
641
642	/* Asynchronous scan in progress */
643	unsigned async_scan:1;
644
645	/* Don't resume host in EH */
646	unsigned eh_noresume:1;
647
648	/* The controller does not support WRITE SAME */
649	unsigned no_write_same:1;
650
651	unsigned use_blk_mq:1;
652	unsigned use_cmd_list:1;
653
654	/* Host responded with short (<36 bytes) INQUIRY result */
655	unsigned short_inquiry:1;
656
657	/*
658	 * Optional work queue to be utilized by the transport
659	 */
660	char work_q_name[20];
661	struct workqueue_struct *work_q;
662
663	/*
664	 * Task management function work queue
665	 */
666	struct workqueue_struct *tmf_work_q;
667
668	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
669	unsigned no_scsi2_lun_in_cdb:1;
670
671	/*
672	 * Value host_blocked counts down from
673	 */
674	unsigned int max_host_blocked;
675
676	/* Protection Information */
677	unsigned int prot_capabilities;
678	unsigned char prot_guard_type;
679
680	/* legacy crap */
681	unsigned long base;
682	unsigned long io_port;
683	unsigned char n_io_port;
684	unsigned char dma_channel;
685	unsigned int  irq;
686	
687
688	enum scsi_host_state shost_state;
689
690	/* ldm bits */
691	struct device		shost_gendev, shost_dev;
692
693	/*
694	 * Points to the transport data (if any) which is allocated
695	 * separately
696	 */
697	void *shost_data;
698
699	/*
700	 * Points to the physical bus device we'd use to do DMA
701	 * Needed just in case we have virtual hosts.
702	 */
703	struct device *dma_dev;
704
705	/*
706	 * We should ensure that this is aligned, both for better performance
707	 * and also because some compilers (m68k) don't automatically force
708	 * alignment to a long boundary.
709	 */
710	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
711		__attribute__ ((aligned (sizeof(unsigned long))));
712};
713
714#define		class_to_shost(d)	\
715	container_of(d, struct Scsi_Host, shost_dev)
716
717#define shost_printk(prefix, shost, fmt, a...)	\
718	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
719
720static inline void *shost_priv(struct Scsi_Host *shost)
721{
722	return (void *)shost->hostdata;
723}
724
725int scsi_is_host_device(const struct device *);
726
727static inline struct Scsi_Host *dev_to_shost(struct device *dev)
728{
729	while (!scsi_is_host_device(dev)) {
730		if (!dev->parent)
731			return NULL;
732		dev = dev->parent;
733	}
734	return container_of(dev, struct Scsi_Host, shost_gendev);
735}
736
737static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
738{
739	return shost->shost_state == SHOST_RECOVERY ||
740		shost->shost_state == SHOST_CANCEL_RECOVERY ||
741		shost->shost_state == SHOST_DEL_RECOVERY ||
742		shost->tmf_in_progress;
743}
744
745static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
746{
747	return shost->use_blk_mq;
748}
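/*
 * Illustrative sketch (not part of this header): during the scsi-mq
 * transition a driver could branch on shost_use_blk_mq() where the legacy
 * and blk-mq paths differ, e.g. when sizing per-host command resources.
 * The helper name is hypothetical; nr_hw_queues of 0 means a single queue.
 */
static unsigned int example_total_cmds(struct Scsi_Host *shost)
{
	if (shost_use_blk_mq(shost) && shost->nr_hw_queues)
		return shost->nr_hw_queues * shost->can_queue;

	return shost->can_queue;	/* legacy path or single hw queue */
}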
749
750extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
751extern void scsi_flush_work(struct Scsi_Host *);
752
753extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
754extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
755					       struct device *,
756					       struct device *);
757extern void scsi_scan_host(struct Scsi_Host *);
758extern void scsi_rescan_device(struct device *);
759extern void scsi_remove_host(struct Scsi_Host *);
760extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
761extern void scsi_host_put(struct Scsi_Host *t);
762extern struct Scsi_Host *scsi_host_lookup(unsigned short);
763extern const char *scsi_host_state_name(enum scsi_host_state);
764extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
765
766static inline int __must_check scsi_add_host(struct Scsi_Host *host,
767					     struct device *dev)
768{
769	return scsi_add_host_with_dma(host, dev, dev);
770}
771
772static inline struct device *scsi_get_device(struct Scsi_Host *shost)
773{
774        return shost->shost_gendev.parent;
775}
776
777/**
778 * scsi_host_scan_allowed - Is scanning of this host allowed
779 * @shost:	Pointer to Scsi_Host.
780 **/
781static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
782{
783	return shost->shost_state == SHOST_RUNNING ||
784	       shost->shost_state == SHOST_RECOVERY;
785}
786
787extern void scsi_unblock_requests(struct Scsi_Host *);
788extern void scsi_block_requests(struct Scsi_Host *);
789
790struct class_container;
791
792/*
793 * These two functions are used to allocate and free a pseudo device
794 * which will connect to the host adapter itself rather than any
795 * physical device.  You must deallocate when you are done with the
796 * thing.  This physical pseudo-device isn't real and won't be available
797 * from any high-level drivers.
798 */
799extern void scsi_free_host_dev(struct scsi_device *);
800extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
801
802/*
803 * DIF defines the exchange of protection information between
804 * initiator and SBC block device.
805 *
806 * DIX defines the exchange of protection information between OS and
807 * initiator.
808 */
809enum scsi_host_prot_capabilities {
810	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
811	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
812	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
813
814	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
815	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
816	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
817	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
818};
819
820/*
821 * SCSI hosts which support the Data Integrity Extensions must
822 * indicate their capabilities by setting the prot_capabilities using
823 * this call.
824 */
825static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
826{
827	shost->prot_capabilities = mask;
828}
829
830static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
831{
832	return shost->prot_capabilities;
833}
834
835static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
836{
837	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
838}
839
840static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
841{
842	static unsigned char cap[] = { 0,
843				       SHOST_DIF_TYPE1_PROTECTION,
844				       SHOST_DIF_TYPE2_PROTECTION,
845				       SHOST_DIF_TYPE3_PROTECTION };
846
847	if (target_type >= ARRAY_SIZE(cap))
848		return 0;
849
850	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
851}
852
853static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
854{
855#if defined(CONFIG_BLK_DEV_INTEGRITY)
856	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
857				       SHOST_DIX_TYPE1_PROTECTION,
858				       SHOST_DIX_TYPE2_PROTECTION,
859				       SHOST_DIX_TYPE3_PROTECTION };
860
861	if (target_type >= ARRAY_SIZE(cap))
862		return 0;
863
864	return shost->prot_capabilities & cap[target_type];
865#endif
866	return 0;
867}
868
869/*
870 * All DIX-capable initiators must support the T10-mandated CRC
871 * checksum.  Controllers can optionally implement the IP checksum
872 * scheme which has much lower impact on system performance.  Note
873 * that the main rationale for the checksum is to match integrity
874 * metadata with data.  Detecting bit errors are a job for ECC memory
875 * and buses.
876 */
877
878enum scsi_host_guard_type {
879	SHOST_DIX_GUARD_CRC = 1 << 0,
880	SHOST_DIX_GUARD_IP  = 1 << 1,
881};
882
883static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
884{
885	shost->prot_guard_type = type;
886}
887
888static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
889{
890	return shost->prot_guard_type;
891}
892
893extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
894
895#endif /* _SCSI_SCSI_HOST_H */