   1/*
   2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   3 *  Copyright (C) 1992  Eric Youngdale
   4 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   5 *  to make sure that we are not getting blocks mixed up, and PANIC if
   6 *  anything out of the ordinary is seen.
   7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   8 *
   9 *  This version is more generic, simulating a variable number of disks
  10 *  (or disk-like devices) sharing a common amount of RAM. To be more
  11 *  realistic, the simulated devices have the transport attributes of
  12 *  SAS disks.
  13 *
  14 *
  15 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  16 *
  17 *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
  18 *   dpg: work for devfs large number of disks [20010809]
  19 *        forked for lk 2.5 series [20011216, 20020101]
  20 *        use vmalloc() more inquiry+mode_sense [20020302]
  21 *        add timers for delayed responses [20020721]
  22 *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
  23 *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
  24 *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
  25 *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
  26 */
  27
  28
  29#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  30
  31#include <linux/module.h>
  32
  33#include <linux/kernel.h>
  34#include <linux/errno.h>
  35#include <linux/timer.h>
  36#include <linux/slab.h>
  37#include <linux/types.h>
  38#include <linux/string.h>
  39#include <linux/genhd.h>
  40#include <linux/fs.h>
  41#include <linux/init.h>
  42#include <linux/proc_fs.h>
  43#include <linux/vmalloc.h>
  44#include <linux/moduleparam.h>
  45#include <linux/scatterlist.h>
  46#include <linux/blkdev.h>
  47#include <linux/crc-t10dif.h>
  48#include <linux/spinlock.h>
  49#include <linux/interrupt.h>
  50#include <linux/atomic.h>
  51#include <linux/hrtimer.h>
  52
  53#include <net/checksum.h>
  54
  55#include <asm/unaligned.h>
  56
  57#include <scsi/scsi.h>
  58#include <scsi/scsi_cmnd.h>
  59#include <scsi/scsi_device.h>
  60#include <scsi/scsi_host.h>
  61#include <scsi/scsicam.h>
  62#include <scsi/scsi_eh.h>
  63#include <scsi/scsi_tcq.h>
  64#include <scsi/scsi_dbg.h>
  65
  66#include "sd.h"
  67#include "scsi_logging.h"
  68
  69#define SCSI_DEBUG_VERSION "1.85"
  70static const char *scsi_debug_version_date = "20141022";
  71
  72#define MY_NAME "scsi_debug"
  73
  74/* Additional Sense Code (ASC) */
  75#define NO_ADDITIONAL_SENSE 0x0
  76#define LOGICAL_UNIT_NOT_READY 0x4
  77#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
  78#define UNRECOVERED_READ_ERR 0x11
  79#define PARAMETER_LIST_LENGTH_ERR 0x1a
  80#define INVALID_OPCODE 0x20
  81#define LBA_OUT_OF_RANGE 0x21
  82#define INVALID_FIELD_IN_CDB 0x24
  83#define INVALID_FIELD_IN_PARAM_LIST 0x26
  84#define UA_RESET_ASC 0x29
  85#define UA_CHANGED_ASC 0x2a
  86#define TARGET_CHANGED_ASC 0x3f
  87#define LUNS_CHANGED_ASCQ 0x0e
  88#define INSUFF_RES_ASC 0x55
  89#define INSUFF_RES_ASCQ 0x3
  90#define POWER_ON_RESET_ASCQ 0x0
  91#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
  92#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
  93#define CAPACITY_CHANGED_ASCQ 0x9
  94#define SAVING_PARAMS_UNSUP 0x39
  95#define TRANSPORT_PROBLEM 0x4b
  96#define THRESHOLD_EXCEEDED 0x5d
  97#define LOW_POWER_COND_ON 0x5e
  98#define MISCOMPARE_VERIFY_ASC 0x1d
  99#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
 100#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
 101
 102/* Additional Sense Code Qualifier (ASCQ) */
 103#define ACK_NAK_TO 0x3
 104
 105
 106/* Default values for driver parameters */
 107#define DEF_NUM_HOST   1
 108#define DEF_NUM_TGTS   1
 109#define DEF_MAX_LUNS   1
 110/* With these defaults, this driver will make 1 host with 1 target
 111 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 112 */
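/* Worked example (assuming the usual add_host/num_tgts/max_luns module
 * parameter names): "modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4"
 * should yield 2 hosts * 2 targets * 4 logical units = 16 devices. */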
 113#define DEF_ATO 1
 114#define DEF_DELAY   1		/* if > 0 unit is a jiffy */
 115#define DEF_DEV_SIZE_MB   8
 116#define DEF_DIF 0
 117#define DEF_DIX 0
 118#define DEF_D_SENSE   0
 119#define DEF_EVERY_NTH   0
 120#define DEF_FAKE_RW	0
 121#define DEF_GUARD 0
 122#define DEF_HOST_LOCK 0
 123#define DEF_LBPU 0
 124#define DEF_LBPWS 0
 125#define DEF_LBPWS10 0
 126#define DEF_LBPRZ 1
 127#define DEF_LOWEST_ALIGNED 0
 128#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
 129#define DEF_NO_LUN_0   0
 130#define DEF_NUM_PARTS   0
 131#define DEF_OPTS   0
 132#define DEF_OPT_BLKS 1024
 133#define DEF_PHYSBLK_EXP 0
 134#define DEF_PTYPE   0
 135#define DEF_REMOVABLE false
 136#define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
 137#define DEF_SECTOR_SIZE 512
 138#define DEF_UNMAP_ALIGNMENT 0
 139#define DEF_UNMAP_GRANULARITY 1
 140#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 141#define DEF_UNMAP_MAX_DESC 256
 142#define DEF_VIRTUAL_GB   0
 143#define DEF_VPD_USE_HOSTNO 1
 144#define DEF_WRITESAME_LENGTH 0xFFFF
 145#define DEF_STRICT 0
 146#define DELAY_OVERRIDDEN -9999
 147
 148/* bit mask values for scsi_debug_opts */
 149#define SCSI_DEBUG_OPT_NOISE   1
 150#define SCSI_DEBUG_OPT_MEDIUM_ERR   2
 151#define SCSI_DEBUG_OPT_TIMEOUT   4
 152#define SCSI_DEBUG_OPT_RECOVERED_ERR   8
 153#define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
 154#define SCSI_DEBUG_OPT_DIF_ERR   32
 155#define SCSI_DEBUG_OPT_DIX_ERR   64
 156#define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
 157#define SCSI_DEBUG_OPT_SHORT_TRANSFER	0x100
 158#define SCSI_DEBUG_OPT_Q_NOISE	0x200
 159#define SCSI_DEBUG_OPT_ALL_TSF	0x400
 160#define SCSI_DEBUG_OPT_RARE_TSF	0x800
 161#define SCSI_DEBUG_OPT_N_WCE	0x1000
 162#define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
 163#define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
 164#define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
 165/* When "every_nth" > 0 then modulo "every_nth" commands:
 166 *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 167 *   - a RECOVERED_ERROR is simulated on successful read and write
 168 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 169 *   - a TRANSPORT_ERROR is simulated on successful read and write
 170 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 171 *
 172 * When "every_nth" < 0 then after "- every_nth" commands:
 173 *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 174 *   - a RECOVERED_ERROR is simulated on successful read and write
 175 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 176 *   - a TRANSPORT_ERROR is simulated on successful read and write
 177 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 178 * This will continue until some other action occurs (e.g. the user
 179 * writing a new value (other than -1 or 1) to every_nth via sysfs).
 180 */
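/* Worked example (assuming the usual every_nth/opts module parameter
 * names): "modprobe scsi_debug every_nth=100 opts=4" should make roughly
 * every 100th command appear to time out, since 4 == SCSI_DEBUG_OPT_TIMEOUT
 * above. */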
 181
  182/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 183 * priority order. In the subset implemented here lower numbers have higher
 184 * priority. The UA numbers should be a sequence starting from 0 with
 185 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
 186#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
 187#define SDEBUG_UA_BUS_RESET 1
 188#define SDEBUG_UA_MODE_CHANGED 2
 189#define SDEBUG_UA_CAPACITY_CHANGED 3
 190#define SDEBUG_UA_LUNS_CHANGED 4
 191#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
 192#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
 193#define SDEBUG_NUM_UAS 7
 194
 195/* for check_readiness() */
 196#define UAS_ONLY 1	/* check for UAs only */
 197#define UAS_TUR 0	/* if no UAs then check if media access possible */
 198
 199/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 200 * sector on read commands: */
 201#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 202#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
 203
 204/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 205 * or "peripheral device" addressing (value 0) */
 206#define SAM2_LUN_ADDRESS_METHOD 0
 207
 208/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
 209 * (for response) at one time. Can be reduced by max_queue option. Command
 210 * responses are not queued when delay=0 and ndelay=0. The per-device
 211 * DEF_CMD_PER_LUN can be changed via sysfs:
 212 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
 213 * SCSI_DEBUG_CANQUEUE. */
 214#define SCSI_DEBUG_CANQUEUE_WORDS  9	/* a WORD is bits in a long */
 215#define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
 216#define DEF_CMD_PER_LUN  255
 217
 218#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
 219#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
 220#endif
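/* Hypothetical example of the sysfs knob mentioned above:
 * "echo 64 > /sys/class/scsi_device/2:0:0:0/device/queue_depth" (any valid
 * <h:c:t:l>) should change that LU's queue depth, but values above
 * SCSI_DEBUG_CANQUEUE cannot take effect. */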
 221
 222/* SCSI opcodes (first byte of cdb) mapped onto these indexes */
 223enum sdeb_opcode_index {
 224	SDEB_I_INVALID_OPCODE =	0,
 225	SDEB_I_INQUIRY = 1,
 226	SDEB_I_REPORT_LUNS = 2,
 227	SDEB_I_REQUEST_SENSE = 3,
 228	SDEB_I_TEST_UNIT_READY = 4,
 229	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
 230	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
 231	SDEB_I_LOG_SENSE = 7,
 232	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
 233	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
 234	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
 235	SDEB_I_START_STOP = 11,
 236	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
 237	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
 238	SDEB_I_MAINT_IN = 14,
 239	SDEB_I_MAINT_OUT = 15,
 240	SDEB_I_VERIFY = 16,		/* 10 only */
 241	SDEB_I_VARIABLE_LEN = 17,
 242	SDEB_I_RESERVE = 18,		/* 6, 10 */
 243	SDEB_I_RELEASE = 19,		/* 6, 10 */
 244	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
 245	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
 246	SDEB_I_ATA_PT = 22,		/* 12, 16 */
 247	SDEB_I_SEND_DIAG = 23,
 248	SDEB_I_UNMAP = 24,
 249	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
 250	SDEB_I_WRITE_BUFFER = 26,
 251	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
 252	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
 253	SDEB_I_COMP_WRITE = 29,
 254	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
 255};
 256
 257static const unsigned char opcode_ind_arr[256] = {
 258/* 0x0; 0x0->0x1f: 6 byte cdbs */
 259	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
 260	    0, 0, 0, 0,
 261	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 262	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 263	    SDEB_I_RELEASE,
 264	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
 265	    SDEB_I_ALLOW_REMOVAL, 0,
 266/* 0x20; 0x20->0x3f: 10 byte cdbs */
 267	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
 268	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
 269	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
 270	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
 271/* 0x40; 0x40->0x5f: 10 byte cdbs */
 272	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
 273	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
 274	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 275	    SDEB_I_RELEASE,
 276	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
 277/* 0x60; 0x60->0x7d are reserved */
 278	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 279	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 280	0, SDEB_I_VARIABLE_LEN,
 281/* 0x80; 0x80->0x9f: 16 byte cdbs */
 282	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
 283	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 284	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
 285	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
 286/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
 287	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
 288	     SDEB_I_MAINT_OUT, 0, 0, 0,
 289	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
 290	     0, 0, 0, 0,
 291	0, 0, 0, 0, 0, 0, 0, 0,
 292	0, 0, 0, 0, 0, 0, 0, 0,
 293/* 0xc0; 0xc0->0xff: vendor specific */
 294	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 295	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 296	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 297	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 298};
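/* The first byte of each incoming cdb indexes the 256-entry table above to
 * obtain an SDEB_I_* value, which in turn selects an entry (and possibly an
 * attached "iarr" variant list) in opcode_info_arr further below. */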
 299
 300#define F_D_IN			1
 301#define F_D_OUT			2
 302#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
 303#define F_D_UNKN		8
 304#define F_RL_WLUN_OK		0x10
 305#define F_SKIP_UA		0x20
 306#define F_DELAY_OVERR		0x40
 307#define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
 308#define F_SA_HIGH		0x100	/* as used by variable length cdbs */
 309#define F_INV_OP		0x200
 310#define F_FAKE_RW		0x400
 311#define F_M_ACCESS		0x800	/* media access */
 312
 313#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
 314#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
 315#define FF_SA (F_SA_HIGH | F_SA_LOW)
 316
 317struct sdebug_dev_info;
 318static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
 319static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
 320static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
 321static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 322static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
 323static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 324static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
 325static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 326static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 327static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
 328static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
 329static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
 330static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
 331static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
 332static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
 333static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
 334static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 335static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
 336static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 337static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 338static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 339
 340struct opcode_info_t {
 341	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff
 342				 * for terminating element */
 343	u8 opcode;		/* if num_attached > 0, preferred */
 344	u16 sa;			/* service action */
 345	u32 flags;		/* OR-ed set of SDEB_F_* */
 346	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
 347	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
 348	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
 349				/* ignore cdb bytes after position 15 */
 350};
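/* Example reading of len_mask, taken from the INQUIRY entry in
 * opcode_info_arr below: {6, 0xe3, 0xff, ...} describes a 6 byte cdb whose
 * byte 1 is checked against mask 0xe3; with the "strict" module parameter
 * set, cdb bits that fall outside the mask should be flagged as an invalid
 * field in the cdb. */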
 351
 352static const struct opcode_info_t msense_iarr[1] = {
 353	{0, 0x1a, 0, F_D_IN, NULL, NULL,
 354	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 355};
 356
 357static const struct opcode_info_t mselect_iarr[1] = {
 358	{0, 0x15, 0, F_D_OUT, NULL, NULL,
 359	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 360};
 361
 362static const struct opcode_info_t read_iarr[3] = {
 363	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
 364	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
 365	     0, 0, 0, 0} },
 366	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
 367	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 368	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
 369	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
 370	     0xc7, 0, 0, 0, 0} },
 371};
 372
 373static const struct opcode_info_t write_iarr[3] = {
 374	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
 375	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
 376	     0, 0, 0, 0} },
 377	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
 378	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 379	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
 380	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
 381	     0xc7, 0, 0, 0, 0} },
 382};
 383
 384static const struct opcode_info_t sa_in_iarr[1] = {
 385	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
 386	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 387	     0xff, 0xff, 0xff, 0, 0xc7} },
 388};
 389
 390static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
 391	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
 392	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
 393		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
 394};
 395
 396static const struct opcode_info_t maint_in_iarr[2] = {
 397	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
 398	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
 399	     0xc7, 0, 0, 0, 0} },
 400	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
 401	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 402	     0, 0} },
 403};
 404
 405static const struct opcode_info_t write_same_iarr[1] = {
 406	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
 407	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 408	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
 409};
 410
 411static const struct opcode_info_t reserve_iarr[1] = {
 412	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
 413	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 414};
 415
 416static const struct opcode_info_t release_iarr[1] = {
 417	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
 418	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 419};
 420
 421
 422/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 423 * plus the terminating elements for logic that scans this table such as
 424 * REPORT SUPPORTED OPERATION CODES. */
 425static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 426/* 0 */
 427	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
 428	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 429	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
 430	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 431	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
 432	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 433	     0, 0} },
 434	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
 435	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 436	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
 437	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 438	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
 439	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 440	     0} },
 441	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
 442	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 443	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
 444	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
 445	     0, 0, 0} },
 446	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
 447	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
 448	     0, 0} },
 449	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
 450	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 451	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
 452/* 10 */
 453	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
 454	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 455	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
 456	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
 457	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 458	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
 459	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 460	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
 461	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
 462	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 463	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
 464	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
 465	     0} },
 466	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 467	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 468	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
 469	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
 470	     0, 0, 0, 0, 0, 0} },
 471	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
 472	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
 473		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
 474	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
 475	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 476	     0} },
 477	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
 478	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 479	     0} },
 480/* 20 */
 481	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
 482	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 483	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 484	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 485	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
 486	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
  487	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
 488	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 489	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
 490	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 491	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
 492	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
 493		   0, 0, 0, 0, 0, 0} },
 494	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
 495	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
 496	     0, 0, 0, 0} },			/* WRITE_BUFFER */
 497	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
 498	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
 499			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 500	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
 501	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
 502	     0, 0, 0, 0} },
 503	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
 504	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
 505	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
 506
 507/* 30 */
 508	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
 509	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 510};
 511
 512struct sdebug_scmd_extra_t {
 513	bool inj_recovered;
 514	bool inj_transport;
 515	bool inj_dif;
 516	bool inj_dix;
 517	bool inj_short;
 518};
 519
 520static int scsi_debug_add_host = DEF_NUM_HOST;
 521static int scsi_debug_ato = DEF_ATO;
 522static int scsi_debug_delay = DEF_DELAY;
 523static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
 524static int scsi_debug_dif = DEF_DIF;
 525static int scsi_debug_dix = DEF_DIX;
 526static int scsi_debug_dsense = DEF_D_SENSE;
 527static int scsi_debug_every_nth = DEF_EVERY_NTH;
 528static int scsi_debug_fake_rw = DEF_FAKE_RW;
 529static unsigned int scsi_debug_guard = DEF_GUARD;
 530static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
 531static int scsi_debug_max_luns = DEF_MAX_LUNS;
 532static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
 533static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
 534static int scsi_debug_ndelay = DEF_NDELAY;
 535static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
 536static int scsi_debug_no_uld = 0;
 537static int scsi_debug_num_parts = DEF_NUM_PARTS;
 538static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 539static int scsi_debug_opt_blks = DEF_OPT_BLKS;
 540static int scsi_debug_opts = DEF_OPTS;
 541static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
 542static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
 543static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
 544static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
 545static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
 546static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 547static unsigned int scsi_debug_lbpu = DEF_LBPU;
 548static unsigned int scsi_debug_lbpws = DEF_LBPWS;
 549static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
 550static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
 551static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 552static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 553static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 554static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 555static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
 556static bool scsi_debug_removable = DEF_REMOVABLE;
 557static bool scsi_debug_clustering;
 558static bool scsi_debug_host_lock = DEF_HOST_LOCK;
 559static bool scsi_debug_strict = DEF_STRICT;
 560static bool sdebug_any_injecting_opt;
 561
 562static atomic_t sdebug_cmnd_count;
 563static atomic_t sdebug_completions;
 564static atomic_t sdebug_a_tsf;		/* counter of 'almost' TSFs */
 565
 566#define DEV_READONLY(TGT)      (0)
 567
 568static unsigned int sdebug_store_sectors;
 569static sector_t sdebug_capacity;	/* in sectors */
 570
 571/* old BIOS stuff, kernel may get rid of them but some mode sense pages
 572   may still need them */
 573static int sdebug_heads;		/* heads per disk */
 574static int sdebug_cylinders_per;	/* cylinders per surface */
 575static int sdebug_sectors_per;		/* sectors per cylinder */
 576
 577#define SDEBUG_MAX_PARTS 4
 578
 579#define SCSI_DEBUG_MAX_CMD_LEN 32
 580
 581static unsigned int scsi_debug_lbp(void)
 582{
 583	return ((0 == scsi_debug_fake_rw) &&
 584		(scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
 585}
 586
 587struct sdebug_dev_info {
 588	struct list_head dev_list;
 589	unsigned int channel;
 590	unsigned int target;
 591	u64 lun;
 592	struct sdebug_host_info *sdbg_host;
 593	unsigned long uas_bm[1];
 594	atomic_t num_in_q;
 595	char stopped;		/* TODO: should be atomic */
 596	bool used;
 597};
 598
 599struct sdebug_host_info {
 600	struct list_head host_list;
 601	struct Scsi_Host *shost;
 602	struct device dev;
 603	struct list_head dev_info_list;
 604};
 605
 606#define to_sdebug_host(d)	\
 607	container_of(d, struct sdebug_host_info, dev)
 608
 609static LIST_HEAD(sdebug_host_list);
 610static DEFINE_SPINLOCK(sdebug_host_list_lock);
 611
 612
 613struct sdebug_hrtimer {		/* ... is derived from hrtimer */
 614	struct hrtimer hrt;	/* must be first element */
 615	int qa_indx;
 616};
 617
 618struct sdebug_queued_cmd {
 619	/* in_use flagged by a bit in queued_in_use_bm[] */
 620	struct timer_list *cmnd_timerp;
 621	struct tasklet_struct *tletp;
 622	struct sdebug_hrtimer *sd_hrtp;
 623	struct scsi_cmnd * a_cmnd;
 624};
 625static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
 626static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
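/* Each queued response occupies one queued_arr slot, flagged by the
 * corresponding bit in queued_in_use_bm; with SCSI_DEBUG_CANQUEUE_WORDS == 9
 * that allows 9 * BITS_PER_LONG (576 on 64 bit builds) commands in flight. */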
 627
 628
 629static unsigned char * fake_storep;	/* ramdisk storage */
 630static struct sd_dif_tuple *dif_storep;	/* protection info */
 631static void *map_storep;		/* provisioning map */
 632
 633static unsigned long map_size;
 634static int num_aborts;
 635static int num_dev_resets;
 636static int num_target_resets;
 637static int num_bus_resets;
 638static int num_host_resets;
 639static int dix_writes;
 640static int dix_reads;
 641static int dif_errors;
 642
 643static DEFINE_SPINLOCK(queued_arr_lock);
 644static DEFINE_RWLOCK(atomic_rw);
 645
 646static char sdebug_proc_name[] = MY_NAME;
 647static const char *my_name = MY_NAME;
 648
 649static struct bus_type pseudo_lld_bus;
 650
 651static struct device_driver sdebug_driverfs_driver = {
 652	.name 		= sdebug_proc_name,
 653	.bus		= &pseudo_lld_bus,
 654};
 655
 656static const int check_condition_result =
 657		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 658
 659static const int illegal_condition_result =
 660	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
 661
 662static const int device_qfull_result =
 663	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
 664
 665static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
 666				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
 667				     0, 0, 0, 0};
 668static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
 669				    0, 0, 0x2, 0x4b};
 670static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
 671			           0, 0, 0x0, 0x0};
 672
 673static void *fake_store(unsigned long long lba)
 674{
 675	lba = do_div(lba, sdebug_store_sectors);
 676
 677	return fake_storep + lba * scsi_debug_sector_size;
 678}
 679
 680static struct sd_dif_tuple *dif_store(sector_t sector)
 681{
 682	sector = sector_div(sector, sdebug_store_sectors);
 683
 684	return dif_storep + sector;
 685}
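/* Note that both helpers above wrap the LBA modulo sdebug_store_sectors, so
 * when virtual_gb advertises a capacity larger than the real ramdisk, high
 * LBAs simply alias back onto the same backing storage. */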
 686
 687static int sdebug_add_adapter(void);
 688static void sdebug_remove_adapter(void);
 689
 690static void sdebug_max_tgts_luns(void)
 691{
 692	struct sdebug_host_info *sdbg_host;
 693	struct Scsi_Host *hpnt;
 694
 695	spin_lock(&sdebug_host_list_lock);
 696	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 697		hpnt = sdbg_host->shost;
 698		if ((hpnt->this_id >= 0) &&
 699		    (scsi_debug_num_tgts > hpnt->this_id))
 700			hpnt->max_id = scsi_debug_num_tgts + 1;
 701		else
 702			hpnt->max_id = scsi_debug_num_tgts;
 703		/* scsi_debug_max_luns; */
 704		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 705	}
 706	spin_unlock(&sdebug_host_list_lock);
 707}
 708
 709enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
 710
 711/* Set in_bit to -1 to indicate no bit position of invalid field */
 712static void
 713mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
 714		     int in_byte, int in_bit)
 715{
 716	unsigned char *sbuff;
 717	u8 sks[4];
 718	int sl, asc;
 719
 720	sbuff = scp->sense_buffer;
 721	if (!sbuff) {
 722		sdev_printk(KERN_ERR, scp->device,
 723			    "%s: sense_buffer is NULL\n", __func__);
 724		return;
 725	}
 726	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
 727	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 728	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
 729				asc, 0);
 730	memset(sks, 0, sizeof(sks));
 731	sks[0] = 0x80;
 732	if (c_d)
 733		sks[0] |= 0x40;
 734	if (in_bit >= 0) {
 735		sks[0] |= 0x8;
 736		sks[0] |= 0x7 & in_bit;
 737	}
 738	put_unaligned_be16(in_byte, sks + 1);
 739	if (scsi_debug_dsense) {
 740		sl = sbuff[7] + 8;
 741		sbuff[7] = sl;
 742		sbuff[sl] = 0x2;
 743		sbuff[sl + 1] = 0x6;
 744		memcpy(sbuff + sl + 4, sks, 3);
 745	} else
 746		memcpy(sbuff + 15, sks, 3);
 747	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 748		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
 749			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
 750			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
 751}
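/* Typical use: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1), as issued by
 * resp_inquiry() below for an unsupported VPD page, points at cdb byte 2
 * without naming a specific bit. */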
 752
 753static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
 754{
 755	unsigned char *sbuff;
 756
 757	sbuff = scp->sense_buffer;
 758	if (!sbuff) {
 759		sdev_printk(KERN_ERR, scp->device,
 760			    "%s: sense_buffer is NULL\n", __func__);
 761		return;
 762	}
 763	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 764
 765	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
 766
 767	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 768		sdev_printk(KERN_INFO, scp->device,
 769			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
 770			    my_name, key, asc, asq);
 771}
 772
 773static void
 774mk_sense_invalid_opcode(struct scsi_cmnd *scp)
 775{
 776	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
 777}
 778
 779static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 780{
 781	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
 782		if (0x1261 == cmd)
 783			sdev_printk(KERN_INFO, dev,
 784				    "%s: BLKFLSBUF [0x1261]\n", __func__);
 785		else if (0x5331 == cmd)
 786			sdev_printk(KERN_INFO, dev,
 787				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
 788				    __func__);
 789		else
 790			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
 791				    __func__, cmd);
 792	}
 793	return -EINVAL;
 794	/* return -ENOTTY; // correct return but upsets fdisk */
 795}
 796
 797static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
 798{
 799	struct sdebug_host_info *sdhp;
 800	struct sdebug_dev_info *dp;
 801
 802	spin_lock(&sdebug_host_list_lock);
 803	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
 804		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
 805			if ((devip->sdbg_host == dp->sdbg_host) &&
 806			    (devip->target == dp->target))
 807				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
 808		}
 809	}
 810	spin_unlock(&sdebug_host_list_lock);
 811}
 812
 813static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
 814			   struct sdebug_dev_info * devip)
 815{
 816	int k;
 817	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
 818
 819	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 820	if (k != SDEBUG_NUM_UAS) {
 821		const char *cp = NULL;
 822
 823		switch (k) {
 824		case SDEBUG_UA_POR:
 825			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
 826					UA_RESET_ASC, POWER_ON_RESET_ASCQ);
 827			if (debug)
 828				cp = "power on reset";
 829			break;
 830		case SDEBUG_UA_BUS_RESET:
 831			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
 832					UA_RESET_ASC, BUS_RESET_ASCQ);
 833			if (debug)
 834				cp = "bus reset";
 835			break;
 836		case SDEBUG_UA_MODE_CHANGED:
 837			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
 838					UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
 839			if (debug)
 840				cp = "mode parameters changed";
 841			break;
 842		case SDEBUG_UA_CAPACITY_CHANGED:
 843			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
 844					UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
 845			if (debug)
 846				cp = "capacity data changed";
 847			break;
 848		case SDEBUG_UA_MICROCODE_CHANGED:
 849			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
 850				 TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
 851			if (debug)
 852				cp = "microcode has been changed";
 853			break;
 854		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
 855			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
 856					TARGET_CHANGED_ASC,
 857					MICROCODE_CHANGED_WO_RESET_ASCQ);
 858			if (debug)
 859				cp = "microcode has been changed without reset";
 860			break;
 861		case SDEBUG_UA_LUNS_CHANGED:
 862			/*
 863			 * SPC-3 behavior is to report a UNIT ATTENTION with
 864			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
 865			 * on the target, until a REPORT LUNS command is
 866			 * received.  SPC-4 behavior is to report it only once.
 867			 * NOTE:  scsi_debug_scsi_level does not use the same
 868			 * values as struct scsi_device->scsi_level.
 869			 */
 870			if (scsi_debug_scsi_level >= 6)	/* SPC-4 and above */
 871				clear_luns_changed_on_target(devip);
 872			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
 873					TARGET_CHANGED_ASC,
 874					LUNS_CHANGED_ASCQ);
 875			if (debug)
 876				cp = "reported luns data has changed";
 877			break;
 878		default:
 879			pr_warn("%s: unexpected unit attention code=%d\n",
 880				__func__, k);
 881			if (debug)
 882				cp = "unknown";
 883			break;
 884		}
 885		clear_bit(k, devip->uas_bm);
 886		if (debug)
 887			sdev_printk(KERN_INFO, SCpnt->device,
 888				   "%s reports: Unit attention: %s\n",
 889				   my_name, cp);
 890		return check_condition_result;
 891	}
 892	if ((UAS_TUR == uas_only) && devip->stopped) {
 893		mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
 894				0x2);
 895		if (debug)
 896			sdev_printk(KERN_INFO, SCpnt->device,
 897				    "%s reports: Not ready: %s\n", my_name,
 898				    "initializing command required");
 899		return check_condition_result;
 900	}
 901	return 0;
 902}
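/* Callers pass UAS_ONLY when only pending unit attentions matter; UAS_TUR
 * additionally fails the command with NOT READY while the logical unit is
 * stopped (see the UAS_* defines near the top of this file). */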
 903
  904/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
 905static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 906				int arr_len)
 907{
 908	int act_len;
 909	struct scsi_data_buffer *sdb = scsi_in(scp);
 910
 911	if (!sdb->length)
 912		return 0;
 913	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
 914		return (DID_ERROR << 16);
 915
 916	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
 917				      arr, arr_len);
 918	sdb->resid = scsi_bufflen(scp) - act_len;
 919
 920	return 0;
 921}
 922
 923/* Returns number of bytes fetched into 'arr' or -1 if error. */
 924static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 925			       int arr_len)
 926{
 927	if (!scsi_bufflen(scp))
 928		return 0;
 929	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
 930		return -1;
 931
 932	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
 933}
 934
 935
 936static const char * inq_vendor_id = "Linux   ";
 937static const char * inq_product_id = "scsi_debug      ";
 938static const char *inq_product_rev = "0184";	/* version less '.' */
 939
 940/* Device identification VPD page. Returns number of bytes placed in arr */
 941static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
 942			   int target_dev_id, int dev_id_num,
 943			   const char * dev_id_str,
 944			   int dev_id_str_len)
 945{
 946	int num, port_a;
 947	char b[32];
 948
 949	port_a = target_dev_id + 1;
 950	/* T10 vendor identifier field format (faked) */
 951	arr[0] = 0x2;	/* ASCII */
 952	arr[1] = 0x1;
 953	arr[2] = 0x0;
 954	memcpy(&arr[4], inq_vendor_id, 8);
 955	memcpy(&arr[12], inq_product_id, 16);
 956	memcpy(&arr[28], dev_id_str, dev_id_str_len);
 957	num = 8 + 16 + dev_id_str_len;
 958	arr[3] = num;
 959	num += 4;
 960	if (dev_id_num >= 0) {
 961		/* NAA-5, Logical unit identifier (binary) */
 962		arr[num++] = 0x1;	/* binary (not necessarily sas) */
 963		arr[num++] = 0x3;	/* PIV=0, lu, naa */
 964		arr[num++] = 0x0;
 965		arr[num++] = 0x8;
 966		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
 967		arr[num++] = 0x33;
 968		arr[num++] = 0x33;
 969		arr[num++] = 0x30;
 970		arr[num++] = (dev_id_num >> 24);
 971		arr[num++] = (dev_id_num >> 16) & 0xff;
 972		arr[num++] = (dev_id_num >> 8) & 0xff;
 973		arr[num++] = dev_id_num & 0xff;
 974		/* Target relative port number */
 975		arr[num++] = 0x61;	/* proto=sas, binary */
 976		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
 977		arr[num++] = 0x0;	/* reserved */
 978		arr[num++] = 0x4;	/* length */
 979		arr[num++] = 0x0;	/* reserved */
 980		arr[num++] = 0x0;	/* reserved */
 981		arr[num++] = 0x0;
 982		arr[num++] = 0x1;	/* relative port A */
 983	}
 984	/* NAA-5, Target port identifier */
 985	arr[num++] = 0x61;	/* proto=sas, binary */
 986	arr[num++] = 0x93;	/* piv=1, target port, naa */
 987	arr[num++] = 0x0;
 988	arr[num++] = 0x8;
 989	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
 990	arr[num++] = 0x22;
 991	arr[num++] = 0x22;
 992	arr[num++] = 0x20;
 993	arr[num++] = (port_a >> 24);
 994	arr[num++] = (port_a >> 16) & 0xff;
 995	arr[num++] = (port_a >> 8) & 0xff;
 996	arr[num++] = port_a & 0xff;
 997	/* NAA-5, Target port group identifier */
 998	arr[num++] = 0x61;	/* proto=sas, binary */
 999	arr[num++] = 0x95;	/* piv=1, target port group id */
1000	arr[num++] = 0x0;
1001	arr[num++] = 0x4;
1002	arr[num++] = 0;
1003	arr[num++] = 0;
1004	arr[num++] = (port_group_id >> 8) & 0xff;
1005	arr[num++] = port_group_id & 0xff;
1006	/* NAA-5, Target device identifier */
1007	arr[num++] = 0x61;	/* proto=sas, binary */
1008	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1009	arr[num++] = 0x0;
1010	arr[num++] = 0x8;
1011	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
1012	arr[num++] = 0x22;
1013	arr[num++] = 0x22;
1014	arr[num++] = 0x20;
1015	arr[num++] = (target_dev_id >> 24);
1016	arr[num++] = (target_dev_id >> 16) & 0xff;
1017	arr[num++] = (target_dev_id >> 8) & 0xff;
1018	arr[num++] = target_dev_id & 0xff;
1019	/* SCSI name string: Target device identifier */
1020	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1021	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1022	arr[num++] = 0x0;
1023	arr[num++] = 24;
1024	memcpy(arr + num, "naa.52222220", 12);
1025	num += 12;
1026	snprintf(b, sizeof(b), "%08X", target_dev_id);
1027	memcpy(arr + num, b, 8);
1028	num += 8;
1029	memset(arr + num, 0, 4);
1030	num += 4;
1031	return num;
1032}
1033
1034
1035static unsigned char vpd84_data[] = {
1036/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1037    0x22,0x22,0x22,0x0,0xbb,0x1,
1038    0x22,0x22,0x22,0x0,0xbb,0x2,
1039};
1040
1041/*  Software interface identification VPD page */
1042static int inquiry_evpd_84(unsigned char * arr)
1043{
1044	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1045	return sizeof(vpd84_data);
1046}
1047
1048/* Management network addresses VPD page */
1049static int inquiry_evpd_85(unsigned char * arr)
1050{
1051	int num = 0;
1052	const char * na1 = "https://www.kernel.org/config";
1053	const char * na2 = "http://www.kernel.org/log";
1054	int plen, olen;
1055
1056	arr[num++] = 0x1;	/* lu, storage config */
1057	arr[num++] = 0x0;	/* reserved */
1058	arr[num++] = 0x0;
1059	olen = strlen(na1);
1060	plen = olen + 1;
1061	if (plen % 4)
1062		plen = ((plen / 4) + 1) * 4;
1063	arr[num++] = plen;	/* length, null terminated, padded */
1064	memcpy(arr + num, na1, olen);
1065	memset(arr + num + olen, 0, plen - olen);
1066	num += plen;
1067
1068	arr[num++] = 0x4;	/* lu, logging */
1069	arr[num++] = 0x0;	/* reserved */
1070	arr[num++] = 0x0;
1071	olen = strlen(na2);
1072	plen = olen + 1;
1073	if (plen % 4)
1074		plen = ((plen / 4) + 1) * 4;
1075	arr[num++] = plen;	/* length, null terminated, padded */
1076	memcpy(arr + num, na2, olen);
1077	memset(arr + num + olen, 0, plen - olen);
1078	num += plen;
1079
1080	return num;
1081}
1082
1083/* SCSI ports VPD page */
1084static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1085{
1086	int num = 0;
1087	int port_a, port_b;
1088
1089	port_a = target_dev_id + 1;
1090	port_b = port_a + 1;
1091	arr[num++] = 0x0;	/* reserved */
1092	arr[num++] = 0x0;	/* reserved */
1093	arr[num++] = 0x0;
1094	arr[num++] = 0x1;	/* relative port 1 (primary) */
1095	memset(arr + num, 0, 6);
1096	num += 6;
1097	arr[num++] = 0x0;
1098	arr[num++] = 12;	/* length tp descriptor */
1099	/* naa-5 target port identifier (A) */
1100	arr[num++] = 0x61;	/* proto=sas, binary */
1101	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1102	arr[num++] = 0x0;	/* reserved */
1103	arr[num++] = 0x8;	/* length */
1104	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
1105	arr[num++] = 0x22;
1106	arr[num++] = 0x22;
1107	arr[num++] = 0x20;
1108	arr[num++] = (port_a >> 24);
1109	arr[num++] = (port_a >> 16) & 0xff;
1110	arr[num++] = (port_a >> 8) & 0xff;
1111	arr[num++] = port_a & 0xff;
1112
1113	arr[num++] = 0x0;	/* reserved */
1114	arr[num++] = 0x0;	/* reserved */
1115	arr[num++] = 0x0;
1116	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1117	memset(arr + num, 0, 6);
1118	num += 6;
1119	arr[num++] = 0x0;
1120	arr[num++] = 12;	/* length tp descriptor */
1121	/* naa-5 target port identifier (B) */
1122	arr[num++] = 0x61;	/* proto=sas, binary */
1123	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1124	arr[num++] = 0x0;	/* reserved */
1125	arr[num++] = 0x8;	/* length */
1126	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
1127	arr[num++] = 0x22;
1128	arr[num++] = 0x22;
1129	arr[num++] = 0x20;
1130	arr[num++] = (port_b >> 24);
1131	arr[num++] = (port_b >> 16) & 0xff;
1132	arr[num++] = (port_b >> 8) & 0xff;
1133	arr[num++] = port_b & 0xff;
1134
1135	return num;
1136}
1137
1138
1139static unsigned char vpd89_data[] = {
1140/* from 4th byte */ 0,0,0,0,
1141'l','i','n','u','x',' ',' ',' ',
1142'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1143'1','2','3','4',
11440x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
11450xec,0,0,0,
11460x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
11470,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
11480x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
11490x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
11500x53,0x41,
11510x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
11520x20,0x20,
11530x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
11540x10,0x80,
11550,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
11560x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
11570x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
11580,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
11590x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
11600x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
11610,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
11620,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11630,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11640,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11650x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
11660,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
11670xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
11680,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
11690,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11700,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11710,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11720,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11730,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11740,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11750,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11760,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11770,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11780,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11790,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11800,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1181};
1182
1183/* ATA Information VPD page */
1184static int inquiry_evpd_89(unsigned char * arr)
1185{
1186	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1187	return sizeof(vpd89_data);
1188}
1189
1190
1191static unsigned char vpdb0_data[] = {
1192	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1193	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1194	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1195	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1196};
1197
1198/* Block limits VPD page (SBC-3) */
1199static int inquiry_evpd_b0(unsigned char * arr)
1200{
1201	unsigned int gran;
1202
1203	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1204
1205	/* Optimal transfer length granularity */
1206	gran = 1 << scsi_debug_physblk_exp;
1207	arr[2] = (gran >> 8) & 0xff;
1208	arr[3] = gran & 0xff;
1209
1210	/* Maximum Transfer Length */
1211	if (sdebug_store_sectors > 0x400) {
1212		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1213		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1214		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1215		arr[7] = sdebug_store_sectors & 0xff;
1216	}
1217
1218	/* Optimal Transfer Length */
1219	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1220
1221	if (scsi_debug_lbpu) {
1222		/* Maximum Unmap LBA Count */
1223		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1224
1225		/* Maximum Unmap Block Descriptor Count */
1226		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1227	}
1228
1229	/* Unmap Granularity Alignment */
1230	if (scsi_debug_unmap_alignment) {
1231		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1232		arr[28] |= 0x80; /* UGAVALID */
1233	}
1234
1235	/* Optimal Unmap Granularity */
1236	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1237
1238	/* Maximum WRITE SAME Length */
1239	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1240
1241	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1242
1243	return sizeof(vpdb0_data);
1244}
1245
1246/* Block device characteristics VPD page (SBC-3) */
1247static int inquiry_evpd_b1(unsigned char *arr)
1248{
1249	memset(arr, 0, 0x3c);
1250	arr[0] = 0;
1251	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1252	arr[2] = 0;
1253	arr[3] = 5;	/* less than 1.8" */
1254
1255	return 0x3c;
1256}
1257
1258/* Logical block provisioning VPD page (SBC-3) */
1259static int inquiry_evpd_b2(unsigned char *arr)
1260{
1261	memset(arr, 0, 0x4);
1262	arr[0] = 0;			/* threshold exponent */
1263
1264	if (scsi_debug_lbpu)
1265		arr[1] = 1 << 7;
1266
1267	if (scsi_debug_lbpws)
1268		arr[1] |= 1 << 6;
1269
1270	if (scsi_debug_lbpws10)
1271		arr[1] |= 1 << 5;
1272
1273	if (scsi_debug_lbprz)
1274		arr[1] |= 1 << 2;
1275
1276	return 0x4;
1277}
1278
1279#define SDEBUG_LONG_INQ_SZ 96
1280#define SDEBUG_MAX_INQ_ARR_SZ 584
1281
1282static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1283{
1284	unsigned char pq_pdt;
1285	unsigned char * arr;
1286	unsigned char *cmd = scp->cmnd;
1287	int alloc_len, n, ret;
1288	bool have_wlun;
1289
1290	alloc_len = (cmd[3] << 8) + cmd[4];
1291	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1292	if (! arr)
1293		return DID_REQUEUE << 16;
1294	have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
1295	if (have_wlun)
1296		pq_pdt = 0x1e;	/* present, wlun */
1297	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1298		pq_pdt = 0x7f;	/* not present, no device type */
1299	else
1300		pq_pdt = (scsi_debug_ptype & 0x1f);
1301	arr[0] = pq_pdt;
1302	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1303		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1304		kfree(arr);
1305		return check_condition_result;
1306	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1307		int lu_id_num, port_group_id, target_dev_id, len;
1308		char lu_id_str[6];
1309		int host_no = devip->sdbg_host->shost->host_no;
1310		
1311		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1312		    (devip->channel & 0x7f);
1313		if (0 == scsi_debug_vpd_use_hostno)
1314			host_no = 0;
1315		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1316			    (devip->target * 1000) + devip->lun);
1317		target_dev_id = ((host_no + 1) * 2000) +
1318				 (devip->target * 1000) - 3;
1319		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1320		if (0 == cmd[2]) { /* supported vital product data pages */
1321			arr[1] = cmd[2];	/*sanity */
1322			n = 4;
1323			arr[n++] = 0x0;   /* this page */
1324			arr[n++] = 0x80;  /* unit serial number */
1325			arr[n++] = 0x83;  /* device identification */
1326			arr[n++] = 0x84;  /* software interface ident. */
1327			arr[n++] = 0x85;  /* management network addresses */
1328			arr[n++] = 0x86;  /* extended inquiry */
1329			arr[n++] = 0x87;  /* mode page policy */
1330			arr[n++] = 0x88;  /* SCSI ports */
1331			arr[n++] = 0x89;  /* ATA information */
1332			arr[n++] = 0xb0;  /* Block limits (SBC) */
1333			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1334			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1335				arr[n++] = 0xb2;
1336			arr[3] = n - 4;	  /* number of supported VPD pages */
1337		} else if (0x80 == cmd[2]) { /* unit serial number */
1338			arr[1] = cmd[2];	/*sanity */
1339			arr[3] = len;
1340			memcpy(&arr[4], lu_id_str, len);
1341		} else if (0x83 == cmd[2]) { /* device identification */
1342			arr[1] = cmd[2];	/*sanity */
1343			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1344						 target_dev_id, lu_id_num,
1345						 lu_id_str, len);
1346		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1347			arr[1] = cmd[2];	/*sanity */
1348			arr[3] = inquiry_evpd_84(&arr[4]);
1349		} else if (0x85 == cmd[2]) { /* Management network addresses */
1350			arr[1] = cmd[2];	/*sanity */
1351			arr[3] = inquiry_evpd_85(&arr[4]);
1352		} else if (0x86 == cmd[2]) { /* extended inquiry */
1353			arr[1] = cmd[2];	/*sanity */
1354			arr[3] = 0x3c;	/* number of following entries */
1355			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1356				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1357			else if (scsi_debug_dif)
1358				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1359			else
1360				arr[4] = 0x0;   /* no protection stuff */
1361			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1362		} else if (0x87 == cmd[2]) { /* mode page policy */
1363			arr[1] = cmd[2];	/*sanity */
1364			arr[3] = 0x8;	/* number of following entries */
1365			arr[4] = 0x2;	/* disconnect-reconnect mp */
1366			arr[6] = 0x80;	/* mlus, shared */
1367			arr[8] = 0x18;	 /* protocol specific lu */
1368			arr[10] = 0x82;	 /* mlus, per initiator port */
1369		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1370			arr[1] = cmd[2];	/*sanity */
1371			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1372		} else if (0x89 == cmd[2]) { /* ATA information */
1373			arr[1] = cmd[2];        /*sanity */
1374			n = inquiry_evpd_89(&arr[4]);
1375			arr[2] = (n >> 8);
1376			arr[3] = (n & 0xff);
1377		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1378			arr[1] = cmd[2];        /*sanity */
1379			arr[3] = inquiry_evpd_b0(&arr[4]);
1380		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1381			arr[1] = cmd[2];        /*sanity */
1382			arr[3] = inquiry_evpd_b1(&arr[4]);
1383		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1384			arr[1] = cmd[2];        /*sanity */
1385			arr[3] = inquiry_evpd_b2(&arr[4]);
1386		} else {
1387			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1388			kfree(arr);
1389			return check_condition_result;
1390		}
1391		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1392		ret = fill_from_dev_buffer(scp, arr,
1393			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1394		kfree(arr);
1395		return ret;
1396	}
1397	/* drops through here for a standard inquiry */
1398	arr[1] = scsi_debug_removable ? 0x80 : 0;	/* Removable disk */
1399	arr[2] = scsi_debug_scsi_level;
1400	arr[3] = 2;    /* response_data_format==2 */
1401	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1402	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1403	if (0 == scsi_debug_vpd_use_hostno)
1404		arr[5] = 0x10; /* claim: implicit TGPS */
1405	arr[6] = 0x10; /* claim: MultiP */
1406	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1407	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1408	memcpy(&arr[8], inq_vendor_id, 8);
1409	memcpy(&arr[16], inq_product_id, 16);
1410	memcpy(&arr[32], inq_product_rev, 4);
1411	/* version descriptors (2 bytes each) follow */
1412	arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1413	arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1414	n = 62;
1415	if (scsi_debug_ptype == 0) {
1416		arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1417	} else if (scsi_debug_ptype == 1) {
1418		arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1419	}
1420	arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1421	ret = fill_from_dev_buffer(scp, arr,
1422			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
1423	kfree(arr);
1424	return ret;
1425}
1426
1427static int resp_requests(struct scsi_cmnd * scp,
1428			 struct sdebug_dev_info * devip)
1429{
1430	unsigned char * sbuff;
1431	unsigned char *cmd = scp->cmnd;
1432	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1433	bool dsense;
1434	int len = 18;
1435
1436	memset(arr, 0, sizeof(arr));
1437	dsense = !!(cmd[1] & 1);
1438	sbuff = scp->sense_buffer;
1439	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1440		if (dsense) {
1441			arr[0] = 0x72;
1442			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1443			arr[2] = THRESHOLD_EXCEEDED;
1444			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1445			len = 8;
1446		} else {
1447			arr[0] = 0x70;
1448			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1449			arr[7] = 0xa;   	/* 18 byte sense buffer */
1450			arr[12] = THRESHOLD_EXCEEDED;
1451			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1452		}
1453	} else {
1454		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1455		if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1456			;	/* have sense and formats match */
1457		else if (arr[0] <= 0x70) {
1458			if (dsense) {
1459				memset(arr, 0, 8);
1460				arr[0] = 0x72;
1461				len = 8;
1462			} else {
1463				memset(arr, 0, 18);
1464				arr[0] = 0x70;
1465				arr[7] = 0xa;
1466			}
1467		} else if (dsense) {
1468			memset(arr, 0, 8);
1469			arr[0] = 0x72;
1470			arr[1] = sbuff[2];     /* sense key */
1471			arr[2] = sbuff[12];    /* asc */
1472			arr[3] = sbuff[13];    /* ascq */
1473			len = 8;
1474		} else {
1475			memset(arr, 0, 18);
1476			arr[0] = 0x70;
1477			arr[2] = sbuff[1];
1478			arr[7] = 0xa;
1479			arr[12] = sbuff[2];
1480			arr[13] = sbuff[3];
1481		}
1482
1483	}
1484	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1485	return fill_from_dev_buffer(scp, arr, len);
1486}
1487
1488static int resp_start_stop(struct scsi_cmnd * scp,
1489			   struct sdebug_dev_info * devip)
1490{
1491	unsigned char *cmd = scp->cmnd;
1492	int power_cond, start;
1493
1494	power_cond = (cmd[4] & 0xf0) >> 4;
1495	if (power_cond) {
1496		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1497		return check_condition_result;
1498	}
1499	start = cmd[4] & 1;
1500	if (start == devip->stopped)
1501		devip->stopped = !start;
1502	return 0;
1503}
1504
1505static sector_t get_sdebug_capacity(void)
1506{
1507	if (scsi_debug_virtual_gb > 0)
1508		return (sector_t)scsi_debug_virtual_gb *
1509			(1073741824 / scsi_debug_sector_size);
1510	else
1511		return sdebug_store_sectors;
1512}
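/*
 * Worked example of the calculation above, assuming, say, virtual_gb=4
 * with the default 512 byte sector size: the reported capacity is
 * 4 * (1073741824 / 512) = 8388608 sectors, regardless of how much RAM
 * (dev_size_mb) actually backs the store; accesses beyond the real
 * store simply wrap (see do_device_access() below). For instance,
 * something like:
 *
 *	modprobe scsi_debug virtual_gb=4 sector_size=512
 */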
1513
1514#define SDEBUG_READCAP_ARR_SZ 8
1515static int resp_readcap(struct scsi_cmnd * scp,
1516			struct sdebug_dev_info * devip)
1517{
1518	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1519	unsigned int capac;
1520
1521	/* following just in case virtual_gb changed */
1522	sdebug_capacity = get_sdebug_capacity();
1523	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1524	if (sdebug_capacity < 0xffffffff) {
1525		capac = (unsigned int)sdebug_capacity - 1;
1526		arr[0] = (capac >> 24);
1527		arr[1] = (capac >> 16) & 0xff;
1528		arr[2] = (capac >> 8) & 0xff;
1529		arr[3] = capac & 0xff;
1530	} else {
1531		arr[0] = 0xff;
1532		arr[1] = 0xff;
1533		arr[2] = 0xff;
1534		arr[3] = 0xff;
1535	}
1536	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1537	arr[7] = scsi_debug_sector_size & 0xff;
1538	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1539}
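/*
 * The READ CAPACITY(10) data built above is 8 bytes: the last LBA
 * (capacity - 1) big-endian in bytes 0-3 and the block length in
 * bytes 4-7. A last LBA that does not fit in 32 bits is reported as
 * 0xffffffff, which tells the initiator to use READ CAPACITY(16).
 */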
1540
1541#define SDEBUG_READCAP16_ARR_SZ 32
1542static int resp_readcap16(struct scsi_cmnd * scp,
1543			  struct sdebug_dev_info * devip)
1544{
1545	unsigned char *cmd = scp->cmnd;
1546	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1547	unsigned long long capac;
1548	int k, alloc_len;
1549
1550	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1551		     + cmd[13]);
1552	/* following just in case virtual_gb changed */
1553	sdebug_capacity = get_sdebug_capacity();
1554	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1555	capac = sdebug_capacity - 1;
1556	for (k = 0; k < 8; ++k, capac >>= 8)
1557		arr[7 - k] = capac & 0xff;
1558	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1559	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1560	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1561	arr[11] = scsi_debug_sector_size & 0xff;
1562	arr[13] = scsi_debug_physblk_exp & 0xf;
1563	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1564
1565	if (scsi_debug_lbp()) {
1566		arr[14] |= 0x80; /* LBPME */
1567		if (scsi_debug_lbprz)
1568			arr[14] |= 0x40; /* LBPRZ */
1569	}
1570
1571	arr[15] = scsi_debug_lowest_aligned & 0xff;
1572
1573	if (scsi_debug_dif) {
1574		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1575		arr[12] |= 1; /* PROT_EN */
1576	}
1577
1578	return fill_from_dev_buffer(scp, arr,
1579				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1580}
1581
1582#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1583
1584static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1585			      struct sdebug_dev_info * devip)
1586{
1587	unsigned char *cmd = scp->cmnd;
1588	unsigned char * arr;
1589	int host_no = devip->sdbg_host->shost->host_no;
1590	int n, ret, alen, rlen;
1591	int port_group_a, port_group_b, port_a, port_b;
1592
1593	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1594		+ cmd[9]);
1595
1596	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1597	if (! arr)
1598		return DID_REQUEUE << 16;
1599	/*
1600	 * EVPD page 0x88 states we have two ports, one
1601	 * real and a fake port with no device connected.
1602	 * So we create two port groups with one port each
1603	 * and set the group with port B to unavailable.
1604	 */
1605	port_a = 0x1; /* relative port A */
1606	port_b = 0x2; /* relative port B */
1607	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1608	    (devip->channel & 0x7f);
1609	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1610	    (devip->channel & 0x7f) + 0x80;
1611
1612	/*
1613	 * The asymmetric access state is cycled according to the host_id.
1614	 */
1615	n = 4;
1616	if (0 == scsi_debug_vpd_use_hostno) {
1617		arr[n++] = host_no % 3; /* Asymm access state */
1618		arr[n++] = 0x0F; /* claim: all states are supported */
1619	} else {
1620		arr[n++] = 0x0; /* Active/Optimized path */
1621		arr[n++] = 0x01; /* claim: only support active/optimized paths */
1622	}
1623	arr[n++] = (port_group_a >> 8) & 0xff;
1624	arr[n++] = port_group_a & 0xff;
1625	arr[n++] = 0;    /* Reserved */
1626	arr[n++] = 0;    /* Status code */
1627	arr[n++] = 0;    /* Vendor unique */
1628	arr[n++] = 0x1;  /* One port per group */
1629	arr[n++] = 0;    /* Reserved */
1630	arr[n++] = 0;    /* Reserved */
1631	arr[n++] = (port_a >> 8) & 0xff;
1632	arr[n++] = port_a & 0xff;
1633	arr[n++] = 3;    /* Port unavailable */
1634	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1635	arr[n++] = (port_group_b >> 8) & 0xff;
1636	arr[n++] = port_group_b & 0xff;
1637	arr[n++] = 0;    /* Reserved */
1638	arr[n++] = 0;    /* Status code */
1639	arr[n++] = 0;    /* Vendor unique */
1640	arr[n++] = 0x1;  /* One port per group */
1641	arr[n++] = 0;    /* Reserved */
1642	arr[n++] = 0;    /* Reserved */
1643	arr[n++] = (port_b >> 8) & 0xff;
1644	arr[n++] = port_b & 0xff;
1645
1646	rlen = n - 4;
1647	arr[0] = (rlen >> 24) & 0xff;
1648	arr[1] = (rlen >> 16) & 0xff;
1649	arr[2] = (rlen >> 8) & 0xff;
1650	arr[3] = rlen & 0xff;
1651
1652	/*
1653	 * Return the smallest value of either
1654	 * - The allocated length
1655	 * - The constructed command length
1656	 * - The maximum array size
1657	 */
1658	rlen = min(alen,n);
1659	ret = fill_from_dev_buffer(scp, arr,
1660				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1661	kfree(arr);
1662	return ret;
1663}
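/*
 * Layout of each target port group descriptor built above: an 8 byte
 * header (asymmetric access state, supported-states byte, 2 byte group
 * id, reserved, status code, vendor unique, port count) followed by
 * 4 bytes per relative target port (2 reserved + 2 byte port id), so
 * each of the two single-port groups here takes 12 bytes after the
 * 4 byte returned-data length.
 */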
1664
1665static int
1666resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1667{
1668	bool rctd;
1669	u8 reporting_opts, req_opcode, sdeb_i, supp;
1670	u16 req_sa, u;
1671	u32 alloc_len, a_len;
1672	int k, offset, len, errsts, count, bump, na;
1673	const struct opcode_info_t *oip;
1674	const struct opcode_info_t *r_oip;
1675	u8 *arr;
1676	u8 *cmd = scp->cmnd;
1677
1678	rctd = !!(cmd[2] & 0x80);
1679	reporting_opts = cmd[2] & 0x7;
1680	req_opcode = cmd[3];
1681	req_sa = get_unaligned_be16(cmd + 4);
1682	alloc_len = get_unaligned_be32(cmd + 6);
1683	if (alloc_len < 4 || alloc_len > 0xffff) {
1684		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1685		return check_condition_result;
1686	}
1687	if (alloc_len > 8192)
1688		a_len = 8192;
1689	else
1690		a_len = alloc_len;
1691	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1692	if (NULL == arr) {
1693		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1694				INSUFF_RES_ASCQ);
1695		return check_condition_result;
1696	}
1697	switch (reporting_opts) {
1698	case 0:	/* all commands */
1699		/* count number of commands */
1700		for (count = 0, oip = opcode_info_arr;
1701		     oip->num_attached != 0xff; ++oip) {
1702			if (F_INV_OP & oip->flags)
1703				continue;
1704			count += (oip->num_attached + 1);
1705		}
1706		bump = rctd ? 20 : 8;
1707		put_unaligned_be32(count * bump, arr);
1708		for (offset = 4, oip = opcode_info_arr;
1709		     oip->num_attached != 0xff && offset < a_len; ++oip) {
1710			if (F_INV_OP & oip->flags)
1711				continue;
1712			na = oip->num_attached;
1713			arr[offset] = oip->opcode;
1714			put_unaligned_be16(oip->sa, arr + offset + 2);
1715			if (rctd)
1716				arr[offset + 5] |= 0x2;
1717			if (FF_SA & oip->flags)
1718				arr[offset + 5] |= 0x1;
1719			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1720			if (rctd)
1721				put_unaligned_be16(0xa, arr + offset + 8);
1722			r_oip = oip;
1723			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1724				if (F_INV_OP & oip->flags)
1725					continue;
1726				offset += bump;
1727				arr[offset] = oip->opcode;
1728				put_unaligned_be16(oip->sa, arr + offset + 2);
1729				if (rctd)
1730					arr[offset + 5] |= 0x2;
1731				if (FF_SA & oip->flags)
1732					arr[offset + 5] |= 0x1;
1733				put_unaligned_be16(oip->len_mask[0],
1734						   arr + offset + 6);
1735				if (rctd)
1736					put_unaligned_be16(0xa,
1737							   arr + offset + 8);
1738			}
1739			oip = r_oip;
1740			offset += bump;
1741		}
1742		break;
1743	case 1:	/* one command: opcode only */
1744	case 2:	/* one command: opcode plus service action */
1745	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
1746		sdeb_i = opcode_ind_arr[req_opcode];
1747		oip = &opcode_info_arr[sdeb_i];
1748		if (F_INV_OP & oip->flags) {
1749			supp = 1;
1750			offset = 4;
1751		} else {
1752			if (1 == reporting_opts) {
1753				if (FF_SA & oip->flags) {
1754					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1755							     2, 2);
1756					kfree(arr);
1757					return check_condition_result;
1758				}
1759				req_sa = 0;
1760			} else if (2 == reporting_opts &&
1761				   0 == (FF_SA & oip->flags)) {
1762				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1763				kfree(arr);	/* point at requested sa */
1764				return check_condition_result;
1765			}
1766			if (0 == (FF_SA & oip->flags) &&
1767			    req_opcode == oip->opcode)
1768				supp = 3;
1769			else if (0 == (FF_SA & oip->flags)) {
1770				na = oip->num_attached;
1771				for (k = 0, oip = oip->arrp; k < na;
1772				     ++k, ++oip) {
1773					if (req_opcode == oip->opcode)
1774						break;
1775				}
1776				supp = (k >= na) ? 1 : 3;
1777			} else if (req_sa != oip->sa) {
1778				na = oip->num_attached;
1779				for (k = 0, oip = oip->arrp; k < na;
1780				     ++k, ++oip) {
1781					if (req_sa == oip->sa)
1782						break;
1783				}
1784				supp = (k >= na) ? 1 : 3;
1785			} else
1786				supp = 3;
1787			if (3 == supp) {
1788				u = oip->len_mask[0];
1789				put_unaligned_be16(u, arr + 2);
1790				arr[4] = oip->opcode;
1791				for (k = 1; k < u; ++k)
1792					arr[4 + k] = (k < 16) ?
1793						 oip->len_mask[k] : 0xff;
1794				offset = 4 + u;
1795			} else
1796				offset = 4;
1797		}
1798		arr[1] = (rctd ? 0x80 : 0) | supp;
1799		if (rctd) {
1800			put_unaligned_be16(0xa, arr + offset);
1801			offset += 12;
1802		}
1803		break;
1804	default:
1805		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1806		kfree(arr);
1807		return check_condition_result;
1808	}
1809	offset = (offset < a_len) ? offset : a_len;
1810	len = (offset < alloc_len) ? offset : alloc_len;
1811	errsts = fill_from_dev_buffer(scp, arr, len);
1812	kfree(arr);
1813	return errsts;
1814}
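/*
 * Sizing note for the "report all" case above: each command descriptor
 * is 8 bytes, or 20 bytes when RCTD is set because a 12 byte command
 * timeouts descriptor (length 0xa) is appended to each entry; that is
 * where bump = rctd ? 20 : 8 comes from.
 */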
1815
1816static int
1817resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1818{
1819	bool repd;
1820	u32 alloc_len, len;
1821	u8 arr[16];
1822	u8 *cmd = scp->cmnd;
1823
1824	memset(arr, 0, sizeof(arr));
1825	repd = !!(cmd[2] & 0x80);
1826	alloc_len = get_unaligned_be32(cmd + 6);
1827	if (alloc_len < 4) {
1828		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1829		return check_condition_result;
1830	}
1831	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1832	arr[1] = 0x1;		/* ITNRS */
1833	if (repd) {
1834		arr[3] = 0xc;
1835		len = 16;
1836	} else
1837		len = 4;
1838
1839	len = (len < alloc_len) ? len : alloc_len;
1840	return fill_from_dev_buffer(scp, arr, len);
1841}
1842
1843/* <<Following mode page info copied from ST318451LW>> */
1844
1845static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1846{	/* Read-Write Error Recovery page for mode_sense */
1847	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1848					5, 0, 0xff, 0xff};
1849
1850	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1851	if (1 == pcontrol)
1852		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1853	return sizeof(err_recov_pg);
1854}
1855
1856static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1857{ 	/* Disconnect-Reconnect page for mode_sense */
1858	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1859					 0, 0, 0, 0, 0, 0, 0, 0};
1860
1861	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1862	if (1 == pcontrol)
1863		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1864	return sizeof(disconnect_pg);
1865}
1866
1867static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1868{       /* Format device page for mode_sense */
1869	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1870				     0, 0, 0, 0, 0, 0, 0, 0,
1871				     0, 0, 0, 0, 0x40, 0, 0, 0};
1872
1873	memcpy(p, format_pg, sizeof(format_pg));
1874	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1875	p[11] = sdebug_sectors_per & 0xff;
1876	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1877	p[13] = scsi_debug_sector_size & 0xff;
1878	if (scsi_debug_removable)
1879		p[20] |= 0x20; /* should agree with INQUIRY */
1880	if (1 == pcontrol)
1881		memset(p + 2, 0, sizeof(format_pg) - 2);
1882	return sizeof(format_pg);
1883}
1884
1885static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1886{ 	/* Caching page for mode_sense */
1887	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1888		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1889	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1890		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1891
1892	if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1893		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1894	memcpy(p, caching_pg, sizeof(caching_pg));
1895	if (1 == pcontrol)
1896		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1897	else if (2 == pcontrol)
1898		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1899	return sizeof(caching_pg);
1900}
1901
1902static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1903{ 	/* Control mode page for mode_sense */
1904	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1905				        0, 0, 0, 0};
1906	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1907				     0, 0, 0x2, 0x4b};
1908
1909	if (scsi_debug_dsense)
1910		ctrl_m_pg[2] |= 0x4;
1911	else
1912		ctrl_m_pg[2] &= ~0x4;
1913
1914	if (scsi_debug_ato)
1915		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1916
1917	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1918	if (1 == pcontrol)
1919		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1920	else if (2 == pcontrol)
1921		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1922	return sizeof(ctrl_m_pg);
1923}
1924
1925
1926static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1927{	/* Informational Exceptions control mode page for mode_sense */
1928	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1929				       0, 0, 0x0, 0x0};
1930	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1931				      0, 0, 0x0, 0x0};
1932
1933	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1934	if (1 == pcontrol)
1935		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1936	else if (2 == pcontrol)
1937		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1938	return sizeof(iec_m_pg);
1939}
1940
1941static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1942{	/* SAS SSP mode page - short format for mode_sense */
1943	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1944		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1945
1946	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1947	if (1 == pcontrol)
1948		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1949	return sizeof(sas_sf_m_pg);
1950}
1951
1952
1953static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1954			      int target_dev_id)
1955{	/* SAS phy control and discover mode page for mode_sense */
1956	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1957		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1958		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1959		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1960		    0x2, 0, 0, 0, 0, 0, 0, 0,
1961		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1962		    0, 0, 0, 0, 0, 0, 0, 0,
1963		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1964		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1965		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1966		    0x3, 0, 0, 0, 0, 0, 0, 0,
1967		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1968		    0, 0, 0, 0, 0, 0, 0, 0,
1969		};
1970	int port_a, port_b;
1971
1972	port_a = target_dev_id + 1;
1973	port_b = port_a + 1;
1974	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1975	p[20] = (port_a >> 24);
1976	p[21] = (port_a >> 16) & 0xff;
1977	p[22] = (port_a >> 8) & 0xff;
1978	p[23] = port_a & 0xff;
1979	p[48 + 20] = (port_b >> 24);
1980	p[48 + 21] = (port_b >> 16) & 0xff;
1981	p[48 + 22] = (port_b >> 8) & 0xff;
1982	p[48 + 23] = port_b & 0xff;
1983	if (1 == pcontrol)
1984		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1985	return sizeof(sas_pcd_m_pg);
1986}
1987
1988static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1989{	/* SAS SSP shared protocol specific port mode subpage */
1990	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1991		    0, 0, 0, 0, 0, 0, 0, 0,
1992		};
1993
1994	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1995	if (1 == pcontrol)
1996		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1997	return sizeof(sas_sha_m_pg);
1998}
1999
2000#define SDEBUG_MAX_MSENSE_SZ 256
2001
2002static int
2003resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2004{
2005	unsigned char dbd, llbaa;
2006	int pcontrol, pcode, subpcode, bd_len;
2007	unsigned char dev_spec;
2008	int k, alloc_len, msense_6, offset, len, target_dev_id;
2009	int target = scp->device->id;
2010	unsigned char * ap;
2011	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2012	unsigned char *cmd = scp->cmnd;
2013
2014	dbd = !!(cmd[1] & 0x8);
2015	pcontrol = (cmd[2] & 0xc0) >> 6;
2016	pcode = cmd[2] & 0x3f;
2017	subpcode = cmd[3];
2018	msense_6 = (MODE_SENSE == cmd[0]);
2019	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
2020	if ((0 == scsi_debug_ptype) && (0 == dbd))
2021		bd_len = llbaa ? 16 : 8;
2022	else
2023		bd_len = 0;
2024	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
2025	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2026	if (0x3 == pcontrol) {  /* Saving values not supported */
2027		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2028		return check_condition_result;
2029	}
2030	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2031			(devip->target * 1000) - 3;
2032	/* set DPOFUA bit for disks */
2033	if (0 == scsi_debug_ptype)
2034		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
2035	else
2036		dev_spec = 0x0;
2037	if (msense_6) {
2038		arr[2] = dev_spec;
2039		arr[3] = bd_len;
2040		offset = 4;
2041	} else {
2042		arr[3] = dev_spec;
2043		if (16 == bd_len)
2044			arr[4] = 0x1;	/* set LONGLBA bit */
2045		arr[7] = bd_len;	/* assume 255 or less */
2046		offset = 8;
2047	}
2048	ap = arr + offset;
2049	if ((bd_len > 0) && (!sdebug_capacity))
2050		sdebug_capacity = get_sdebug_capacity();
2051
2052	if (8 == bd_len) {
2053		if (sdebug_capacity > 0xfffffffe) {
2054			ap[0] = 0xff;
2055			ap[1] = 0xff;
2056			ap[2] = 0xff;
2057			ap[3] = 0xff;
2058		} else {
2059			ap[0] = (sdebug_capacity >> 24) & 0xff;
2060			ap[1] = (sdebug_capacity >> 16) & 0xff;
2061			ap[2] = (sdebug_capacity >> 8) & 0xff;
2062			ap[3] = sdebug_capacity & 0xff;
2063		}
2064		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2065		ap[7] = scsi_debug_sector_size & 0xff;
2066		offset += bd_len;
2067		ap = arr + offset;
2068	} else if (16 == bd_len) {
2069		unsigned long long capac = sdebug_capacity;
2070
2071		for (k = 0; k < 8; ++k, capac >>= 8)
2072			ap[7 - k] = capac & 0xff;
2073		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2074		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2075		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2076		ap[15] = scsi_debug_sector_size & 0xff;
2077		offset += bd_len;
2078		ap = arr + offset;
2079	}
2080
2081	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2082		/* TODO: Control Extension page */
2083		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2084		return check_condition_result;
2085	}
2086	switch (pcode) {
2087	case 0x1:	/* Read-Write error recovery page, direct access */
2088		len = resp_err_recov_pg(ap, pcontrol, target);
2089		offset += len;
2090		break;
2091	case 0x2:	/* Disconnect-Reconnect page, all devices */
2092		len = resp_disconnect_pg(ap, pcontrol, target);
2093		offset += len;
2094		break;
2095	case 0x3:	/* Format device page, direct access */
2096		len = resp_format_pg(ap, pcontrol, target);
2097		offset += len;
2098		break;
2099	case 0x8:	/* Caching page, direct access */
2100		len = resp_caching_pg(ap, pcontrol, target);
2101		offset += len;
2102		break;
2103	case 0xa:	/* Control Mode page, all devices */
2104		len = resp_ctrl_m_pg(ap, pcontrol, target);
2105		offset += len;
2106		break;
2107	case 0x19:	/* if spc==1 then sas phy, control+discover */
2108		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2109			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2110			return check_condition_result;
2111		}
2112		len = 0;
2113		if ((0x0 == subpcode) || (0xff == subpcode))
2114			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2115		if ((0x1 == subpcode) || (0xff == subpcode))
2116			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2117						  target_dev_id);
2118		if ((0x2 == subpcode) || (0xff == subpcode))
2119			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2120		offset += len;
2121		break;
2122	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2123		len = resp_iec_m_pg(ap, pcontrol, target);
2124		offset += len;
2125		break;
2126	case 0x3f:	/* Read all Mode pages */
2127		if ((0 == subpcode) || (0xff == subpcode)) {
2128			len = resp_err_recov_pg(ap, pcontrol, target);
2129			len += resp_disconnect_pg(ap + len, pcontrol, target);
2130			len += resp_format_pg(ap + len, pcontrol, target);
2131			len += resp_caching_pg(ap + len, pcontrol, target);
2132			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2133			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2134			if (0xff == subpcode) {
2135				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2136						  target, target_dev_id);
2137				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2138			}
2139			len += resp_iec_m_pg(ap + len, pcontrol, target);
2140		} else {
2141			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2142			return check_condition_result;
2143		}
2144		offset += len;
2145		break;
2146	default:
2147		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2148		return check_condition_result;
2149	}
2150	if (msense_6)
2151		arr[0] = offset - 1;
2152	else {
2153		arr[0] = ((offset - 2) >> 8) & 0xff;
2154		arr[1] = (offset - 2) & 0xff;
2155	}
2156	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2157}
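/*
 * Mode data length fix-up above: the MODE SENSE(6) header is 4 bytes
 * and its length byte excludes itself, hence arr[0] = offset - 1;
 * the MODE SENSE(10) header is 8 bytes with a 2 byte length field
 * that excludes those 2 bytes, hence offset - 2 split over arr[0..1].
 */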
2158
2159#define SDEBUG_MAX_MSELECT_SZ 512
2160
2161static int
2162resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2163{
2164	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2165	int param_len, res, mpage;
2166	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2167	unsigned char *cmd = scp->cmnd;
2168	int mselect6 = (MODE_SELECT == cmd[0]);
2169
2170	memset(arr, 0, sizeof(arr));
2171	pf = cmd[1] & 0x10;
2172	sp = cmd[1] & 0x1;
2173	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
2174	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2175		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2176		return check_condition_result;
2177	}
2178	res = fetch_to_dev_buffer(scp, arr, param_len);
2179	if (-1 == res)
2180		return (DID_ERROR << 16);
2181	else if ((res < param_len) &&
2182		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2183		sdev_printk(KERN_INFO, scp->device,
2184			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2185			    __func__, param_len, res);
2186	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2187	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2188	if (md_len > 2) {
2189		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2190		return check_condition_result;
2191	}
2192	off = bd_len + (mselect6 ? 4 : 8);
2193	mpage = arr[off] & 0x3f;
2194	ps = !!(arr[off] & 0x80);
2195	if (ps) {
2196		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2197		return check_condition_result;
2198	}
2199	spf = !!(arr[off] & 0x40);
2200	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2201		       (arr[off + 1] + 2);
2202	if ((pg_len + off) > param_len) {
2203		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2204				PARAMETER_LIST_LENGTH_ERR, 0);
2205		return check_condition_result;
2206	}
2207	switch (mpage) {
2208	case 0x8:      /* Caching Mode page */
2209		if (caching_pg[1] == arr[off + 1]) {
2210			memcpy(caching_pg + 2, arr + off + 2,
2211			       sizeof(caching_pg) - 2);
2212			goto set_mode_changed_ua;
2213		}
2214		break;
2215	case 0xa:      /* Control Mode page */
2216		if (ctrl_m_pg[1] == arr[off + 1]) {
2217			memcpy(ctrl_m_pg + 2, arr + off + 2,
2218			       sizeof(ctrl_m_pg) - 2);
2219			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2220			goto set_mode_changed_ua;
2221		}
2222		break;
2223	case 0x1c:      /* Informational Exceptions Mode page */
2224		if (iec_m_pg[1] == arr[off + 1]) {
2225			memcpy(iec_m_pg + 2, arr + off + 2,
2226			       sizeof(iec_m_pg) - 2);
2227			goto set_mode_changed_ua;
2228		}
2229		break;
2230	default:
2231		break;
2232	}
2233	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2234	return check_condition_result;
2235set_mode_changed_ua:
2236	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2237	return 0;
2238}
2239
2240static int resp_temp_l_pg(unsigned char * arr)
2241{
2242	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2243				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2244		};
2245
2246	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2247	return sizeof(temp_l_pg);
2248}
2249
2250static int resp_ie_l_pg(unsigned char * arr)
2251{
2252	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2253		};
2254
2255	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2256	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2257		arr[4] = THRESHOLD_EXCEEDED;
2258		arr[5] = 0xff;
2259	}
2260	return sizeof(ie_l_pg);
2261}
2262
2263#define SDEBUG_MAX_LSENSE_SZ 512
2264
2265static int resp_log_sense(struct scsi_cmnd * scp,
2266                          struct sdebug_dev_info * devip)
2267{
2268	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2269	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2270	unsigned char *cmd = scp->cmnd;
2271
2272	memset(arr, 0, sizeof(arr));
2273	ppc = cmd[1] & 0x2;
2274	sp = cmd[1] & 0x1;
2275	if (ppc || sp) {
2276		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2277		return check_condition_result;
2278	}
2279	pcontrol = (cmd[2] & 0xc0) >> 6;
2280	pcode = cmd[2] & 0x3f;
2281	subpcode = cmd[3] & 0xff;
2282	alloc_len = (cmd[7] << 8) + cmd[8];
2283	arr[0] = pcode;
2284	if (0 == subpcode) {
2285		switch (pcode) {
2286		case 0x0:	/* Supported log pages log page */
2287			n = 4;
2288			arr[n++] = 0x0;		/* this page */
2289			arr[n++] = 0xd;		/* Temperature */
2290			arr[n++] = 0x2f;	/* Informational exceptions */
2291			arr[3] = n - 4;
2292			break;
2293		case 0xd:	/* Temperature log page */
2294			arr[3] = resp_temp_l_pg(arr + 4);
2295			break;
2296		case 0x2f:	/* Informational exceptions log page */
2297			arr[3] = resp_ie_l_pg(arr + 4);
2298			break;
2299		default:
2300			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2301			return check_condition_result;
2302		}
2303	} else if (0xff == subpcode) {
2304		arr[0] |= 0x40;
2305		arr[1] = subpcode;
2306		switch (pcode) {
2307		case 0x0:	/* Supported log pages and subpages log page */
2308			n = 4;
2309			arr[n++] = 0x0;
2310			arr[n++] = 0x0;		/* 0,0 page */
2311			arr[n++] = 0x0;
2312			arr[n++] = 0xff;	/* this page */
2313			arr[n++] = 0xd;
2314			arr[n++] = 0x0;		/* Temperature */
2315			arr[n++] = 0x2f;
2316			arr[n++] = 0x0;	/* Informational exceptions */
2317			arr[3] = n - 4;
2318			break;
2319		case 0xd:	/* Temperature subpages */
2320			n = 4;
2321			arr[n++] = 0xd;
2322			arr[n++] = 0x0;		/* Temperature */
2323			arr[3] = n - 4;
2324			break;
2325		case 0x2f:	/* Informational exceptions subpages */
2326			n = 4;
2327			arr[n++] = 0x2f;
2328			arr[n++] = 0x0;		/* Informational exceptions */
2329			arr[3] = n - 4;
2330			break;
2331		default:
2332			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2333			return check_condition_result;
2334		}
2335	} else {
2336		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2337		return check_condition_result;
2338	}
2339	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2340	return fill_from_dev_buffer(scp, arr,
2341		    min(len, SDEBUG_MAX_LSENSE_SZ));
2342}
2343
2344static int check_device_access_params(struct scsi_cmnd *scp,
2345				      unsigned long long lba, unsigned int num)
2346{
2347	if (lba + num > sdebug_capacity) {
2348		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2349		return check_condition_result;
2350	}
2351	/* transfer length excessive (tie in to block limits VPD page) */
2352	if (num > sdebug_store_sectors) {
2353		/* needs work to find which cdb byte 'num' comes from */
2354		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2355		return check_condition_result;
2356	}
2357	return 0;
2358}
2359
2360/* Returns number of bytes copied or -1 if error. */
2361static int
2362do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2363{
2364	int ret;
2365	u64 block, rest = 0;
2366	struct scsi_data_buffer *sdb;
2367	enum dma_data_direction dir;
2368
2369	if (do_write) {
2370		sdb = scsi_out(scmd);
2371		dir = DMA_TO_DEVICE;
2372	} else {
2373		sdb = scsi_in(scmd);
2374		dir = DMA_FROM_DEVICE;
2375	}
2376
2377	if (!sdb->length)
2378		return 0;
2379	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2380		return -1;
2381
2382	block = do_div(lba, sdebug_store_sectors);
2383	if (block + num > sdebug_store_sectors)
2384		rest = block + num - sdebug_store_sectors;
2385
2386	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2387		   fake_storep + (block * scsi_debug_sector_size),
2388		   (num - rest) * scsi_debug_sector_size, 0, do_write);
2389	if (ret != (num - rest) * scsi_debug_sector_size)
2390		return ret;
2391
2392	if (rest) {
2393		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2394			    fake_storep, rest * scsi_debug_sector_size,
2395			    (num - rest) * scsi_debug_sector_size, do_write);
2396	}
2397
2398	return ret;
2399}
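/*
 * The two sg_copy_buffer() calls above handle wrap-around of the
 * simulated store: lba is reduced modulo sdebug_store_sectors and, if
 * block + num runs past the end of fake_storep, the remaining "rest"
 * sectors are copied from/to the start of the store.
 */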
2400
2401/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2402 * arr into fake_store(lba,num) and return true. If comparison fails then
2403 * return false. */
2404static bool
2405comp_write_worker(u64 lba, u32 num, const u8 *arr)
2406{
2407	bool res;
2408	u64 block, rest = 0;
2409	u32 store_blks = sdebug_store_sectors;
2410	u32 lb_size = scsi_debug_sector_size;
2411
2412	block = do_div(lba, store_blks);
2413	if (block + num > store_blks)
2414		rest = block + num - store_blks;
2415
2416	res = !memcmp(fake_storep + (block * lb_size), arr,
2417		      (num - rest) * lb_size);
2418	if (!res)
2419		return res;
2420	if (rest)
2421		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2422			     rest * lb_size);
2423	if (!res)
2424		return res;
2425	arr += num * lb_size;
2426	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2427	if (rest)
2428		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2429		       rest * lb_size);
2430	return res;
2431}
2432
2433static __be16 dif_compute_csum(const void *buf, int len)
2434{
2435	__be16 csum;
2436
2437	if (scsi_debug_guard)
2438		csum = (__force __be16)ip_compute_csum(buf, len);
2439	else
2440		csum = cpu_to_be16(crc_t10dif(buf, len));
2441
2442	return csum;
2443}
2444
2445static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2446		      sector_t sector, u32 ei_lba)
2447{
2448	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2449
2450	if (sdt->guard_tag != csum) {
2451		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2452			(unsigned long)sector,
2453			be16_to_cpu(sdt->guard_tag),
2454			be16_to_cpu(csum));
2455		return 0x01;
2456	}
2457	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2458	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2459		pr_err("REF check failed on sector %lu\n",
2460			(unsigned long)sector);
2461		return 0x03;
2462	}
2463	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2464	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2465		pr_err("REF check failed on sector %lu\n",
2466			(unsigned long)sector);
2467		return 0x03;
2468	}
2469	return 0;
2470}
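/*
 * The checks above follow the 8 byte T10 DIF tuple per sector: a
 * 16 bit guard over the data (CRC16, or an IP checksum when
 * scsi_debug_guard selects it), a 16 bit application tag and a 32 bit
 * reference tag. Type 1 compares the reference tag against the low
 * 32 bits of the LBA; type 2 compares it against the expected initial
 * LBA (ei_lba) taken from the 32 byte CDB.
 */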
2471
2472static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2473			  unsigned int sectors, bool read)
2474{
2475	size_t resid;
2476	void *paddr;
2477	const void *dif_store_end = dif_storep + sdebug_store_sectors;
2478	struct sg_mapping_iter miter;
2479
2480	/* Bytes of protection data to copy into sgl */
2481	resid = sectors * sizeof(*dif_storep);
2482
2483	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2484			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2485			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2486
2487	while (sg_miter_next(&miter) && resid > 0) {
2488		size_t len = min(miter.length, resid);
2489		void *start = dif_store(sector);
2490		size_t rest = 0;
2491
2492		if (dif_store_end < start + len)
2493			rest = start + len - dif_store_end;
2494
2495		paddr = miter.addr;
2496
2497		if (read)
2498			memcpy(paddr, start, len - rest);
2499		else
2500			memcpy(start, paddr, len - rest);
2501
2502		if (rest) {
2503			if (read)
2504				memcpy(paddr + len - rest, dif_storep, rest);
2505			else
2506				memcpy(dif_storep, paddr + len - rest, rest);
2507		}
2508
2509		sector += len / sizeof(*dif_storep);
2510		resid -= len;
2511	}
2512	sg_miter_stop(&miter);
2513}
2514
2515static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2516			    unsigned int sectors, u32 ei_lba)
2517{
2518	unsigned int i;
2519	struct sd_dif_tuple *sdt;
2520	sector_t sector;
2521
2522	for (i = 0; i < sectors; i++, ei_lba++) {
2523		int ret;
2524
2525		sector = start_sec + i;
2526		sdt = dif_store(sector);
2527
2528		if (sdt->app_tag == cpu_to_be16(0xffff))
2529			continue;
2530
2531		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2532		if (ret) {
2533			dif_errors++;
2534			return ret;
2535		}
2536	}
2537
2538	dif_copy_prot(SCpnt, start_sec, sectors, true);
2539	dix_reads++;
2540
2541	return 0;
2542}
2543
2544static int
2545resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2546{
2547	u8 *cmd = scp->cmnd;
2548	u64 lba;
2549	u32 num;
2550	u32 ei_lba;
2551	unsigned long iflags;
2552	int ret;
2553	bool check_prot;
2554
2555	switch (cmd[0]) {
2556	case READ_16:
2557		ei_lba = 0;
2558		lba = get_unaligned_be64(cmd + 2);
2559		num = get_unaligned_be32(cmd + 10);
2560		check_prot = true;
2561		break;
2562	case READ_10:
2563		ei_lba = 0;
2564		lba = get_unaligned_be32(cmd + 2);
2565		num = get_unaligned_be16(cmd + 7);
2566		check_prot = true;
2567		break;
2568	case READ_6:
2569		ei_lba = 0;
2570		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2571		      (u32)(cmd[1] & 0x1f) << 16;
2572		num = (0 == cmd[4]) ? 256 : cmd[4];
2573		check_prot = true;
2574		break;
2575	case READ_12:
2576		ei_lba = 0;
2577		lba = get_unaligned_be32(cmd + 2);
2578		num = get_unaligned_be32(cmd + 6);
2579		check_prot = true;
2580		break;
2581	case XDWRITEREAD_10:
2582		ei_lba = 0;
2583		lba = get_unaligned_be32(cmd + 2);
2584		num = get_unaligned_be16(cmd + 7);
2585		check_prot = false;
2586		break;
2587	default:	/* assume READ(32) */
2588		lba = get_unaligned_be64(cmd + 12);
2589		ei_lba = get_unaligned_be32(cmd + 20);
2590		num = get_unaligned_be32(cmd + 28);
2591		check_prot = false;
2592		break;
2593	}
2594	if (check_prot) {
2595		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2596		    (cmd[1] & 0xe0)) {
2597			mk_sense_invalid_opcode(scp);
2598			return check_condition_result;
2599		}
2600		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2601		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2602		    (cmd[1] & 0xe0) == 0)
2603			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2604				    "to DIF device\n");
2605	}
2606	if (sdebug_any_injecting_opt) {
2607		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2608
2609		if (ep->inj_short)
2610			num /= 2;
2611	}
2612
2613	/* inline check_device_access_params() */
2614	if (lba + num > sdebug_capacity) {
2615		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2616		return check_condition_result;
2617	}
2618	/* transfer length excessive (tie in to block limits VPD page) */
2619	if (num > sdebug_store_sectors) {
2620		/* needs work to find which cdb byte 'num' comes from */
2621		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2622		return check_condition_result;
2623	}
2624
2625	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2626	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2627	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2628		/* claim unrecoverable read error */
2629		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2630		/* set info field and valid bit for fixed descriptor */
2631		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2632			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
2633			ret = (lba < OPT_MEDIUM_ERR_ADDR)
2634			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2635			put_unaligned_be32(ret, scp->sense_buffer + 3);
2636		}
2637		scsi_set_resid(scp, scsi_bufflen(scp));
2638		return check_condition_result;
2639	}
2640
2641	read_lock_irqsave(&atomic_rw, iflags);
2642
2643	/* DIX + T10 DIF */
2644	if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2645		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2646
2647		if (prot_ret) {
2648			read_unlock_irqrestore(&atomic_rw, iflags);
2649			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2650			return illegal_condition_result;
2651		}
2652	}
2653
2654	ret = do_device_access(scp, lba, num, false);
2655	read_unlock_irqrestore(&atomic_rw, iflags);
2656	if (ret == -1)
2657		return DID_ERROR << 16;
2658
2659	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2660
2661	if (sdebug_any_injecting_opt) {
2662		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2663
2664		if (ep->inj_recovered) {
2665			mk_sense_buffer(scp, RECOVERED_ERROR,
2666					THRESHOLD_EXCEEDED, 0);
2667			return check_condition_result;
2668		} else if (ep->inj_transport) {
2669			mk_sense_buffer(scp, ABORTED_COMMAND,
2670					TRANSPORT_PROBLEM, ACK_NAK_TO);
2671			return check_condition_result;
2672		} else if (ep->inj_dif) {
2673			/* Logical block guard check failed */
2674			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2675			return illegal_condition_result;
2676		} else if (ep->inj_dix) {
2677			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2678			return illegal_condition_result;
2679		}
2680	}
2681	return 0;
2682}
2683
2684static void dump_sector(unsigned char *buf, int len)
2685{
2686	int i, j, n;
2687
2688	pr_err(">>> Sector Dump <<<\n");
2689	for (i = 0 ; i < len ; i += 16) {
2690		char b[128];
2691
2692		for (j = 0, n = 0; j < 16; j++) {
2693			unsigned char c = buf[i+j];
2694
2695			if (c >= 0x20 && c < 0x7e)
2696				n += scnprintf(b + n, sizeof(b) - n,
2697					       " %c ", buf[i+j]);
2698			else
2699				n += scnprintf(b + n, sizeof(b) - n,
2700					       "%02x ", buf[i+j]);
2701		}
2702		pr_err("%04d: %s\n", i, b);
2703	}
2704}
2705
2706static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2707			     unsigned int sectors, u32 ei_lba)
2708{
2709	int ret;
2710	struct sd_dif_tuple *sdt;
2711	void *daddr;
2712	sector_t sector = start_sec;
2713	int ppage_offset;
2714	int dpage_offset;
2715	struct sg_mapping_iter diter;
2716	struct sg_mapping_iter piter;
2717
2718	BUG_ON(scsi_sg_count(SCpnt) == 0);
2719	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2720
2721	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2722			scsi_prot_sg_count(SCpnt),
2723			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2724	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2725			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2726
2727	/* For each protection page */
2728	while (sg_miter_next(&piter)) {
2729		dpage_offset = 0;
2730		if (WARN_ON(!sg_miter_next(&diter))) {
2731			ret = 0x01;
2732			goto out;
2733		}
2734
2735		for (ppage_offset = 0; ppage_offset < piter.length;
2736		     ppage_offset += sizeof(struct sd_dif_tuple)) {
2737			/* If we're at the end of the current
2738			 * data page advance to the next one
2739			 */
2740			if (dpage_offset >= diter.length) {
2741				if (WARN_ON(!sg_miter_next(&diter))) {
2742					ret = 0x01;
2743					goto out;
2744				}
2745				dpage_offset = 0;
2746			}
2747
2748			sdt = piter.addr + ppage_offset;
2749			daddr = diter.addr + dpage_offset;
2750
2751			ret = dif_verify(sdt, daddr, sector, ei_lba);
2752			if (ret) {
2753				dump_sector(daddr, scsi_debug_sector_size);
2754				goto out;
2755			}
2756
2757			sector++;
2758			ei_lba++;
2759			dpage_offset += scsi_debug_sector_size;
2760		}
2761		diter.consumed = dpage_offset;
2762		sg_miter_stop(&diter);
2763	}
2764	sg_miter_stop(&piter);
2765
2766	dif_copy_prot(SCpnt, start_sec, sectors, false);
2767	dix_writes++;
2768
2769	return 0;
2770
2771out:
2772	dif_errors++;
2773	sg_miter_stop(&diter);
2774	sg_miter_stop(&piter);
2775	return ret;
2776}
2777
2778static unsigned long lba_to_map_index(sector_t lba)
2779{
2780	if (scsi_debug_unmap_alignment) {
2781		lba += scsi_debug_unmap_granularity -
2782			scsi_debug_unmap_alignment;
2783	}
2784	sector_div(lba, scsi_debug_unmap_granularity);
2785
2786	return lba;
2787}
2788
2789static sector_t map_index_to_lba(unsigned long index)
2790{
2791	sector_t lba = index * scsi_debug_unmap_granularity;
2792
2793	if (scsi_debug_unmap_alignment) {
2794		lba -= scsi_debug_unmap_granularity -
2795			scsi_debug_unmap_alignment;
2796	}
2797
2798	return lba;
2799}
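/*
 * Example for the two helpers above, assuming unmap_granularity=8 and
 * unmap_alignment=0: LBAs 0-7 map to bit 0 of map_storep, LBAs 8-15 to
 * bit 1, and map_index_to_lba(1) returns LBA 8. With a non-zero
 * alignment the grid is shifted so that the first, partial granule
 * ends at the alignment LBA.
 */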
2800
2801static unsigned int map_state(sector_t lba, unsigned int *num)
2802{
2803	sector_t end;
2804	unsigned int mapped;
2805	unsigned long index;
2806	unsigned long next;
2807
2808	index = lba_to_map_index(lba);
2809	mapped = test_bit(index, map_storep);
2810
2811	if (mapped)
2812		next = find_next_zero_bit(map_storep, map_size, index);
2813	else
2814		next = find_next_bit(map_storep, map_size, index);
2815
2816	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2817	*num = end - lba;
2818
2819	return mapped;
2820}
2821
2822static void map_region(sector_t lba, unsigned int len)
2823{
2824	sector_t end = lba + len;
2825
2826	while (lba < end) {
2827		unsigned long index = lba_to_map_index(lba);
2828
2829		if (index < map_size)
2830			set_bit(index, map_storep);
2831
2832		lba = map_index_to_lba(index + 1);
2833	}
2834}
2835
2836static void unmap_region(sector_t lba, unsigned int len)
2837{
2838	sector_t end = lba + len;
2839
2840	while (lba < end) {
2841		unsigned long index = lba_to_map_index(lba);
2842
2843		if (lba == map_index_to_lba(index) &&
2844		    lba + scsi_debug_unmap_granularity <= end &&
2845		    index < map_size) {
2846			clear_bit(index, map_storep);
2847			if (scsi_debug_lbprz) {
2848				memset(fake_storep +
2849				       lba * scsi_debug_sector_size, 0,
2850				       scsi_debug_sector_size *
2851				       scsi_debug_unmap_granularity);
2852			}
2853			if (dif_storep) {
2854				memset(dif_storep + lba, 0xff,
2855				       sizeof(*dif_storep) *
2856				       scsi_debug_unmap_granularity);
2857			}
2858		}
2859		lba = map_index_to_lba(index + 1);
2860	}
2861}
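/*
 * Only granules that are fully covered by the unmap range get their
 * map bit cleared above. When scsi_debug_lbprz is set the backing
 * store for those blocks is zero-filled, so unmapped blocks read back
 * as zeroes (matching the LBPRZ bit advertised in READ CAPACITY(16)),
 * and any protection data is reset to 0xff bytes, i.e. the all-ones
 * application tag that dif checking treats as an escape.
 */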
2862
2863static int
2864resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2865{
2866	u8 *cmd = scp->cmnd;
2867	u64 lba;
2868	u32 num;
2869	u32 ei_lba;
2870	unsigned long iflags;
2871	int ret;
2872	bool check_prot;
2873
2874	switch (cmd[0]) {
2875	case WRITE_16:
2876		ei_lba = 0;
2877		lba = get_unaligned_be64(cmd + 2);
2878		num = get_unaligned_be32(cmd + 10);
2879		check_prot = true;
2880		break;
2881	case WRITE_10:
2882		ei_lba = 0;
2883		lba = get_unaligned_be32(cmd + 2);
2884		num = get_unaligned_be16(cmd + 7);
2885		check_prot = true;
2886		break;
2887	case WRITE_6:
2888		ei_lba = 0;
2889		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2890		      (u32)(cmd[1] & 0x1f) << 16;
2891		num = (0 == cmd[4]) ? 256 : cmd[4];
2892		check_prot = true;
2893		break;
2894	case WRITE_12:
2895		ei_lba = 0;
2896		lba = get_unaligned_be32(cmd + 2);
2897		num = get_unaligned_be32(cmd + 6);
2898		check_prot = true;
2899		break;
2900	case 0x53:	/* XDWRITEREAD(10) */
2901		ei_lba = 0;
2902		lba = get_unaligned_be32(cmd + 2);
2903		num = get_unaligned_be16(cmd + 7);
2904		check_prot = false;
2905		break;
2906	default:	/* assume WRITE(32) */
2907		lba = get_unaligned_be64(cmd + 12);
2908		ei_lba = get_unaligned_be32(cmd + 20);
2909		num = get_unaligned_be32(cmd + 28);
2910		check_prot = false;
2911		break;
2912	}
2913	if (check_prot) {
2914		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2915		    (cmd[1] & 0xe0)) {
2916			mk_sense_invalid_opcode(scp);
2917			return check_condition_result;
2918		}
2919		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2920		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2921		    (cmd[1] & 0xe0) == 0)
2922			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2923				    "to DIF device\n");
2924	}
2925
2926	/* inline check_device_access_params() */
2927	if (lba + num > sdebug_capacity) {
2928		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2929		return check_condition_result;
2930	}
2931	/* transfer length excessive (tie in to block limits VPD page) */
2932	if (num > sdebug_store_sectors) {
2933		/* needs work to find which cdb byte 'num' comes from */
2934		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2935		return check_condition_result;
2936	}
2937
2938	write_lock_irqsave(&atomic_rw, iflags);
2939
2940	/* DIX + T10 DIF */
2941	if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2942		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2943
2944		if (prot_ret) {
2945			write_unlock_irqrestore(&atomic_rw, iflags);
2946			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2947			return illegal_condition_result;
2948		}
2949	}
2950
2951	ret = do_device_access(scp, lba, num, true);
2952	if (scsi_debug_lbp())
2953		map_region(lba, num);
2954	write_unlock_irqrestore(&atomic_rw, iflags);
2955	if (-1 == ret)
2956		return (DID_ERROR << 16);
2957	else if ((ret < (num * scsi_debug_sector_size)) &&
2958		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2959		sdev_printk(KERN_INFO, scp->device,
2960			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2961			    my_name, num * scsi_debug_sector_size, ret);
2962
2963	if (sdebug_any_injecting_opt) {
2964		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2965
2966		if (ep->inj_recovered) {
2967			mk_sense_buffer(scp, RECOVERED_ERROR,
2968					THRESHOLD_EXCEEDED, 0);
2969			return check_condition_result;
2970		} else if (ep->inj_dif) {
2971			/* Logical block guard check failed */
2972			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2973			return illegal_condition_result;
2974		} else if (ep->inj_dix) {
2975			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2976			return illegal_condition_result;
2977		}
2978	}
2979	return 0;
2980}
2981
2982static int
2983resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2984		bool unmap, bool ndob)
2985{
2986	unsigned long iflags;
2987	unsigned long long i;
2988	int ret;
2989
2990	ret = check_device_access_params(scp, lba, num);
2991	if (ret)
2992		return ret;
2993
2994	write_lock_irqsave(&atomic_rw, iflags);
2995
2996	if (unmap && scsi_debug_lbp()) {
2997		unmap_region(lba, num);
2998		goto out;
2999	}
3000
3001	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3002	if (ndob) {
3003		memset(fake_storep + (lba * scsi_debug_sector_size), 0,
3004		       scsi_debug_sector_size);
3005		ret = 0;
3006	} else
3007		ret = fetch_to_dev_buffer(scp, fake_storep +
3008					       (lba * scsi_debug_sector_size),
3009					  scsi_debug_sector_size);
3010
3011	if (-1 == ret) {
3012		write_unlock_irqrestore(&atomic_rw, iflags);
3013		return (DID_ERROR << 16);
3014	} else if ((ret < (num * scsi_debug_sector_size)) &&
3015		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3016		sdev_printk(KERN_INFO, scp->device,
3017			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
3018			    my_name, "write same",
3019			    num * scsi_debug_sector_size, ret);
3020
3021	/* Copy first sector to remaining blocks */
3022	for (i = 1 ; i < num ; i++)
3023		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
3024		       fake_storep + (lba * scsi_debug_sector_size),
3025		       scsi_debug_sector_size);
3026
3027	if (scsi_debug_lbp())
3028		map_region(lba, num);
3029out:
3030	write_unlock_irqrestore(&atomic_rw, iflags);
3031
3032	return 0;
3033}
3034
3035static int
3036resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3037{
3038	u8 *cmd = scp->cmnd;
3039	u32 lba;
3040	u16 num;
3041	u32 ei_lba = 0;
3042	bool unmap = false;
3043
3044	if (cmd[1] & 0x8) {
3045		if (scsi_debug_lbpws10 == 0) {
3046			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3047			return check_condition_result;
3048		} else
3049			unmap = true;
3050	}
3051	lba = get_unaligned_be32(cmd + 2);
3052	num = get_unaligned_be16(cmd + 7);
3053	if (num > scsi_debug_write_same_length) {
3054		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3055		return check_condition_result;
3056	}
3057	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3058}
3059
3060static int
3061resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3062{
3063	u8 *cmd = scp->cmnd;
3064	u64 lba;
3065	u32 num;
3066	u32 ei_lba = 0;
3067	bool unmap = false;
3068	bool ndob = false;
3069
3070	if (cmd[1] & 0x8) {	/* UNMAP */
3071		if (scsi_debug_lbpws == 0) {
3072			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3073			return check_condition_result;
3074		} else
3075			unmap = true;
3076	}
3077	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3078		ndob = true;
3079	lba = get_unaligned_be64(cmd + 2);
3080	num = get_unaligned_be32(cmd + 10);
3081	if (num > scsi_debug_write_same_length) {
3082		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3083		return check_condition_result;
3084	}
3085	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3086}
3087
3088/* Note the mode field is in the same position as the (lower) service action
3089 * field. For the Report supported operation codes command, SPC-4 suggests
3090 * each mode of this command should be reported separately; for future. */
3091static int
3092resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3093{
3094	u8 *cmd = scp->cmnd;
3095	struct scsi_device *sdp = scp->device;
3096	struct sdebug_dev_info *dp;
3097	u8 mode;
3098
3099	mode = cmd[1] & 0x1f;
3100	switch (mode) {
3101	case 0x4:	/* download microcode (MC) and activate (ACT) */
3102		/* set UAs on this device only */
3103		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3104		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3105		break;
3106	case 0x5:	/* download MC, save and ACT */
3107		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3108		break;
3109	case 0x6:	/* download MC with offsets and ACT */
3110		/* set UAs on most devices (LUs) in this target */
3111		list_for_each_entry(dp,
3112				    &devip->sdbg_host->dev_info_list,
3113				    dev_list)
3114			if (dp->target == sdp->id) {
3115				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3116				if (devip != dp)
3117					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3118						dp->uas_bm);
3119			}
3120		break;
3121	case 0x7:	/* download MC with offsets, save, and ACT */
3122		/* set UA on all devices (LUs) in this target */
3123		list_for_each_entry(dp,
3124				    &devip->sdbg_host->dev_info_list,
3125				    dev_list)
3126			if (dp->target == sdp->id)
3127				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3128					dp->uas_bm);
3129		break;
3130	default:
3131		/* do nothing for this command for other mode values */
3132		break;
3133	}
3134	return 0;
3135}
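/*
 * Only the WRITE BUFFER "download microcode" modes (4, 5, 6 and 7) are
 * acted on above, and the effect is limited to raising unit attentions:
 * modes 4 and 6 imitate a microcode change that also resets the device
 * (bus reset UA), while modes 5 and 7 raise the "microcode changed
 * without reset" UA; other modes are accepted and ignored.
 */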
3136
3137static int
3138resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3139{
3140	u8 *cmd = scp->cmnd;
3141	u8 *arr;
3142	u8 *fake_storep_hold;
3143	u64 lba;
3144	u32 dnum;
3145	u32 lb_size = scsi_debug_sector_size;
3146	u8 num;
3147	unsigned long iflags;
3148	int ret;
3149	int retval = 0;
3150
3151	lba = get_unaligned_be64(cmd + 2);
3152	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3153	if (0 == num)
3154		return 0;	/* degenerate case, not an error */
3155	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3156	    (cmd[1] & 0xe0)) {
3157		mk_sense_invalid_opcode(scp);
3158		return check_condition_result;
3159	}
3160	if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3161	     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3162	    (cmd[1] & 0xe0) == 0)
3163		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3164			    "to DIF device\n");
3165
3166	/* inline check_device_access_params() */
3167	if (lba + num > sdebug_capacity) {
3168		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3169		return check_condition_result;
3170	}
3171	/* transfer length excessive (tie in to block limits VPD page) */
3172	if (num > sdebug_store_sectors) {
3173		/* needs work to find which cdb byte 'num' comes from */
3174		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3175		return check_condition_result;
3176	}
3177	dnum = 2 * num;
3178	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3179	if (NULL == arr) {
3180		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3181				INSUFF_RES_ASCQ);
3182		return check_condition_result;
3183	}
3184
3185	write_lock_irqsave(&atomic_rw, iflags);
3186
3187	/* trick do_device_access() to fetch both compare and write buffers
3188	 * from data-in into arr. Safe (atomic) since write_lock held. */
3189	fake_storep_hold = fake_storep;
3190	fake_storep = arr;
3191	ret = do_device_access(scp, 0, dnum, true);
3192	fake_storep = fake_storep_hold;
3193	if (ret == -1) {
3194		retval = DID_ERROR << 16;
3195		goto cleanup;
3196	} else if ((ret < (dnum * lb_size)) &&
3197		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3198		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3199			    "indicated=%u, IO sent=%d bytes\n", my_name,
3200			    dnum * lb_size, ret);
3201	if (!comp_write_worker(lba, num, arr)) {
3202		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3203		retval = check_condition_result;
3204		goto cleanup;
3205	}
3206	if (scsi_debug_lbp())
3207		map_region(lba, num);
3208cleanup:
3209	write_unlock_irqrestore(&atomic_rw, iflags);
3210	kfree(arr);
3211	return retval;
3212}
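/*
 * COMPARE AND WRITE flow above: the data-out buffer carries 2 * num
 * blocks (compare data followed by write data), so do_device_access()
 * is temporarily pointed at arr and asked for dnum blocks.
 * comp_write_worker() then compares the first half against the store
 * and, only on a match, writes the second half; a mismatch is reported
 * as a MISCOMPARE check condition.
 */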
3213
3214struct unmap_block_desc {
3215	__be64	lba;
3216	__be32	blocks;
3217	__be32	__reserved;
3218};
3219
3220static int
3221resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3222{
3223	unsigned char *buf;
3224	struct unmap_block_desc *desc;
3225	unsigned int i, payload_len, descriptors;
3226	int ret;
3227	unsigned long iflags;
3228
3229
3230	if (!scsi_debug_lbp())
3231		return 0;	/* fib and say it's done */
3232	payload_len = get_unaligned_be16(scp->cmnd + 7);
3233	BUG_ON(scsi_bufflen(scp) != payload_len);
3234
3235	descriptors = (payload_len - 8) / 16;
3236	if (descriptors > scsi_debug_unmap_max_desc) {
3237		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3238		return check_condition_result;
3239	}
3240
3241	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3242	if (!buf) {
3243		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3244				INSUFF_RES_ASCQ);
3245		return check_condition_result;
3246	}
3247
3248	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3249
3250	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3251	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3252
3253	desc = (void *)&buf[8];
3254
3255	write_lock_irqsave(&atomic_rw, iflags);
3256
3257	for (i = 0 ; i < descriptors ; i++) {
3258		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3259		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3260
3261		ret = check_device_access_params(scp, lba, num);
3262		if (ret)
3263			goto out;
3264
3265		unmap_region(lba, num);
3266	}
3267
3268	ret = 0;
3269
3270out:
3271	write_unlock_irqrestore(&atomic_rw, iflags);
3272	kfree(buf);
3273
3274	return ret;
3275}
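/*
 * The UNMAP parameter list parsed above is an 8 byte header (data
 * length and block descriptor data length) followed by 16 byte
 * descriptors, each holding an 8 byte starting LBA, a 4 byte block
 * count and 4 reserved bytes, matching struct unmap_block_desc.
 */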
3276
3277#define SDEBUG_GET_LBA_STATUS_LEN 32
3278
3279static int
3280resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3281{
3282	u8 *cmd = scp->cmnd;
3283	u64 lba;
3284	u32 alloc_len, mapped, num;
3285	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3286	int ret;
3287
3288	lba = get_unaligned_be64(cmd + 2);
3289	alloc_len = get_unaligned_be32(cmd + 10);
3290
3291	if (alloc_len < 24)
3292		return 0;
3293
3294	ret = check_device_access_params(scp, lba, 1);
3295	if (ret)
3296		return ret;
3297
3298	if (scsi_debug_lbp())
3299		mapped = map_state(lba, &num);
3300	else {
3301		mapped = 1;
3302		/* following just in case virtual_gb changed */
3303		sdebug_capacity = get_sdebug_capacity();
3304		if (sdebug_capacity - lba <= 0xffffffff)
3305			num = sdebug_capacity - lba;
3306		else
3307			num = 0xffffffff;
3308	}
3309
3310	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3311	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3312	put_unaligned_be64(lba, arr + 8);	/* LBA */
3313	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3314	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3315
3316	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3317}
3318
3319#define SDEBUG_RLUN_ARR_SZ 256
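/*
 * REPORT LUNS response layout assumed below: a 4 byte LUN list length
 * (number of entries * 8), 4 reserved bytes, then one 8 byte LUN entry
 * per logical unit starting at offset 8.
 */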
3320
3321static int resp_report_luns(struct scsi_cmnd * scp,
3322			    struct sdebug_dev_info * devip)
3323{
3324	unsigned int alloc_len;
3325	int lun_cnt, i, upper, num, n, want_wlun, shortish;
3326	u64 lun;
3327	unsigned char *cmd = scp->cmnd;
3328	int select_report = (int)cmd[2];
3329	struct scsi_lun *one_lun;
3330	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3331	unsigned char * max_addr;
3332
3333	clear_luns_changed_on_target(devip);
3334	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
3335	shortish = (alloc_len < 4);
3336	if (shortish || (select_report > 2)) {
3337		mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3338		return check_condition_result;
3339	}
3340	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
3341	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3342	lun_cnt = scsi_debug_max_luns;
3343	if (1 == select_report)
3344		lun_cnt = 0;
3345	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3346		--lun_cnt;
3347	want_wlun = (select_report > 0) ? 1 : 0;
3348	num = lun_cnt + want_wlun;
3349	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3350	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3351	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3352			    sizeof(struct scsi_lun)), num);
3353	if (n < num) {
3354		want_wlun = 0;
3355		lun_cnt = n;
3356	}
3357	one_lun = (struct scsi_lun *) &arr[8];
3358	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3359	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3360             ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3361	     i++, lun++) {
3362		upper = (lun >> 8) & 0x3f;
3363		if (upper)
3364			one_lun[i].scsi_lun[0] =
3365			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3366		one_lun[i].scsi_lun[1] = lun & 0xff;
3367	}
3368	if (want_wlun) {
3369		one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
3370		one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
3371		i++;
3372	}
3373	alloc_len = (unsigned char *)(one_lun + i) - arr;
3374	return fill_from_dev_buffer(scp, arr,
3375				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
3376}
3377
3378static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3379			    unsigned int num, struct sdebug_dev_info *devip)
3380{
3381	int j;
3382	unsigned char *kaddr, *buf;
3383	unsigned int offset;
3384	struct scsi_data_buffer *sdb = scsi_in(scp);
3385	struct sg_mapping_iter miter;
3386
3387	/* ideally we would avoid a temporary buffer, but use one here for simplicity */
3388	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3389	if (!buf) {
3390		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3391				INSUFF_RES_ASCQ);
3392		return check_condition_result;
3393	}
3394
3395	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3396
3397	offset = 0;
3398	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3399			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3400
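	/* XOR the data-out payload (copied into buf above) into the data-in
	 * buffer, which resp_xdwriteread_10() (below) has already filled via
	 * resp_read_dt0(), so the initiator receives old data XOR new data.
	 */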
3401	while (sg_miter_next(&miter)) {
3402		kaddr = miter.addr;
3403		for (j = 0; j < miter.length; j++)
3404			*(kaddr + j) ^= *(buf + offset + j);
3405
3406		offset += miter.length;
3407	}
3408	sg_miter_stop(&miter);
3409	kfree(buf);
3410
3411	return 0;
3412}
3413
3414static int
3415resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3416{
3417	u8 *cmd = scp->cmnd;
3418	u64 lba;
3419	u32 num;
3420	int errsts;
3421
3422	if (!scsi_bidi_cmnd(scp)) {
3423		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3424				INSUFF_RES_ASCQ);
3425		return check_condition_result;
3426	}
3427	errsts = resp_read_dt0(scp, devip);
3428	if (errsts)
3429		return errsts;
3430	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3431		errsts = resp_write_dt0(scp, devip);
3432		if (errsts)
3433			return errsts;
3434	}
3435	lba = get_unaligned_be32(cmd + 2);
3436	num = get_unaligned_be16(cmd + 7);
3437	return resp_xdwriteread(scp, lba, num, devip);
3438}
3439
3440/* When a timer or tasklet goes off, this function is called. */
3441static void sdebug_q_cmd_complete(unsigned long indx)
3442{
3443	int qa_indx;
3444	int retiring = 0;
3445	unsigned long iflags;
3446	struct sdebug_queued_cmd *sqcp;
3447	struct scsi_cmnd *scp;
3448	struct sdebug_dev_info *devip;
3449
3450	atomic_inc(&sdebug_completions);
3451	qa_indx = indx;
3452	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3453		pr_err("wild qa_indx=%d\n", qa_indx);
3454		return;
3455	}
3456	spin_lock_irqsave(&queued_arr_lock, iflags);
3457	sqcp = &queued_arr[qa_indx];
3458	scp = sqcp->a_cmnd;
3459	if (NULL == scp) {
3460		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3461		pr_err("scp is NULL\n");
3462		return;
3463	}
3464	devip = (struct sdebug_dev_info *)scp->device->hostdata;
3465	if (devip)
3466		atomic_dec(&devip->num_in_q);
3467	else
3468		pr_err("devip=NULL\n");
3469	if (atomic_read(&retired_max_queue) > 0)
3470		retiring = 1;
3471
3472	sqcp->a_cmnd = NULL;
3473	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3474		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3475		pr_err("Unexpected completion\n");
3476		return;
3477	}
3478
3479	if (unlikely(retiring)) {	/* user has reduced max_queue */
3480		int k, retval;
3481
3482		retval = atomic_read(&retired_max_queue);
3483		if (qa_indx >= retval) {
3484			spin_unlock_irqrestore(&queued_arr_lock, iflags);
3485			pr_err("index %d too large\n", retval);
3486			return;
3487		}
3488		k = find_last_bit(queued_in_use_bm, retval);
3489		if ((k < scsi_debug_max_queue) || (k == retval))
3490			atomic_set(&retired_max_queue, 0);
3491		else
3492			atomic_set(&retired_max_queue, k + 1);
3493	}
3494	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3495	scp->scsi_done(scp); /* callback to mid level */
3496}
3497
3498/* When the high-resolution timer goes off, this function is called. */
3499static enum hrtimer_restart
3500sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3501{
3502	int qa_indx;
3503	int retiring = 0;
3504	unsigned long iflags;
3505	struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3506	struct sdebug_queued_cmd *sqcp;
3507	struct scsi_cmnd *scp;
3508	struct sdebug_dev_info *devip;
3509
3510	atomic_inc(&sdebug_completions);
3511	qa_indx = sd_hrtp->qa_indx;
3512	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3513		pr_err("wild qa_indx=%d\n", qa_indx);
3514		goto the_end;
3515	}
3516	spin_lock_irqsave(&queued_arr_lock, iflags);
3517	sqcp = &queued_arr[qa_indx];
3518	scp = sqcp->a_cmnd;
3519	if (NULL == scp) {
3520		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3521		pr_err("scp is NULL\n");
3522		goto the_end;
3523	}
3524	devip = (struct sdebug_dev_info *)scp->device->hostdata;
3525	if (devip)
3526		atomic_dec(&devip->num_in_q);
3527	else
3528		pr_err("devip=NULL\n");
3529	if (atomic_read(&retired_max_queue) > 0)
3530		retiring = 1;
3531
3532	sqcp->a_cmnd = NULL;
3533	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3534		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3535		pr_err("Unexpected completion\n");
3536		goto the_end;
3537	}
3538
3539	if (unlikely(retiring)) {	/* user has reduced max_queue */
3540		int k, retval;
3541
3542		retval = atomic_read(&retired_max_queue);
3543		if (qa_indx >= retval) {
3544			spin_unlock_irqrestore(&queued_arr_lock, iflags);
3545			pr_err("index %d too large\n", retval);
3546			goto the_end;
3547		}
3548		k = find_last_bit(queued_in_use_bm, retval);
3549		if ((k < scsi_debug_max_queue) || (k == retval))
3550			atomic_set(&retired_max_queue, 0);
3551		else
3552			atomic_set(&retired_max_queue, k + 1);
3553	}
3554	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3555	scp->scsi_done(scp); /* callback to mid level */
3556the_end:
3557	return HRTIMER_NORESTART;
3558}
3559
3560static struct sdebug_dev_info *
3561sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3562{
3563	struct sdebug_dev_info *devip;
3564
3565	devip = kzalloc(sizeof(*devip), flags);
3566	if (devip) {
3567		devip->sdbg_host = sdbg_host;
3568		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3569	}
3570	return devip;
3571}
3572
3573static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3574{
3575	struct sdebug_host_info * sdbg_host;
3576	struct sdebug_dev_info * open_devip = NULL;
3577	struct sdebug_dev_info * devip =
3578			(struct sdebug_dev_info *)sdev->hostdata;
3579
3580	if (devip)
3581		return devip;
3582	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3583	if (!sdbg_host) {
3584		pr_err("Host info NULL\n");
3585		return NULL;
3586        }
3587	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3588		if ((devip->used) && (devip->channel == sdev->channel) &&
3589                    (devip->target == sdev->id) &&
3590                    (devip->lun == sdev->lun))
3591                        return devip;
3592		else {
3593			if ((!devip->used) && (!open_devip))
3594				open_devip = devip;
3595		}
3596	}
3597	if (!open_devip) { /* try and make a new one */
3598		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3599		if (!open_devip) {
3600			pr_err("out of memory at line %d\n", __LINE__);
3601			return NULL;
3602		}
3603	}
3604
3605	open_devip->channel = sdev->channel;
3606	open_devip->target = sdev->id;
3607	open_devip->lun = sdev->lun;
3608	open_devip->sdbg_host = sdbg_host;
3609	atomic_set(&open_devip->num_in_q, 0);
3610	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3611	open_devip->used = true;
3612	return open_devip;
3613}
3614
3615static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3616{
3617	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3618		pr_info("slave_alloc <%u %u %u %llu>\n",
3619		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3620	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3621	return 0;
3622}
3623
3624static int scsi_debug_slave_configure(struct scsi_device *sdp)
3625{
3626	struct sdebug_dev_info *devip;
3627
3628	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3629		pr_info("slave_configure <%u %u %u %llu>\n",
3630		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3631	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3632		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3633	devip = devInfoReg(sdp);
3634	if (NULL == devip)
3635		return 1;	/* no resources, will be marked offline */
3636	sdp->hostdata = devip;
3637	blk_queue_max_segment_size(sdp->request_queue, -1U);
3638	if (scsi_debug_no_uld)
3639		sdp->no_uld_attach = 1;
3640	return 0;
3641}
3642
3643static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3644{
3645	struct sdebug_dev_info *devip =
3646		(struct sdebug_dev_info *)sdp->hostdata;
3647
3648	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3649		pr_info("slave_destroy <%u %u %u %llu>\n",
3650		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3651	if (devip) {
3652		/* make this slot available for re-use */
3653		devip->used = false;
3654		sdp->hostdata = NULL;
3655	}
3656}
3657
3658/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3659static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3660{
3661	unsigned long iflags;
3662	int k, qmax, r_qmax;
3663	struct sdebug_queued_cmd *sqcp;
3664	struct sdebug_dev_info *devip;
3665
3666	spin_lock_irqsave(&queued_arr_lock, iflags);
3667	qmax = scsi_debug_max_queue;
3668	r_qmax = atomic_read(&retired_max_queue);
3669	if (r_qmax > qmax)
3670		qmax = r_qmax;
3671	for (k = 0; k < qmax; ++k) {
3672		if (test_bit(k, queued_in_use_bm)) {
3673			sqcp = &queued_arr[k];
3674			if (cmnd == sqcp->a_cmnd) {
3675				devip = (struct sdebug_dev_info *)
3676					cmnd->device->hostdata;
3677				if (devip)
3678					atomic_dec(&devip->num_in_q);
3679				sqcp->a_cmnd = NULL;
3680				spin_unlock_irqrestore(&queued_arr_lock,
3681						       iflags);
3682				if (scsi_debug_ndelay > 0) {
3683					if (sqcp->sd_hrtp)
3684						hrtimer_cancel(
3685							&sqcp->sd_hrtp->hrt);
3686				} else if (scsi_debug_delay > 0) {
3687					if (sqcp->cmnd_timerp)
3688						del_timer_sync(
3689							sqcp->cmnd_timerp);
3690				} else if (scsi_debug_delay < 0) {
3691					if (sqcp->tletp)
3692						tasklet_kill(sqcp->tletp);
3693				}
3694				clear_bit(k, queued_in_use_bm);
3695				return 1;
3696			}
3697		}
3698	}
3699	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3700	return 0;
3701}
3702
3703/* Deletes (stops) timers or tasklets of all queued commands */
3704static void stop_all_queued(void)
3705{
3706	unsigned long iflags;
3707	int k;
3708	struct sdebug_queued_cmd *sqcp;
3709	struct sdebug_dev_info *devip;
3710
3711	spin_lock_irqsave(&queued_arr_lock, iflags);
3712	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3713		if (test_bit(k, queued_in_use_bm)) {
3714			sqcp = &queued_arr[k];
3715			if (sqcp->a_cmnd) {
3716				devip = (struct sdebug_dev_info *)
3717					sqcp->a_cmnd->device->hostdata;
3718				if (devip)
3719					atomic_dec(&devip->num_in_q);
3720				sqcp->a_cmnd = NULL;
3721				spin_unlock_irqrestore(&queued_arr_lock,
3722						       iflags);
3723				if (scsi_debug_ndelay > 0) {
3724					if (sqcp->sd_hrtp)
3725						hrtimer_cancel(
3726							&sqcp->sd_hrtp->hrt);
3727				} else if (scsi_debug_delay > 0) {
3728					if (sqcp->cmnd_timerp)
3729						del_timer_sync(
3730							sqcp->cmnd_timerp);
3731				} else if (scsi_debug_delay < 0) {
3732					if (sqcp->tletp)
3733						tasklet_kill(sqcp->tletp);
3734				}
3735				clear_bit(k, queued_in_use_bm);
3736				spin_lock_irqsave(&queued_arr_lock, iflags);
3737			}
3738		}
3739	}
3740	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3741}
3742
3743/* Free queued command memory on heap */
3744static void free_all_queued(void)
3745{
3746	unsigned long iflags;
3747	int k;
3748	struct sdebug_queued_cmd *sqcp;
3749
3750	spin_lock_irqsave(&queued_arr_lock, iflags);
3751	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3752		sqcp = &queued_arr[k];
3753		kfree(sqcp->cmnd_timerp);
3754		sqcp->cmnd_timerp = NULL;
3755		kfree(sqcp->tletp);
3756		sqcp->tletp = NULL;
3757		kfree(sqcp->sd_hrtp);
3758		sqcp->sd_hrtp = NULL;
3759	}
3760	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3761}
3762
3763static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3764{
3765	++num_aborts;
3766	if (SCpnt) {
3767		if (SCpnt->device &&
3768		    (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3769			sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3770				    __func__);
3771		stop_queued_cmnd(SCpnt);
3772	}
3773	return SUCCESS;
3774}
3775
3776static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3777{
3778	struct sdebug_dev_info * devip;
3779
3780	++num_dev_resets;
3781	if (SCpnt && SCpnt->device) {
3782		struct scsi_device *sdp = SCpnt->device;
3783
3784		if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3785			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3786		devip = devInfoReg(sdp);
3787		if (devip)
3788			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3789	}
3790	return SUCCESS;
3791}
3792
3793static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3794{
3795	struct sdebug_host_info *sdbg_host;
3796	struct sdebug_dev_info *devip;
3797	struct scsi_device *sdp;
3798	struct Scsi_Host *hp;
3799	int k = 0;
3800
3801	++num_target_resets;
3802	if (!SCpnt)
3803		goto lie;
3804	sdp = SCpnt->device;
3805	if (!sdp)
3806		goto lie;
3807	if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3808		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3809	hp = sdp->host;
3810	if (!hp)
3811		goto lie;
3812	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3813	if (sdbg_host) {
3814		list_for_each_entry(devip,
3815				    &sdbg_host->dev_info_list,
3816				    dev_list)
3817			if (devip->target == sdp->id) {
3818				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3819				++k;
3820			}
3821	}
3822	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3823		sdev_printk(KERN_INFO, sdp,
3824			    "%s: %d device(s) found in target\n", __func__, k);
3825lie:
3826	return SUCCESS;
3827}
3828
3829static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3830{
3831	struct sdebug_host_info *sdbg_host;
3832	struct sdebug_dev_info *devip;
3833        struct scsi_device * sdp;
3834        struct Scsi_Host * hp;
3835	int k = 0;
3836
3837	++num_bus_resets;
3838	if (!(SCpnt && SCpnt->device))
3839		goto lie;
3840	sdp = SCpnt->device;
3841	if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3842		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3843	hp = sdp->host;
3844	if (hp) {
3845		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3846		if (sdbg_host) {
3847			list_for_each_entry(devip,
3848                                            &sdbg_host->dev_info_list,
3849					    dev_list) {
3850				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3851				++k;
3852			}
3853		}
3854	}
3855	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3856		sdev_printk(KERN_INFO, sdp,
3857			    "%s: %d device(s) found in host\n", __func__, k);
3858lie:
3859	return SUCCESS;
3860}
3861
3862static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3863{
3864	struct sdebug_host_info * sdbg_host;
3865	struct sdebug_dev_info *devip;
3866	int k = 0;
3867
3868	++num_host_resets;
3869	if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3870		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3871        spin_lock(&sdebug_host_list_lock);
3872        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3873		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3874				    dev_list) {
3875			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3876			++k;
3877		}
3878        }
3879        spin_unlock(&sdebug_host_list_lock);
3880	stop_all_queued();
3881	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3882		sdev_printk(KERN_INFO, SCpnt->device,
3883			    "%s: %d device(s) found\n", __func__, k);
3884	return SUCCESS;
3885}
3886
3887static void __init sdebug_build_parts(unsigned char *ramp,
3888				      unsigned long store_size)
3889{
3890	struct partition * pp;
3891	int starts[SDEBUG_MAX_PARTS + 2];
3892	int sectors_per_part, num_sectors, k;
3893	int heads_by_sects, start_sec, end_sec;
3894
3895	/* assume partition table already zeroed */
3896	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3897		return;
3898	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3899		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3900		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3901	}
3902	num_sectors = (int)sdebug_store_sectors;
3903	sectors_per_part = (num_sectors - sdebug_sectors_per)
3904			   / scsi_debug_num_parts;
3905	heads_by_sects = sdebug_heads * sdebug_sectors_per;
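	/* For example (default geometry): an 8 MiB store with 512 byte sectors
	 * has 16384 sectors; sdebug_heads=8 and sdebug_sectors_per=32 give
	 * heads_by_sects=256, so each partition start below is rounded down
	 * to a multiple of 256 sectors, i.e. a cylinder boundary.
	 */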
3906        starts[0] = sdebug_sectors_per;
3907	for (k = 1; k < scsi_debug_num_parts; ++k)
3908		starts[k] = ((k * sectors_per_part) / heads_by_sects)
3909			    * heads_by_sects;
3910	starts[scsi_debug_num_parts] = num_sectors;
3911	starts[scsi_debug_num_parts + 1] = 0;
3912
3913	ramp[510] = 0x55;	/* magic partition markings */
3914	ramp[511] = 0xAA;
3915	pp = (struct partition *)(ramp + 0x1be);
3916	for (k = 0; starts[k + 1]; ++k, ++pp) {
3917		start_sec = starts[k];
3918		end_sec = starts[k + 1] - 1;
3919		pp->boot_ind = 0;
3920
3921		pp->cyl = start_sec / heads_by_sects;
3922		pp->head = (start_sec - (pp->cyl * heads_by_sects))
3923			   / sdebug_sectors_per;
3924		pp->sector = (start_sec % sdebug_sectors_per) + 1;
3925
3926		pp->end_cyl = end_sec / heads_by_sects;
3927		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3928			       / sdebug_sectors_per;
3929		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3930
3931		pp->start_sect = cpu_to_le32(start_sec);
3932		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3933		pp->sys_ind = 0x83;	/* plain Linux partition */
3934	}
3935}
3936
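/*
 * schedule_resp() either completes the command in the caller's context
 * (delta_jiff == 0) or queues it in queued_arr[] and defers completion:
 * via a timer_list when delta_jiff > 0 (delay in jiffies), via a hrtimer
 * when scsi_debug_ndelay > 0 (delay in nanoseconds), or via a tasklet when
 * delta_jiff is negative.
 */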
3937static int
3938schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3939	      int scsi_result, int delta_jiff)
3940{
3941	unsigned long iflags;
3942	int k, num_in_q, qdepth, inject;
3943	struct sdebug_queued_cmd *sqcp = NULL;
3944	struct scsi_device *sdp;
3945
3946	/* this should never happen */
3947	if (WARN_ON(!cmnd))
3948		return SCSI_MLQUEUE_HOST_BUSY;
3949
3950	if (NULL == devip) {
3951		pr_warn("called with devip == NULL\n");
3952		/* no particularly good error to report back */
3953		return SCSI_MLQUEUE_HOST_BUSY;
3954	}
3955
3956	sdp = cmnd->device;
3957
3958	if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3959		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3960			    __func__, scsi_result);
3961	if (delta_jiff == 0)
3962		goto respond_in_thread;
3963
3964	/* schedule the response at a later time if resources permit */
3965	spin_lock_irqsave(&queued_arr_lock, iflags);
3966	num_in_q = atomic_read(&devip->num_in_q);
3967	qdepth = cmnd->device->queue_depth;
3968	inject = 0;
3969	if ((qdepth > 0) && (num_in_q >= qdepth)) {
3970		if (scsi_result) {
3971			spin_unlock_irqrestore(&queued_arr_lock, iflags);
3972			goto respond_in_thread;
3973		} else
3974			scsi_result = device_qfull_result;
3975	} else if ((scsi_debug_every_nth != 0) &&
3976		   (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3977		   (scsi_result == 0)) {
3978		if ((num_in_q == (qdepth - 1)) &&
3979		    (atomic_inc_return(&sdebug_a_tsf) >=
3980		     abs(scsi_debug_every_nth))) {
3981			atomic_set(&sdebug_a_tsf, 0);
3982			inject = 1;
3983			scsi_result = device_qfull_result;
3984		}
3985	}
3986
3987	k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3988	if (k >= scsi_debug_max_queue) {
3989		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3990		if (scsi_result)
3991			goto respond_in_thread;
3992		else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3993			scsi_result = device_qfull_result;
3994		if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3995			sdev_printk(KERN_INFO, sdp,
3996				    "%s: max_queue=%d exceeded, %s\n",
3997				    __func__, scsi_debug_max_queue,
3998				    (scsi_result ?  "status: TASK SET FULL" :
3999						    "report: host busy"));
4000		if (scsi_result)
4001			goto respond_in_thread;
4002		else
4003			return SCSI_MLQUEUE_HOST_BUSY;
4004	}
4005	__set_bit(k, queued_in_use_bm);
4006	atomic_inc(&devip->num_in_q);
4007	sqcp = &queued_arr[k];
4008	sqcp->a_cmnd = cmnd;
4009	cmnd->result = scsi_result;
4010	spin_unlock_irqrestore(&queued_arr_lock, iflags);
4011	if (delta_jiff > 0) {
4012		if (NULL == sqcp->cmnd_timerp) {
4013			sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
4014						    GFP_ATOMIC);
4015			if (NULL == sqcp->cmnd_timerp)
4016				return SCSI_MLQUEUE_HOST_BUSY;
4017			init_timer(sqcp->cmnd_timerp);
4018		}
4019		sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
4020		sqcp->cmnd_timerp->data = k;
4021		sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
4022		add_timer(sqcp->cmnd_timerp);
4023	} else if (scsi_debug_ndelay > 0) {
4024		ktime_t kt = ktime_set(0, scsi_debug_ndelay);
4025		struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
4026
4027		if (NULL == sd_hp) {
4028			sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
4029			if (NULL == sd_hp)
4030				return SCSI_MLQUEUE_HOST_BUSY;
4031			sqcp->sd_hrtp = sd_hp;
4032			hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
4033				     HRTIMER_MODE_REL);
4034			sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
4035			sd_hp->qa_indx = k;
4036		}
4037		hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
4038	} else {	/* delay < 0 */
4039		if (NULL == sqcp->tletp) {
4040			sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
4041					      GFP_ATOMIC);
4042			if (NULL == sqcp->tletp)
4043				return SCSI_MLQUEUE_HOST_BUSY;
4044			tasklet_init(sqcp->tletp,
4045				     sdebug_q_cmd_complete, k);
4046		}
4047		if (-1 == delta_jiff)
4048			tasklet_hi_schedule(sqcp->tletp);
4049		else
4050			tasklet_schedule(sqcp->tletp);
4051	}
4052	if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
4053	    (scsi_result == device_qfull_result))
4054		sdev_printk(KERN_INFO, sdp,
4055			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4056			    num_in_q, (inject ? "<inject> " : ""),
4057			    "status: TASK SET FULL");
4058	return 0;
4059
4060respond_in_thread:	/* call back to mid-layer using invocation thread */
4061	cmnd->result = scsi_result;
4062	cmnd->scsi_done(cmnd);
4063	return 0;
4064}
4065
4066/* Note: The following macros create attribute files in the
4067   /sys/module/scsi_debug/parameters directory. Unfortunately this
4068   driver is not notified when one of these parameters changes, so it
4069   cannot trigger auxiliary actions as it can when the corresponding
4070   attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4071 */
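/*
 * Example usage (illustrative values): parameters can be given at load time,
 * e.g. "modprobe scsi_debug dev_size_mb=16 max_luns=2", and those declared
 * writable below can later be changed through files such as
 * /sys/module/scsi_debug/parameters/every_nth.
 */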
4072module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
4073module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
4074module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
4075module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
4076module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
4077module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
4078module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
4079module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
4080module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
4081module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
4082module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
4083module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
4084module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
4085module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
4086module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
4087module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
4088module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
4089module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
4090module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
4091module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
4092module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
4093module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
4094module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
4095module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
4096module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
4097module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
4098module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
4099module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
4100module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
4101module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
4102module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
4103module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
4104module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
4105module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
4106module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4107module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4108module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4109module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4110		   S_IRUGO | S_IWUSR);
4111module_param_named(write_same_length, scsi_debug_write_same_length, int,
4112		   S_IRUGO | S_IWUSR);
4113
4114MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4115MODULE_DESCRIPTION("SCSI debug adapter driver");
4116MODULE_LICENSE("GPL");
4117MODULE_VERSION(SCSI_DEBUG_VERSION);
4118
4119MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4120MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4121MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4122MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4123MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4124MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4125MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4126MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4127MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4128MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4129MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4130MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4131MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4132MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4133MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4134MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4135MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4136MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4137MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4138MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4139MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4140MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4141MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4142MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4143MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4144MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4145MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4146MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4147MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4148MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4149MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4150MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4151MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4152MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4153MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
4154MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4155MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4156MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4157MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4158
4159static char sdebug_info[256];
4160
4161static const char * scsi_debug_info(struct Scsi_Host * shp)
4162{
4163	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4164		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4165		scsi_debug_version_date, scsi_debug_dev_size_mb,
4166		scsi_debug_opts);
4167	return sdebug_info;
4168}
4169
4170/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
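/* e.g. (hypothetical host id 0): "echo 1 > /proc/scsi/scsi_debug/0" sets
 * opts to 1 (noise); the value is parsed as a decimal integer. */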
4171static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4172{
4173	char arr[16];
4174	int opts;
4175	int minLen = length > 15 ? 15 : length;
4176
4177	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4178		return -EACCES;
4179	memcpy(arr, buffer, minLen);
4180	arr[minLen] = '\0';
4181	if (1 != sscanf(arr, "%d", &opts))
4182		return -EINVAL;
4183	scsi_debug_opts = opts;
4184	if (scsi_debug_every_nth != 0)
4185		atomic_set(&sdebug_cmnd_count, 0);
4186	return length;
4187}
4188
4189/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4190 * same for each scsi_debug host (if more than one). Some of the counters
4191 * in the output are not atomic, so they may be inaccurate on a busy system. */
4192static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4193{
4194	int f, l;
4195	char b[32];
4196
4197	if (scsi_debug_every_nth > 0)
4198		snprintf(b, sizeof(b), " (curr:%d)",
4199			 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4200				atomic_read(&sdebug_a_tsf) :
4201				atomic_read(&sdebug_cmnd_count)));
4202	else
4203		b[0] = '\0';
4204
4205	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4206		"num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4207		"every_nth=%d%s\n"
4208		"delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4209		"sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4210		"command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4211		"host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4212		"usec_in_jiffy=%lu\n",
4213		SCSI_DEBUG_VERSION, scsi_debug_version_date,
4214		scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4215		scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4216		scsi_debug_max_luns, atomic_read(&sdebug_completions),
4217		scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4218		sdebug_sectors_per, num_aborts, num_dev_resets,
4219		num_target_resets, num_bus_resets, num_host_resets,
4220		dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4221
4222	f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4223	if (f != scsi_debug_max_queue) {
4224		l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4225		seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
4226			   "queued_in_use_bm", f, l);
4227	}
4228	return 0;
4229}
4230
4231static ssize_t delay_show(struct device_driver *ddp, char *buf)
4232{
4233        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4234}
4235/* Returns -EBUSY if delay is being changed and commands are queued */
4236static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4237			   size_t count)
4238{
4239	int delay, res;
4240
4241	if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4242		res = count;
4243		if (scsi_debug_delay != delay) {
4244			unsigned long iflags;
4245			int k;
4246
4247			spin_lock_irqsave(&queued_arr_lock, iflags);
4248			k = find_first_bit(queued_in_use_bm,
4249					   scsi_debug_max_queue);
4250			if (k != scsi_debug_max_queue)
4251				res = -EBUSY;	/* have queued commands */
4252			else {
4253				scsi_debug_delay = delay;
4254				scsi_debug_ndelay = 0;
4255			}
4256			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4257		}
4258		return res;
4259	}
4260	return -EINVAL;
4261}
4262static DRIVER_ATTR_RW(delay);
4263
4264static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4265{
4266	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4267}
4268/* Returns -EBUSY if ndelay is being changed and commands are queued */
4269/* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4270static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4271			   size_t count)
4272{
4273	unsigned long iflags;
4274	int ndelay, res, k;
4275
4276	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4277	    (ndelay >= 0) && (ndelay < 1000000000)) {
4278		res = count;
4279		if (scsi_debug_ndelay != ndelay) {
4280			spin_lock_irqsave(&queued_arr_lock, iflags);
4281			k = find_first_bit(queued_in_use_bm,
4282					   scsi_debug_max_queue);
4283			if (k != scsi_debug_max_queue)
4284				res = -EBUSY;	/* have queued commands */
4285			else {
4286				scsi_debug_ndelay = ndelay;
4287				scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4288							  : DEF_DELAY;
4289			}
4290			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4291		}
4292		return res;
4293	}
4294	return -EINVAL;
4295}
4296static DRIVER_ATTR_RW(ndelay);
4297
4298static ssize_t opts_show(struct device_driver *ddp, char *buf)
4299{
4300        return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4301}
4302
4303static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4304			  size_t count)
4305{
4306        int opts;
4307	char work[20];
4308
4309        if (1 == sscanf(buf, "%10s", work)) {
4310		if (0 == strncasecmp(work,"0x", 2)) {
4311			if (1 == sscanf(&work[2], "%x", &opts))
4312				goto opts_done;
4313		} else {
4314			if (1 == sscanf(work, "%d", &opts))
4315				goto opts_done;
4316		}
4317	}
4318	return -EINVAL;
4319opts_done:
4320	scsi_debug_opts = opts;
4321	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4322		sdebug_any_injecting_opt = true;
4323	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4324		sdebug_any_injecting_opt = true;
4325	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4326		sdebug_any_injecting_opt = true;
4327	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4328		sdebug_any_injecting_opt = true;
4329	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4330		sdebug_any_injecting_opt = true;
4331	atomic_set(&sdebug_cmnd_count, 0);
4332	atomic_set(&sdebug_a_tsf, 0);
4333	return count;
4334}
4335static DRIVER_ATTR_RW(opts);
4336
4337static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4338{
4339        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4340}
4341static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4342			   size_t count)
4343{
4344        int n;
4345
4346	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4347		scsi_debug_ptype = n;
4348		return count;
4349	}
4350	return -EINVAL;
4351}
4352static DRIVER_ATTR_RW(ptype);
4353
4354static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4355{
4356        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4357}
4358static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4359			    size_t count)
4360{
4361        int n;
4362
4363	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4364		scsi_debug_dsense = n;
4365		return count;
4366	}
4367	return -EINVAL;
4368}
4369static DRIVER_ATTR_RW(dsense);
4370
4371static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4372{
4373        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4374}
4375static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4376			     size_t count)
4377{
4378        int n;
4379
4380	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4381		n = (n > 0);
4382		scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4383		if (scsi_debug_fake_rw != n) {
4384			if ((0 == n) && (NULL == fake_storep)) {
4385				unsigned long sz =
4386					(unsigned long)scsi_debug_dev_size_mb *
4387					1048576;
4388
4389				fake_storep = vmalloc(sz);
4390				if (NULL == fake_storep) {
4391					pr_err("out of memory, 9\n");
4392					return -ENOMEM;
4393				}
4394				memset(fake_storep, 0, sz);
4395			}
4396			scsi_debug_fake_rw = n;
4397		}
4398		return count;
4399	}
4400	return -EINVAL;
4401}
4402static DRIVER_ATTR_RW(fake_rw);
4403
4404static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4405{
4406        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4407}
4408static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4409			      size_t count)
4410{
4411        int n;
4412
4413	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4414		scsi_debug_no_lun_0 = n;
4415		return count;
4416	}
4417	return -EINVAL;
4418}
4419static DRIVER_ATTR_RW(no_lun_0);
4420
4421static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4422{
4423        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4424}
4425static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4426			      size_t count)
4427{
4428        int n;
4429
4430	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4431		scsi_debug_num_tgts = n;
4432		sdebug_max_tgts_luns();
4433		return count;
4434	}
4435	return -EINVAL;
4436}
4437static DRIVER_ATTR_RW(num_tgts);
4438
4439static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4440{
4441        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4442}
4443static DRIVER_ATTR_RO(dev_size_mb);
4444
4445static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4446{
4447        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4448}
4449static DRIVER_ATTR_RO(num_parts);
4450
4451static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4452{
4453        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4454}
4455static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4456			       size_t count)
4457{
4458        int nth;
4459
4460	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4461		scsi_debug_every_nth = nth;
4462		atomic_set(&sdebug_cmnd_count, 0);
4463		return count;
4464	}
4465	return -EINVAL;
4466}
4467static DRIVER_ATTR_RW(every_nth);
4468
4469static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4470{
4471        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4472}
4473static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4474			      size_t count)
4475{
4476        int n;
4477	bool changed;
4478
4479	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4480		changed = (scsi_debug_max_luns != n);
4481		scsi_debug_max_luns = n;
4482		sdebug_max_tgts_luns();
4483		if (changed && (scsi_debug_scsi_level >= 5)) {	/* >= SPC-3 */
4484			struct sdebug_host_info *sdhp;
4485			struct sdebug_dev_info *dp;
4486
4487			spin_lock(&sdebug_host_list_lock);
4488			list_for_each_entry(sdhp, &sdebug_host_list,
4489					    host_list) {
4490				list_for_each_entry(dp, &sdhp->dev_info_list,
4491						    dev_list) {
4492					set_bit(SDEBUG_UA_LUNS_CHANGED,
4493						dp->uas_bm);
4494				}
4495			}
4496			spin_unlock(&sdebug_host_list_lock);
4497		}
4498		return count;
4499	}
4500	return -EINVAL;
4501}
4502static DRIVER_ATTR_RW(max_luns);
4503
4504static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4505{
4506        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4507}
4508/* N.B. max_queue can be changed while there are queued commands. In flight
4509 * commands beyond the new max_queue will be completed. */
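/* If max_queue is lowered below the index of a command still in flight,
 * retired_max_queue (set in max_queue_store() below) remembers the old
 * high-water mark until the completion paths above see those slots drain,
 * then drops back to 0. */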
4510static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4511			       size_t count)
4512{
4513	unsigned long iflags;
4514	int n, k;
4515
4516	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4517	    (n <= SCSI_DEBUG_CANQUEUE)) {
4518		spin_lock_irqsave(&queued_arr_lock, iflags);
4519		k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4520		scsi_debug_max_queue = n;
4521		if (SCSI_DEBUG_CANQUEUE == k)
4522			atomic_set(&retired_max_queue, 0);
4523		else if (k >= n)
4524			atomic_set(&retired_max_queue, k + 1);
4525		else
4526			atomic_set(&retired_max_queue, 0);
4527		spin_unlock_irqrestore(&queued_arr_lock, iflags);
4528		return count;
4529	}
4530	return -EINVAL;
4531}
4532static DRIVER_ATTR_RW(max_queue);
4533
4534static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4535{
4536        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4537}
4538static DRIVER_ATTR_RO(no_uld);
4539
4540static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4541{
4542        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4543}
4544static DRIVER_ATTR_RO(scsi_level);
4545
4546static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4547{
4548        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4549}
4550static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4551				size_t count)
4552{
4553        int n;
4554	bool changed;
4555
4556	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4557		changed = (scsi_debug_virtual_gb != n);
4558		scsi_debug_virtual_gb = n;
4559		sdebug_capacity = get_sdebug_capacity();
4560		if (changed) {
4561			struct sdebug_host_info *sdhp;
4562			struct sdebug_dev_info *dp;
4563
4564			spin_lock(&sdebug_host_list_lock);
4565			list_for_each_entry(sdhp, &sdebug_host_list,
4566					    host_list) {
4567				list_for_each_entry(dp, &sdhp->dev_info_list,
4568						    dev_list) {
4569					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4570						dp->uas_bm);
4571				}
4572			}
4573			spin_unlock(&sdebug_host_list_lock);
4574		}
4575		return count;
4576	}
4577	return -EINVAL;
4578}
4579static DRIVER_ATTR_RW(virtual_gb);
4580
4581static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4582{
4583        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4584}
4585
4586static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4587			      size_t count)
4588{
4589	int delta_hosts;
4590
4591	if (sscanf(buf, "%d", &delta_hosts) != 1)
4592		return -EINVAL;
4593	if (delta_hosts > 0) {
4594		do {
4595			sdebug_add_adapter();
4596		} while (--delta_hosts);
4597	} else if (delta_hosts < 0) {
4598		do {
4599			sdebug_remove_adapter();
4600		} while (++delta_hosts);
4601	}
4602	return count;
4603}
4604static DRIVER_ATTR_RW(add_host);
4605
4606static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4607{
4608	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4609}
4610static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4611				    size_t count)
4612{
4613	int n;
4614
4615	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4616		scsi_debug_vpd_use_hostno = n;
4617		return count;
4618	}
4619	return -EINVAL;
4620}
4621static DRIVER_ATTR_RW(vpd_use_hostno);
4622
4623static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4624{
4625	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4626}
4627static DRIVER_ATTR_RO(sector_size);
4628
4629static ssize_t dix_show(struct device_driver *ddp, char *buf)
4630{
4631	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4632}
4633static DRIVER_ATTR_RO(dix);
4634
4635static ssize_t dif_show(struct device_driver *ddp, char *buf)
4636{
4637	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4638}
4639static DRIVER_ATTR_RO(dif);
4640
4641static ssize_t guard_show(struct device_driver *ddp, char *buf)
4642{
4643	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4644}
4645static DRIVER_ATTR_RO(guard);
4646
4647static ssize_t ato_show(struct device_driver *ddp, char *buf)
4648{
4649	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4650}
4651static DRIVER_ATTR_RO(ato);
4652
4653static ssize_t map_show(struct device_driver *ddp, char *buf)
4654{
4655	ssize_t count;
4656
4657	if (!scsi_debug_lbp())
4658		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4659				 sdebug_store_sectors);
4660
4661	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4662			  (int)map_size, map_storep);
4663	buf[count++] = '\n';
4664	buf[count] = '\0';
4665
4666	return count;
4667}
4668static DRIVER_ATTR_RO(map);
4669
4670static ssize_t removable_show(struct device_driver *ddp, char *buf)
4671{
4672	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4673}
4674static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4675			       size_t count)
4676{
4677	int n;
4678
4679	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4680		scsi_debug_removable = (n > 0);
4681		return count;
4682	}
4683	return -EINVAL;
4684}
4685static DRIVER_ATTR_RW(removable);
4686
4687static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4688{
4689	return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4690}
4691/* Returns -EBUSY if host_lock is being changed and commands are queued */
4692static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4693			       size_t count)
4694{
4695	int n, res;
4696
4697	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4698		bool new_host_lock = (n > 0);
4699
4700		res = count;
4701		if (new_host_lock != scsi_debug_host_lock) {
4702			unsigned long iflags;
4703			int k;
4704
4705			spin_lock_irqsave(&queued_arr_lock, iflags);
4706			k = find_first_bit(queued_in_use_bm,
4707					   scsi_debug_max_queue);
4708			if (k != scsi_debug_max_queue)
4709				res = -EBUSY;	/* have queued commands */
4710			else
4711				scsi_debug_host_lock = new_host_lock;
4712			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4713		}
4714		return res;
4715	}
4716	return -EINVAL;
4717}
4718static DRIVER_ATTR_RW(host_lock);
4719
4720static ssize_t strict_show(struct device_driver *ddp, char *buf)
4721{
4722	return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4723}
4724static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4725			    size_t count)
4726{
4727	int n;
4728
4729	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4730		scsi_debug_strict = (n > 0);
4731		return count;
4732	}
4733	return -EINVAL;
4734}
4735static DRIVER_ATTR_RW(strict);
4736
4737
4738/* Note: The following array creates attribute files in the
4739   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4740   files (over those found in the /sys/module/scsi_debug/parameters
4741   directory) is that auxiliary actions can be triggered when an attribute
4742   is changed. For example see: add_host_store() above.
4743 */
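/* e.g. "echo 4 > /sys/bus/pseudo/drivers/scsi_debug/max_luns" (illustrative
 * value) not only updates scsi_debug_max_luns but also calls
 * sdebug_max_tgts_luns() and, for SPC-3 and later, raises a LUNS CHANGED
 * unit attention (see max_luns_store() above). */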
4744
4745static struct attribute *sdebug_drv_attrs[] = {
4746	&driver_attr_delay.attr,
4747	&driver_attr_opts.attr,
4748	&driver_attr_ptype.attr,
4749	&driver_attr_dsense.attr,
4750	&driver_attr_fake_rw.attr,
4751	&driver_attr_no_lun_0.attr,
4752	&driver_attr_num_tgts.attr,
4753	&driver_attr_dev_size_mb.attr,
4754	&driver_attr_num_parts.attr,
4755	&driver_attr_every_nth.attr,
4756	&driver_attr_max_luns.attr,
4757	&driver_attr_max_queue.attr,
4758	&driver_attr_no_uld.attr,
4759	&driver_attr_scsi_level.attr,
4760	&driver_attr_virtual_gb.attr,
4761	&driver_attr_add_host.attr,
4762	&driver_attr_vpd_use_hostno.attr,
4763	&driver_attr_sector_size.attr,
4764	&driver_attr_dix.attr,
4765	&driver_attr_dif.attr,
4766	&driver_attr_guard.attr,
4767	&driver_attr_ato.attr,
4768	&driver_attr_map.attr,
4769	&driver_attr_removable.attr,
4770	&driver_attr_host_lock.attr,
4771	&driver_attr_ndelay.attr,
4772	&driver_attr_strict.attr,
4773	NULL,
4774};
4775ATTRIBUTE_GROUPS(sdebug_drv);
4776
4777static struct device *pseudo_primary;
4778
4779static int __init scsi_debug_init(void)
4780{
4781	unsigned long sz;
4782	int host_to_add;
4783	int k;
4784	int ret;
4785
4786	atomic_set(&sdebug_cmnd_count, 0);
4787	atomic_set(&sdebug_completions, 0);
4788	atomic_set(&retired_max_queue, 0);
4789
4790	if (scsi_debug_ndelay >= 1000000000) {
4791		pr_warn("ndelay must be less than 1 second, ignored\n");
4792		scsi_debug_ndelay = 0;
4793	} else if (scsi_debug_ndelay > 0)
4794		scsi_debug_delay = DELAY_OVERRIDDEN;
4795
4796	switch (scsi_debug_sector_size) {
4797	case  512:
4798	case 1024:
4799	case 2048:
4800	case 4096:
4801		break;
4802	default:
4803		pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
4804		return -EINVAL;
4805	}
4806
4807	switch (scsi_debug_dif) {
4808
4809	case SD_DIF_TYPE0_PROTECTION:
4810	case SD_DIF_TYPE1_PROTECTION:
4811	case SD_DIF_TYPE2_PROTECTION:
4812	case SD_DIF_TYPE3_PROTECTION:
4813		break;
4814
4815	default:
4816		pr_err("dif must be 0, 1, 2 or 3\n");
4817		return -EINVAL;
4818	}
4819
4820	if (scsi_debug_guard > 1) {
4821		pr_err("guard must be 0 or 1\n");
4822		return -EINVAL;
4823	}
4824
4825	if (scsi_debug_ato > 1) {
4826		pr_err("ato must be 0 or 1\n");
4827		return -EINVAL;
4828	}
4829
4830	if (scsi_debug_physblk_exp > 15) {
4831		pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
4832		return -EINVAL;
4833	}
4834
4835	if (scsi_debug_lowest_aligned > 0x3fff) {
4836		pr_err("lowest_aligned too big: %u\n",
4837			scsi_debug_lowest_aligned);
4838		return -EINVAL;
4839	}
4840
4841	if (scsi_debug_dev_size_mb < 1)
4842		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4843	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4844	sdebug_store_sectors = sz / scsi_debug_sector_size;
4845	sdebug_capacity = get_sdebug_capacity();
4846
4847	/* play around with geometry, don't waste too much on track 0 */
4848	sdebug_heads = 8;
4849	sdebug_sectors_per = 32;
4850	if (scsi_debug_dev_size_mb >= 256)
4851		sdebug_heads = 64;
4852	else if (scsi_debug_dev_size_mb >= 16)
4853		sdebug_heads = 32;
4854	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4855			       (sdebug_sectors_per * sdebug_heads);
4856	if (sdebug_cylinders_per >= 1024) {
4857		/* other LLDs do this; implies >= 1GB ram disk ... */
4858		sdebug_heads = 255;
4859		sdebug_sectors_per = 63;
4860		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4861			       (sdebug_sectors_per * sdebug_heads);
4862	}
4863
4864	if (0 == scsi_debug_fake_rw) {
4865		fake_storep = vmalloc(sz);
4866		if (NULL == fake_storep) {
4867			pr_err("out of memory, 1\n");
4868			return -ENOMEM;
4869		}
4870		memset(fake_storep, 0, sz);
4871		if (scsi_debug_num_parts > 0)
4872			sdebug_build_parts(fake_storep, sz);
4873	}
4874
4875	if (scsi_debug_dix) {
4876		int dif_size;
4877
4878		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4879		dif_storep = vmalloc(dif_size);
4880
4881		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4882
4883		if (dif_storep == NULL) {
4884			pr_err("out of mem. (DIX)\n");
4885			ret = -ENOMEM;
4886			goto free_vm;
4887		}
4888
4889		memset(dif_storep, 0xff, dif_size);
4890	}
4891
4892	/* Logical Block Provisioning */
4893	if (scsi_debug_lbp()) {
4894		scsi_debug_unmap_max_blocks =
4895			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4896
4897		scsi_debug_unmap_max_desc =
4898			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4899
4900		scsi_debug_unmap_granularity =
4901			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4902
4903		if (scsi_debug_unmap_alignment &&
4904		    scsi_debug_unmap_granularity <=
4905		    scsi_debug_unmap_alignment) {
4906			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
4907			return -EINVAL;
4908		}
4909
4910		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4911		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4912
4913		pr_info("%lu provisioning blocks\n", map_size);
4914
4915		if (map_storep == NULL) {
4916			pr_err("out of mem. (MAP)\n");
4917			ret = -ENOMEM;
4918			goto free_vm;
4919		}
4920
4921		bitmap_zero(map_storep, map_size);
4922
4923		/* Map first 1KB for partition table */
4924		if (scsi_debug_num_parts)
4925			map_region(0, 2);
4926	}
4927
4928	pseudo_primary = root_device_register("pseudo_0");
4929	if (IS_ERR(pseudo_primary)) {
4930		pr_warn("root_device_register() error\n");
4931		ret = PTR_ERR(pseudo_primary);
4932		goto free_vm;
4933	}
4934	ret = bus_register(&pseudo_lld_bus);
4935	if (ret < 0) {
4936		pr_warn("bus_register error: %d\n", ret);
4937		goto dev_unreg;
4938	}
4939	ret = driver_register(&sdebug_driverfs_driver);
4940	if (ret < 0) {
4941		pr_warn("driver_register error: %d\n", ret);
4942		goto bus_unreg;
4943	}
4944
4945	host_to_add = scsi_debug_add_host;
4946        scsi_debug_add_host = 0;
4947
4948        for (k = 0; k < host_to_add; k++) {
4949                if (sdebug_add_adapter()) {
4950			pr_err("sdebug_add_adapter failed k=%d\n", k);
4951                        break;
4952                }
4953        }
4954
4955	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4956		pr_info("built %d host(s)\n", scsi_debug_add_host);
4957
4958	return 0;
4959
4960bus_unreg:
4961	bus_unregister(&pseudo_lld_bus);
4962dev_unreg:
4963	root_device_unregister(pseudo_primary);
4964free_vm:
4965	vfree(map_storep);
4966	vfree(dif_storep);
4967	vfree(fake_storep);
4968
4969	return ret;
4970}
4971
4972static void __exit scsi_debug_exit(void)
4973{
4974	int k = scsi_debug_add_host;
4975
4976	stop_all_queued();
4977	free_all_queued();
4978	for (; k; k--)
4979		sdebug_remove_adapter();
4980	driver_unregister(&sdebug_driverfs_driver);
4981	bus_unregister(&pseudo_lld_bus);
4982	root_device_unregister(pseudo_primary);
4983
4984	vfree(dif_storep);
4985	vfree(fake_storep);
4986}
4987
4988device_initcall(scsi_debug_init);
4989module_exit(scsi_debug_exit);
4990
4991static void sdebug_release_adapter(struct device *dev)
4992{
4993	struct sdebug_host_info *sdbg_host;
4994
4995	sdbg_host = to_sdebug_host(dev);
4996	kfree(sdbg_host);
4997}
4998
4999static int sdebug_add_adapter(void)
5000{
5001	int k, devs_per_host;
5002	int error = 0;
5003	struct sdebug_host_info *sdbg_host;
5004	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5005
5006	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5007	if (NULL == sdbg_host) {
5008		pr_err("out of memory at line %d\n", __LINE__);
5009		return -ENOMEM;
5010	}
5011
5012	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5013
5014	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
5015	for (k = 0; k < devs_per_host; k++) {
5016		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5017		if (!sdbg_devinfo) {
5018			pr_err("out of memory at line %d\n", __LINE__);
5019			error = -ENOMEM;
5020			goto clean;
5021		}
5022	}
5023
5024	spin_lock(&sdebug_host_list_lock);
5025	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5026	spin_unlock(&sdebug_host_list_lock);
5027
5028	sdbg_host->dev.bus = &pseudo_lld_bus;
5029	sdbg_host->dev.parent = pseudo_primary;
5030	sdbg_host->dev.release = &sdebug_release_adapter;
5031	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
5032
5033	error = device_register(&sdbg_host->dev);
5034
5035	if (error)
5036		goto clean;
5037
5038	++scsi_debug_add_host;
5039	return error;
5040
5041clean:
5042	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5043				 dev_list) {
5044		list_del(&sdbg_devinfo->dev_list);
5045		kfree(sdbg_devinfo);
5046	}
5047
5048	kfree(sdbg_host);
5049	return error;
5050}
5051
5052static void sdebug_remove_adapter(void)
5053{
5054	struct sdebug_host_info *sdbg_host = NULL;
5055
5056	spin_lock(&sdebug_host_list_lock);
5057	if (!list_empty(&sdebug_host_list)) {
5058		sdbg_host = list_entry(sdebug_host_list.prev,
5059				       struct sdebug_host_info, host_list);
5060		list_del(&sdbg_host->host_list);
5061	}
5062	spin_unlock(&sdebug_host_list_lock);
5063
5064	if (!sdbg_host)
5065		return;
5066
5067	device_unregister(&sdbg_host->dev);
5068	--scsi_debug_add_host;
5069}
5070
5071static int
5072sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5073{
5074	int num_in_q = 0;
5075	unsigned long iflags;
5076	struct sdebug_dev_info *devip;
5077
5078	spin_lock_irqsave(&queued_arr_lock, iflags);
5079	devip = (struct sdebug_dev_info *)sdev->hostdata;
5080	if (NULL == devip) {
5081		spin_unlock_irqrestore(&queued_arr_lock, iflags);
5082		return	-ENODEV;
5083	}
5084	num_in_q = atomic_read(&devip->num_in_q);
5085	spin_unlock_irqrestore(&queued_arr_lock, iflags);
5086
5087	if (qdepth < 1)
5088		qdepth = 1;
5089	/* allow to exceed max host queued_arr elements for testing */
5090	if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
5091		qdepth = SCSI_DEBUG_CANQUEUE + 10;
5092	scsi_change_queue_depth(sdev, qdepth);
5093
5094	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
5095		sdev_printk(KERN_INFO, sdev,
5096			    "%s: qdepth=%d, num_in_q=%d\n",
5097			    __func__, qdepth, num_in_q);
5098	}
5099	return sdev->queue_depth;
5100}
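/*
 * Example (illustrative): this callback runs when user space changes a
 * device's queue depth through sysfs, e.g. (path and H:C:T:L address are
 * assumptions, not taken from this file)
 *
 *   echo 4 > /sys/bus/scsi/devices/0:0:0:0/queue_depth
 *
 * The requested value is clamped to [1, SCSI_DEBUG_CANQUEUE + 10]; the
 * extra headroom is deliberate so queue-full handling can be exercised.
 */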
5101
5102static int
5103check_inject(struct scsi_cmnd *scp)
5104{
5105	struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
5106
5107	memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
5108
5109	if (atomic_inc_return(&sdebug_cmnd_count) >=
5110	    abs(scsi_debug_every_nth)) {
5111		atomic_set(&sdebug_cmnd_count, 0);
5112		if (scsi_debug_every_nth < -1)
5113			scsi_debug_every_nth = -1;
5114		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5115			return 1; /* ignore command causing timeout */
5116		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5117			 scsi_medium_access_command(scp))
5118			return 1; /* time out reads and writes */
5119		if (sdebug_any_injecting_opt) {
5120			int opts = scsi_debug_opts;
5121
5122			if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5123				ep->inj_recovered = true;
5124			else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5125				ep->inj_transport = true;
5126			else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5127				ep->inj_dif = true;
5128			else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5129				ep->inj_dix = true;
5130			else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5131				ep->inj_short = true;
5132		}
5133	}
5134	return 0;
5135}
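/*
 * Note (illustrative): check_inject() fires once every abs(every_nth)
 * commands.  For example, with every_nth=100 and the TIMEOUT bit set in
 * opts, every 100th command returns 1 and is quietly dropped, so the SCSI
 * mid-layer eventually times it out and runs its error handler.  The other
 * injection options only tag the command's private data (scsi_cmd_priv())
 * so that the resp_* routines can inject the error later.
 */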
5136
5137static int
5138scsi_debug_queuecommand(struct scsi_cmnd *scp)
5139{
5140	u8 sdeb_i;
5141	struct scsi_device *sdp = scp->device;
5142	const struct opcode_info_t *oip;
5143	const struct opcode_info_t *r_oip;
5144	struct sdebug_dev_info *devip;
5145	u8 *cmd = scp->cmnd;
5146	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5147	int k, na;
5148	int errsts = 0;
5149	int errsts_no_connect = DID_NO_CONNECT << 16;
5150	u32 flags;
5151	u16 sa;
5152	u8 opcode = cmd[0];
5153	bool has_wlun_rl;
5154	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5155
5156	scsi_set_resid(scp, 0);
5157	if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5158		char b[120];
5159		int n, len, sb;
5160
5161		len = scp->cmd_len;
5162		sb = (int)sizeof(b);
5163		if (len > 32)
5164			strcpy(b, "too long, over 32 bytes");
5165		else {
5166			for (k = 0, n = 0; k < len && n < sb; ++k)
5167				n += scnprintf(b + n, sb - n, "%02x ",
5168					       (u32)cmd[k]);
5169		}
5170		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5171	}
5172	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5173	if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5174		return schedule_resp(scp, NULL, errsts_no_connect, 0);
5175
5176	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
5177	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
5178	devip = (struct sdebug_dev_info *)sdp->hostdata;
5179	if (!devip) {
5180		devip = devInfoReg(sdp);
5181		if (NULL == devip)
5182			return schedule_resp(scp, NULL, errsts_no_connect, 0);
5183	}
5184	na = oip->num_attached;
5185	r_pfp = oip->pfp;
5186	if (na) {	/* multiple commands with this opcode */
5187		r_oip = oip;
5188		if (FF_SA & r_oip->flags) {
5189			if (F_SA_LOW & oip->flags)
5190				sa = 0x1f & cmd[1];
5191			else
5192				sa = get_unaligned_be16(cmd + 8);
5193			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5194				if (opcode == oip->opcode && sa == oip->sa)
5195					break;
5196			}
5197		} else {   /* since no service action only check opcode */
5198			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5199				if (opcode == oip->opcode)
5200					break;
5201			}
5202		}
5203		if (k > na) {
5204			if (F_SA_LOW & r_oip->flags)
5205				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5206			else if (F_SA_HIGH & r_oip->flags)
5207				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5208			else
5209				mk_sense_invalid_opcode(scp);
5210			goto check_cond;
5211		}
5212	}	/* else (when na==0) we assume the oip is a match */
5213	flags = oip->flags;
5214	if (F_INV_OP & flags) {
5215		mk_sense_invalid_opcode(scp);
5216		goto check_cond;
5217	}
5218	if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5219		if (debug)
5220			sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5221				    "0x%x not supported for wlun\n", opcode);
5222		mk_sense_invalid_opcode(scp);
5223		goto check_cond;
5224	}
5225	if (scsi_debug_strict) {	/* check cdb against mask */
5226		u8 rem;
5227		int j;
5228
5229		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5230			rem = ~oip->len_mask[k] & cmd[k];
5231			if (rem) {
5232				for (j = 7; j >= 0; --j, rem <<= 1) {
5233					if (0x80 & rem)
5234						break;
5235				}
5236				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5237				goto check_cond;
5238			}
5239		}
5240	}
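	/*
	 * Worked example (hypothetical mask value): if oip->len_mask[1] were
	 * 0xe0 and the initiator sent cmd[1] == 0x10, then rem == 0x10 and
	 * the inner loop stops at j == 4, so the CHECK CONDITION reports an
	 * invalid field at byte 1, bit 4 of the CDB.
	 */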
5241	if (!(F_SKIP_UA & flags) &&
5242	    SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5243		errsts = check_readiness(scp, UAS_ONLY, devip);
5244		if (errsts)
5245			goto check_cond;
5246	}
5247	if ((F_M_ACCESS & flags) && devip->stopped) {
5248		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5249		if (debug)
5250			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5251				    "%s\n", my_name, "initializing command "
5252				    "required");
5253		errsts = check_condition_result;
5254		goto fini;
5255	}
5256	if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5257		goto fini;
5258	if (scsi_debug_every_nth) {
5259		if (check_inject(scp))
5260			return 0;	/* ignore command: make trouble */
5261	}
5262	if (oip->pfp)	/* if this command has a resp_* function, call it */
5263		errsts = oip->pfp(scp, devip);
5264	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
5265		errsts = r_pfp(scp, devip);
5266
5267fini:
5268	return schedule_resp(scp, devip, errsts,
5269			     ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5270check_cond:
5271	return schedule_resp(scp, devip, check_condition_result, 0);
5272}
5273
5274static int
5275sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5276{
5277	if (scsi_debug_host_lock) {
5278		unsigned long iflags;
5279		int rc;
5280
5281		spin_lock_irqsave(shost->host_lock, iflags);
5282		rc = scsi_debug_queuecommand(cmd);
5283		spin_unlock_irqrestore(shost->host_lock, iflags);
5284		return rc;
5285	} else
5286		return scsi_debug_queuecommand(cmd);
5287}
5288
5289static struct scsi_host_template sdebug_driver_template = {
5290	.show_info =		scsi_debug_show_info,
5291	.write_info =		scsi_debug_write_info,
5292	.proc_name =		sdebug_proc_name,
5293	.name =			"SCSI DEBUG",
5294	.info =			scsi_debug_info,
5295	.slave_alloc =		scsi_debug_slave_alloc,
5296	.slave_configure =	scsi_debug_slave_configure,
5297	.slave_destroy =	scsi_debug_slave_destroy,
5298	.ioctl =		scsi_debug_ioctl,
5299	.queuecommand =		sdebug_queuecommand_lock_or_not,
5300	.change_queue_depth =	sdebug_change_qdepth,
5301	.eh_abort_handler =	scsi_debug_abort,
5302	.eh_device_reset_handler = scsi_debug_device_reset,
5303	.eh_target_reset_handler = scsi_debug_target_reset,
5304	.eh_bus_reset_handler = scsi_debug_bus_reset,
5305	.eh_host_reset_handler = scsi_debug_host_reset,
5306	.can_queue =		SCSI_DEBUG_CANQUEUE,
5307	.this_id =		7,
5308	.sg_tablesize =		SCSI_MAX_SG_CHAIN_SEGMENTS,
5309	.cmd_per_lun =		DEF_CMD_PER_LUN,
5310	.max_sectors =		-1U,
5311	.use_clustering = 	DISABLE_CLUSTERING,
5312	.module =		THIS_MODULE,
5313	.track_queue_depth =	1,
5314	.cmd_size =		sizeof(struct sdebug_scmd_extra_t),
5315};
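/*
 * Note (illustrative): .cmd_size asks the mid-layer to reserve
 * sizeof(struct sdebug_scmd_extra_t) bytes of private data with every
 * scsi_cmnd; check_inject() retrieves that area with scsi_cmd_priv(scp)
 * and records which error, if any, should be injected for the command.
 */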
5316
5317static int sdebug_driver_probe(struct device * dev)
5318{
5319	int error = 0;
5320	int opts;
5321	struct sdebug_host_info *sdbg_host;
5322	struct Scsi_Host *hpnt;
5323	int host_prot;
5324
5325	sdbg_host = to_sdebug_host(dev);
5326
5327	sdebug_driver_template.can_queue = scsi_debug_max_queue;
5328	if (scsi_debug_clustering)
5329		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5330	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5331	if (NULL == hpnt) {
5332		pr_err("scsi_host_alloc failed\n");
5333		error = -ENODEV;
5334		return error;
5335	}
5336
5337	sdbg_host->shost = hpnt;
5338	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5339	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
5340		hpnt->max_id = scsi_debug_num_tgts + 1;
5341	else
5342		hpnt->max_id = scsi_debug_num_tgts;
5343	/* = scsi_debug_max_luns; */
5344	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5345
5346	host_prot = 0;
5347
5348	switch (scsi_debug_dif) {
5349
5350	case SD_DIF_TYPE1_PROTECTION:
5351		host_prot = SHOST_DIF_TYPE1_PROTECTION;
5352		if (scsi_debug_dix)
5353			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5354		break;
5355
5356	case SD_DIF_TYPE2_PROTECTION:
5357		host_prot = SHOST_DIF_TYPE2_PROTECTION;
5358		if (scsi_debug_dix)
5359			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5360		break;
5361
5362	case SD_DIF_TYPE3_PROTECTION:
5363		host_prot = SHOST_DIF_TYPE3_PROTECTION;
5364		if (scsi_debug_dix)
5365			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5366		break;
5367
5368	default:
5369		if (scsi_debug_dix)
5370			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5371		break;
5372	}
5373
5374	scsi_host_set_prot(hpnt, host_prot);
5375
5376	pr_info("host protection%s%s%s%s%s%s%s\n",
5377	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5378	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5379	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5380	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5381	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5382	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5383	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5384
5385	if (scsi_debug_guard == 1)
5386		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5387	else
5388		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5389
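	/*
	 * Example (illustrative): loading with dif=1 dix=1 advertises
	 * DIF TYPE1 plus DIX TYPE1 protection, and guard=1 selects an IP
	 * checksum guard tag instead of the default T10 CRC.
	 */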
5390	opts = scsi_debug_opts;
5391	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5392		sdebug_any_injecting_opt = true;
5393	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5394		sdebug_any_injecting_opt = true;
5395	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5396		sdebug_any_injecting_opt = true;
5397	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5398		sdebug_any_injecting_opt = true;
5399	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5400		sdebug_any_injecting_opt = true;
5401
5402	error = scsi_add_host(hpnt, &sdbg_host->dev);
5403	if (error) {
5404		pr_err("scsi_add_host failed\n");
5405		error = -ENODEV;
5406		scsi_host_put(hpnt);
5407	} else
5408		scsi_scan_host(hpnt);
5409
5410	return error;
5411}
5412
5413static int sdebug_driver_remove(struct device *dev)
5414{
5415	struct sdebug_host_info *sdbg_host;
5416	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5417
5418	sdbg_host = to_sdebug_host(dev);
5419
5420	if (!sdbg_host) {
5421		pr_err("Unable to locate host info\n");
5422		return -ENODEV;
5423	}
5424
5425	scsi_remove_host(sdbg_host->shost);
5426
5427	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5428				 dev_list) {
5429		list_del(&sdbg_devinfo->dev_list);
5430		kfree(sdbg_devinfo);
5431	}
5432
5433	scsi_host_put(sdbg_host->shost);
5434	return 0;
5435}
5436
5437static int pseudo_lld_bus_match(struct device *dev,
5438				struct device_driver *dev_driver)
5439{
5440	return 1;
5441}
5442
5443static struct bus_type pseudo_lld_bus = {
5444	.name = "pseudo",
5445	.match = pseudo_lld_bus_match,
5446	.probe = sdebug_driver_probe,
5447	.remove = sdebug_driver_remove,
5448	.drv_groups = sdebug_drv_groups,
5449};
v3.5.6
   1/*
   2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   3 *  Copyright (C) 1992  Eric Youngdale
   4 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   5 *  to make sure that we are not getting blocks mixed up, and PANIC if
   6 *  anything out of the ordinary is seen.
   7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   8 *
   9 *  This version is more generic, simulating a variable number of disk
  10 *  (or disk like devices) sharing a common amount of RAM. To be more
  11 *  realistic, the simulated devices have the transport attributes of
  12 *  SAS disks.
  13 *
  14 *
  15 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  16 *
  17 *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
  18 *   dpg: work for devfs large number of disks [20010809]
  19 *        forked for lk 2.5 series [20011216, 20020101]
  20 *        use vmalloc() more inquiry+mode_sense [20020302]
  21 *        add timers for delayed responses [20020721]
  22 *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
  23 *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
  24 *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
  25 *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
  26 */
  27
  28#include <linux/module.h>
  29
  30#include <linux/kernel.h>
  31#include <linux/errno.h>
  32#include <linux/timer.h>
  33#include <linux/slab.h>
  34#include <linux/types.h>
  35#include <linux/string.h>
  36#include <linux/genhd.h>
  37#include <linux/fs.h>
  38#include <linux/init.h>
  39#include <linux/proc_fs.h>
  40#include <linux/vmalloc.h>
  41#include <linux/moduleparam.h>
  42#include <linux/scatterlist.h>
  43#include <linux/blkdev.h>
  44#include <linux/crc-t10dif.h>
  45
  46#include <net/checksum.h>
  47
  48#include <asm/unaligned.h>
  49
  50#include <scsi/scsi.h>
  51#include <scsi/scsi_cmnd.h>
  52#include <scsi/scsi_device.h>
  53#include <scsi/scsi_host.h>
  54#include <scsi/scsicam.h>
  55#include <scsi/scsi_eh.h>
  56#include <scsi/scsi_dbg.h>
  57
  58#include "sd.h"
  59#include "scsi_logging.h"
  60
  61#define SCSI_DEBUG_VERSION "1.82"
  62static const char * scsi_debug_version_date = "20100324";
  63
  64/* Additional Sense Code (ASC) */
  65#define NO_ADDITIONAL_SENSE 0x0
  66#define LOGICAL_UNIT_NOT_READY 0x4
  67#define UNRECOVERED_READ_ERR 0x11
  68#define PARAMETER_LIST_LENGTH_ERR 0x1a
  69#define INVALID_OPCODE 0x20
  70#define ADDR_OUT_OF_RANGE 0x21
  71#define INVALID_COMMAND_OPCODE 0x20
  72#define INVALID_FIELD_IN_CDB 0x24
  73#define INVALID_FIELD_IN_PARAM_LIST 0x26
  74#define POWERON_RESET 0x29
  75#define SAVING_PARAMS_UNSUP 0x39
  76#define TRANSPORT_PROBLEM 0x4b
  77#define THRESHOLD_EXCEEDED 0x5d
  78#define LOW_POWER_COND_ON 0x5e
  79
  80/* Additional Sense Code Qualifier (ASCQ) */
  81#define ACK_NAK_TO 0x3
  82
  83#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
  84
  85/* Default values for driver parameters */
  86#define DEF_NUM_HOST   1
  87#define DEF_NUM_TGTS   1
  88#define DEF_MAX_LUNS   1
  89/* With these defaults, this driver will make 1 host with 1 target
  90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
  91 */
  92#define DEF_ATO 1
  93#define DEF_DELAY   1
  94#define DEF_DEV_SIZE_MB   8
  95#define DEF_DIF 0
  96#define DEF_DIX 0
  97#define DEF_D_SENSE   0
  98#define DEF_EVERY_NTH   0
  99#define DEF_FAKE_RW	0
 100#define DEF_GUARD 0
 101#define DEF_LBPU 0
 102#define DEF_LBPWS 0
 103#define DEF_LBPWS10 0
 104#define DEF_LBPRZ 1
 105#define DEF_LOWEST_ALIGNED 0
 106#define DEF_NO_LUN_0   0
 107#define DEF_NUM_PARTS   0
 108#define DEF_OPTS   0
 109#define DEF_OPT_BLKS 64
 110#define DEF_PHYSBLK_EXP 0
 111#define DEF_PTYPE   0
 112#define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
 113#define DEF_SECTOR_SIZE 512
 114#define DEF_UNMAP_ALIGNMENT 0
 115#define DEF_UNMAP_GRANULARITY 1
 116#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 117#define DEF_UNMAP_MAX_DESC 256
 118#define DEF_VIRTUAL_GB   0
 119#define DEF_VPD_USE_HOSTNO 1
 120#define DEF_WRITESAME_LENGTH 0xFFFF
 121
 122/* bit mask values for scsi_debug_opts */
 123#define SCSI_DEBUG_OPT_NOISE   1
 124#define SCSI_DEBUG_OPT_MEDIUM_ERR   2
 125#define SCSI_DEBUG_OPT_TIMEOUT   4
 126#define SCSI_DEBUG_OPT_RECOVERED_ERR   8
 127#define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
 128#define SCSI_DEBUG_OPT_DIF_ERR   32
 129#define SCSI_DEBUG_OPT_DIX_ERR   64
 130#define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
 131/* When "every_nth" > 0 then modulo "every_nth" commands:
 132 *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 133 *   - a RECOVERED_ERROR is simulated on successful read and write
 134 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 135 *   - a TRANSPORT_ERROR is simulated on successful read and write
 136 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 137 *
 138 * When "every_nth" < 0 then after "- every_nth" commands:
 139 *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 140 *   - a RECOVERED_ERROR is simulated on successful read and write
 141 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 142 *   - a TRANSPORT_ERROR is simulated on successful read and write
 143 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 144 * This will continue until some other action occurs (e.g. the user
 145 * writing a new value (other than -1 or 1) to every_nth via sysfs).
 146 */
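/* Example (illustrative; parameter names assumed to mirror the variables):
 *   modprobe scsi_debug every_nth=100 opts=4
 * makes every 100th command disappear (SCSI_DEBUG_OPT_TIMEOUT), which is a
 * convenient way to exercise the mid-layer's timeout and abort handling.
 */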
 147
 148/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 149 * sector on read commands: */
 150#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 151#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
 152
 153/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 154 * or "peripheral device" addressing (value 0) */
 155#define SAM2_LUN_ADDRESS_METHOD 0
 156#define SAM2_WLUN_REPORT_LUNS 0xc101
 157
 158/* Can queue up to this number of commands. Typically commands that
 159 * that have a non-zero delay are queued. */
 160#define SCSI_DEBUG_CANQUEUE  255
 161
 162static int scsi_debug_add_host = DEF_NUM_HOST;
 163static int scsi_debug_ato = DEF_ATO;
 164static int scsi_debug_delay = DEF_DELAY;
 165static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
 166static int scsi_debug_dif = DEF_DIF;
 167static int scsi_debug_dix = DEF_DIX;
 168static int scsi_debug_dsense = DEF_D_SENSE;
 169static int scsi_debug_every_nth = DEF_EVERY_NTH;
 170static int scsi_debug_fake_rw = DEF_FAKE_RW;
 171static int scsi_debug_guard = DEF_GUARD;
 172static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
 173static int scsi_debug_max_luns = DEF_MAX_LUNS;
 174static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
 175static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
 176static int scsi_debug_no_uld = 0;
 177static int scsi_debug_num_parts = DEF_NUM_PARTS;
 178static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 179static int scsi_debug_opt_blks = DEF_OPT_BLKS;
 180static int scsi_debug_opts = DEF_OPTS;
 181static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
 182static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
 183static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
 184static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
 185static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
 186static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 187static unsigned int scsi_debug_lbpu = DEF_LBPU;
 188static unsigned int scsi_debug_lbpws = DEF_LBPWS;
 189static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
 190static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
 191static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 192static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 193static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 194static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 195static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
 196
 197static int scsi_debug_cmnd_count = 0;
 198
 199#define DEV_READONLY(TGT)      (0)
 200#define DEV_REMOVEABLE(TGT)    (0)
 201
 202static unsigned int sdebug_store_sectors;
 203static sector_t sdebug_capacity;	/* in sectors */
 204
 205/* old BIOS stuff, kernel may get rid of them but some mode sense pages
 206   may still need them */
 207static int sdebug_heads;		/* heads per disk */
 208static int sdebug_cylinders_per;	/* cylinders per surface */
 209static int sdebug_sectors_per;		/* sectors per cylinder */
 210
 211#define SDEBUG_MAX_PARTS 4
 212
 213#define SDEBUG_SENSE_LEN 32
 214
 215#define SCSI_DEBUG_MAX_CMD_LEN 32
 216
 217static unsigned int scsi_debug_lbp(void)
 218{
 219	return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
 220}
 221
 222struct sdebug_dev_info {
 223	struct list_head dev_list;
 224	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
 225	unsigned int channel;
 226	unsigned int target;
 227	unsigned int lun;
 228	struct sdebug_host_info *sdbg_host;
 229	unsigned int wlun;
 230	char reset;
 231	char stopped;
 232	char used;
 233};
 234
 235struct sdebug_host_info {
 236	struct list_head host_list;
 237	struct Scsi_Host *shost;
 238	struct device dev;
 239	struct list_head dev_info_list;
 240};
 241
 242#define to_sdebug_host(d)	\
 243	container_of(d, struct sdebug_host_info, dev)
 244
 245static LIST_HEAD(sdebug_host_list);
 246static DEFINE_SPINLOCK(sdebug_host_list_lock);
 247
 248typedef void (* done_funct_t) (struct scsi_cmnd *);
 249
 250struct sdebug_queued_cmd {
 251	int in_use;
 252	struct timer_list cmnd_timer;
 253	done_funct_t done_funct;
 254	struct scsi_cmnd * a_cmnd;
 255	int scsi_result;
 256};
 257static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
 258
 259static unsigned char * fake_storep;	/* ramdisk storage */
 260static unsigned char *dif_storep;	/* protection info */
 261static void *map_storep;		/* provisioning map */
 262
 263static unsigned long map_size;
 264static int num_aborts = 0;
 265static int num_dev_resets = 0;
 266static int num_bus_resets = 0;
 267static int num_host_resets = 0;
 268static int dix_writes;
 269static int dix_reads;
 270static int dif_errors;
 271
 272static DEFINE_SPINLOCK(queued_arr_lock);
 273static DEFINE_RWLOCK(atomic_rw);
 274
 275static char sdebug_proc_name[] = "scsi_debug";
 276
 277static struct bus_type pseudo_lld_bus;
 278
 279static inline sector_t dif_offset(sector_t sector)
 280{
 281	return sector << 3;
 282}
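/*
 * Note (illustrative): each sector carries 8 bytes of T10 protection
 * information (2-byte guard tag, 2-byte application tag, 4-byte reference
 * tag), so the tuple for a given sector starts at byte (sector << 3)
 * within dif_storep.
 */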
 283
 284static struct device_driver sdebug_driverfs_driver = {
 285	.name 		= sdebug_proc_name,
 286	.bus		= &pseudo_lld_bus,
 287};
 288
 289static const int check_condition_result =
 290		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 291
 292static const int illegal_condition_result =
 293	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
 294
 295static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
 296				    0, 0, 0x2, 0x4b};
 297static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
 298			           0, 0, 0x0, 0x0};
 299
 300static int sdebug_add_adapter(void);
 301static void sdebug_remove_adapter(void);
 302
 303static void sdebug_max_tgts_luns(void)
 304{
 305	struct sdebug_host_info *sdbg_host;
 306	struct Scsi_Host *hpnt;
 307
 308	spin_lock(&sdebug_host_list_lock);
 309	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 310		hpnt = sdbg_host->shost;
 311		if ((hpnt->this_id >= 0) &&
 312		    (scsi_debug_num_tgts > hpnt->this_id))
 313			hpnt->max_id = scsi_debug_num_tgts + 1;
 314		else
 315			hpnt->max_id = scsi_debug_num_tgts;
 316		/* scsi_debug_max_luns; */
 317		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
 318	}
 319	spin_unlock(&sdebug_host_list_lock);
 320}
 321
 322static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
 323			    int asc, int asq)
 324{
 325	unsigned char *sbuff;
 326
 327	sbuff = devip->sense_buff;
 328	memset(sbuff, 0, SDEBUG_SENSE_LEN);
 329
 330	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
 331
 332	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 333		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
 334		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
 335}
 336
 337static void get_data_transfer_info(unsigned char *cmd,
 338				   unsigned long long *lba, unsigned int *num,
 339				   u32 *ei_lba)
 340{
 341	*ei_lba = 0;
 342
 343	switch (*cmd) {
 344	case VARIABLE_LENGTH_CMD:
 345		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
 346			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
 347			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
 348			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;
 349
 350		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
 351			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;
 352
 353		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
 354			(u32)cmd[28] << 24;
 355		break;
 356
 357	case WRITE_SAME_16:
 358	case WRITE_16:
 359	case READ_16:
 360		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
 361			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
 362			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
 363			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;
 364
 365		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
 366			(u32)cmd[10] << 24;
 367		break;
 368	case WRITE_12:
 369	case READ_12:
 370		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
 371			(u32)cmd[2] << 24;
 372
 373		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
 374			(u32)cmd[6] << 24;
 375		break;
 376	case WRITE_SAME:
 377	case WRITE_10:
 378	case READ_10:
 379	case XDWRITEREAD_10:
 380		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 |	(u32)cmd[3] << 16 |
 381			(u32)cmd[2] << 24;
 382
 383		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
 384		break;
 385	case WRITE_6:
 386	case READ_6:
 387		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
 388			(u32)(cmd[1] & 0x1f) << 16;
 389		*num = (0 == cmd[4]) ? 256 : cmd[4];
 390		break;
 391	default:
 392		break;
 393	}
 394}
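/*
 * Worked example (illustrative): a READ(10) CDB of
 *   28 00 00 00 10 00 00 00 08 00
 * decodes to lba = 0x00001000 (4096) from bytes 2..5 and num = 8 blocks
 * from bytes 7..8; ei_lba stays 0 because only VARIABLE_LENGTH_CMD carries
 * an expected initial logical block reference tag.
 */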
 395
 396static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 397{
 398	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
 399		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
 400	}
 401	return -EINVAL;
 402	/* return -ENOTTY; // correct return but upsets fdisk */
 403}
 404
 405static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
 406			   struct sdebug_dev_info * devip)
 407{
 408	if (devip->reset) {
 409		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 410			printk(KERN_INFO "scsi_debug: Reporting Unit "
 411			       "attention: power on reset\n");
 412		devip->reset = 0;
 413		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
 414		return check_condition_result;
 415	}
 416	if ((0 == reset_only) && devip->stopped) {
 417		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 418			printk(KERN_INFO "scsi_debug: Reporting Not "
 419			       "ready: initializing command required\n");
 420		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
 421				0x2);
 422		return check_condition_result;
 423	}
 424	return 0;
 425}
 426
 427/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
 428static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 429				int arr_len)
 430{
 431	int act_len;
 432	struct scsi_data_buffer *sdb = scsi_in(scp);
 433
 434	if (!sdb->length)
 435		return 0;
 436	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
 437		return (DID_ERROR << 16);
 438
 439	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
 440				      arr, arr_len);
 441	if (sdb->resid)
 442		sdb->resid -= act_len;
 443	else
 444		sdb->resid = scsi_bufflen(scp) - act_len;
 445
 446	return 0;
 447}
 448
 449/* Returns number of bytes fetched into 'arr' or -1 if error. */
 450static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 451			       int arr_len)
 452{
 453	if (!scsi_bufflen(scp))
 454		return 0;
 455	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
 456		return -1;
 457
 458	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
 459}
 460
 461
 462static const char * inq_vendor_id = "Linux   ";
 463static const char * inq_product_id = "scsi_debug      ";
 464static const char * inq_product_rev = "0004";
 465
 466static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
 467			   int target_dev_id, int dev_id_num,
 468			   const char * dev_id_str,
 469			   int dev_id_str_len)
 470{
 471	int num, port_a;
 472	char b[32];
 473
 474	port_a = target_dev_id + 1;
 475	/* T10 vendor identifier field format (faked) */
 476	arr[0] = 0x2;	/* ASCII */
 477	arr[1] = 0x1;
 478	arr[2] = 0x0;
 479	memcpy(&arr[4], inq_vendor_id, 8);
 480	memcpy(&arr[12], inq_product_id, 16);
 481	memcpy(&arr[28], dev_id_str, dev_id_str_len);
 482	num = 8 + 16 + dev_id_str_len;
 483	arr[3] = num;
 484	num += 4;
 485	if (dev_id_num >= 0) {
 486		/* NAA-5, Logical unit identifier (binary) */
 487		arr[num++] = 0x1;	/* binary (not necessarily sas) */
 488		arr[num++] = 0x3;	/* PIV=0, lu, naa */
 489		arr[num++] = 0x0;
 490		arr[num++] = 0x8;
 491		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
 492		arr[num++] = 0x33;
 493		arr[num++] = 0x33;
 494		arr[num++] = 0x30;
 495		arr[num++] = (dev_id_num >> 24);
 496		arr[num++] = (dev_id_num >> 16) & 0xff;
 497		arr[num++] = (dev_id_num >> 8) & 0xff;
 498		arr[num++] = dev_id_num & 0xff;
 499		/* Target relative port number */
 500		arr[num++] = 0x61;	/* proto=sas, binary */
 501		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
 502		arr[num++] = 0x0;	/* reserved */
 503		arr[num++] = 0x4;	/* length */
 504		arr[num++] = 0x0;	/* reserved */
 505		arr[num++] = 0x0;	/* reserved */
 506		arr[num++] = 0x0;
 507		arr[num++] = 0x1;	/* relative port A */
 508	}
 509	/* NAA-5, Target port identifier */
 510	arr[num++] = 0x61;	/* proto=sas, binary */
 511	arr[num++] = 0x93;	/* piv=1, target port, naa */
 512	arr[num++] = 0x0;
 513	arr[num++] = 0x8;
 514	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
 515	arr[num++] = 0x22;
 516	arr[num++] = 0x22;
 517	arr[num++] = 0x20;
 518	arr[num++] = (port_a >> 24);
 519	arr[num++] = (port_a >> 16) & 0xff;
 520	arr[num++] = (port_a >> 8) & 0xff;
 521	arr[num++] = port_a & 0xff;
 522	/* NAA-5, Target port group identifier */
 523	arr[num++] = 0x61;	/* proto=sas, binary */
 524	arr[num++] = 0x95;	/* piv=1, target port group id */
 525	arr[num++] = 0x0;
 526	arr[num++] = 0x4;
 527	arr[num++] = 0;
 528	arr[num++] = 0;
 529	arr[num++] = (port_group_id >> 8) & 0xff;
 530	arr[num++] = port_group_id & 0xff;
 531	/* NAA-5, Target device identifier */
 532	arr[num++] = 0x61;	/* proto=sas, binary */
 533	arr[num++] = 0xa3;	/* piv=1, target device, naa */
 534	arr[num++] = 0x0;
 535	arr[num++] = 0x8;
 536	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
 537	arr[num++] = 0x22;
 538	arr[num++] = 0x22;
 539	arr[num++] = 0x20;
 540	arr[num++] = (target_dev_id >> 24);
 541	arr[num++] = (target_dev_id >> 16) & 0xff;
 542	arr[num++] = (target_dev_id >> 8) & 0xff;
 543	arr[num++] = target_dev_id & 0xff;
 544	/* SCSI name string: Target device identifier */
 545	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
 546	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
 547	arr[num++] = 0x0;
 548	arr[num++] = 24;
 549	memcpy(arr + num, "naa.52222220", 12);
 550	num += 12;
 551	snprintf(b, sizeof(b), "%08X", target_dev_id);
 552	memcpy(arr + num, b, 8);
 553	num += 8;
 554	memset(arr + num, 0, 4);
 555	num += 4;
 556	return num;
 557}
 558
 559
 560static unsigned char vpd84_data[] = {
 561/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
 562    0x22,0x22,0x22,0x0,0xbb,0x1,
 563    0x22,0x22,0x22,0x0,0xbb,0x2,
 564};
 565
 566static int inquiry_evpd_84(unsigned char * arr)
 567{
 568	memcpy(arr, vpd84_data, sizeof(vpd84_data));
 569	return sizeof(vpd84_data);
 570}
 571
 572static int inquiry_evpd_85(unsigned char * arr)
 573{
 574	int num = 0;
 575	const char * na1 = "https://www.kernel.org/config";
 576	const char * na2 = "http://www.kernel.org/log";
 577	int plen, olen;
 578
 579	arr[num++] = 0x1;	/* lu, storage config */
 580	arr[num++] = 0x0;	/* reserved */
 581	arr[num++] = 0x0;
 582	olen = strlen(na1);
 583	plen = olen + 1;
 584	if (plen % 4)
 585		plen = ((plen / 4) + 1) * 4;
 586	arr[num++] = plen;	/* length, null terminated, padded */
 587	memcpy(arr + num, na1, olen);
 588	memset(arr + num + olen, 0, plen - olen);
 589	num += plen;
 590
 591	arr[num++] = 0x4;	/* lu, logging */
 592	arr[num++] = 0x0;	/* reserved */
 593	arr[num++] = 0x0;
 594	olen = strlen(na2);
 595	plen = olen + 1;
 596	if (plen % 4)
 597		plen = ((plen / 4) + 1) * 4;
 598	arr[num++] = plen;	/* length, null terminated, padded */
 599	memcpy(arr + num, na2, olen);
 600	memset(arr + num + olen, 0, plen - olen);
 601	num += plen;
 602
 603	return num;
 604}
 605
 606/* SCSI ports VPD page */
 607static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
 608{
 609	int num = 0;
 610	int port_a, port_b;
 611
 612	port_a = target_dev_id + 1;
 613	port_b = port_a + 1;
 614	arr[num++] = 0x0;	/* reserved */
 615	arr[num++] = 0x0;	/* reserved */
 616	arr[num++] = 0x0;
 617	arr[num++] = 0x1;	/* relative port 1 (primary) */
 618	memset(arr + num, 0, 6);
 619	num += 6;
 620	arr[num++] = 0x0;
 621	arr[num++] = 12;	/* length tp descriptor */
 622	/* naa-5 target port identifier (A) */
 623	arr[num++] = 0x61;	/* proto=sas, binary */
 624	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
 625	arr[num++] = 0x0;	/* reserved */
 626	arr[num++] = 0x8;	/* length */
 627	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
 628	arr[num++] = 0x22;
 629	arr[num++] = 0x22;
 630	arr[num++] = 0x20;
 631	arr[num++] = (port_a >> 24);
 632	arr[num++] = (port_a >> 16) & 0xff;
 633	arr[num++] = (port_a >> 8) & 0xff;
 634	arr[num++] = port_a & 0xff;
 635
 636	arr[num++] = 0x0;	/* reserved */
 637	arr[num++] = 0x0;	/* reserved */
 638	arr[num++] = 0x0;
 639	arr[num++] = 0x2;	/* relative port 2 (secondary) */
 640	memset(arr + num, 0, 6);
 641	num += 6;
 642	arr[num++] = 0x0;
 643	arr[num++] = 12;	/* length tp descriptor */
 644	/* naa-5 target port identifier (B) */
 645	arr[num++] = 0x61;	/* proto=sas, binary */
 646	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
 647	arr[num++] = 0x0;	/* reserved */
 648	arr[num++] = 0x8;	/* length */
 649	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
 650	arr[num++] = 0x22;
 651	arr[num++] = 0x22;
 652	arr[num++] = 0x20;
 653	arr[num++] = (port_b >> 24);
 654	arr[num++] = (port_b >> 16) & 0xff;
 655	arr[num++] = (port_b >> 8) & 0xff;
 656	arr[num++] = port_b & 0xff;
 657
 658	return num;
 659}
 660
 661
 662static unsigned char vpd89_data[] = {
 663/* from 4th byte */ 0,0,0,0,
 664'l','i','n','u','x',' ',' ',' ',
 665'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
 666'1','2','3','4',
 6670x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
 6680xec,0,0,0,
 6690x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
 6700,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
 6710x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
 6720x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
 6730x53,0x41,
 6740x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
 6750x20,0x20,
 6760x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
 6770x10,0x80,
 6780,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
 6790x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
 6800x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
 6810,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
 6820x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
 6830x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
 6840,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
 6850,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6870,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6880x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
 6890,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
 6900xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
 6910,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
 6920,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6930,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6940,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6950,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6960,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6970,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6980,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6990,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7000,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7010,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7020,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7030,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
 704};
 705
 706static int inquiry_evpd_89(unsigned char * arr)
 707{
 708	memcpy(arr, vpd89_data, sizeof(vpd89_data));
 709	return sizeof(vpd89_data);
 710}
 711
 712
 713/* Block limits VPD page (SBC-3) */
 714static unsigned char vpdb0_data[] = {
 715	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
 716	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 717	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 718	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 719};
 720
 721static int inquiry_evpd_b0(unsigned char * arr)
 722{
 723	unsigned int gran;
 724
 725	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
 726
 727	/* Optimal transfer length granularity */
 728	gran = 1 << scsi_debug_physblk_exp;
 729	arr[2] = (gran >> 8) & 0xff;
 730	arr[3] = gran & 0xff;
 731
 732	/* Maximum Transfer Length */
 733	if (sdebug_store_sectors > 0x400) {
 734		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
 735		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
 736		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
 737		arr[7] = sdebug_store_sectors & 0xff;
 738	}
 739
 740	/* Optimal Transfer Length */
 741	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
 742
 743	if (scsi_debug_lbpu) {
 744		/* Maximum Unmap LBA Count */
 745		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
 746
 747		/* Maximum Unmap Block Descriptor Count */
 748		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
 749	}
 750
 751	/* Unmap Granularity Alignment */
 752	if (scsi_debug_unmap_alignment) {
 753		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
 754		arr[28] |= 0x80; /* UGAVALID */
 755	}
 756
 757	/* Optimal Unmap Granularity */
 758	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
 759
 760	/* Maximum WRITE SAME Length */
 761	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
 762
 763	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
 764
 765	return sizeof(vpdb0_data);
 766}
 767
 768/* Block device characteristics VPD page (SBC-3) */
 769static int inquiry_evpd_b1(unsigned char *arr)
 770{
 771	memset(arr, 0, 0x3c);
 772	arr[0] = 0;
 773	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
 774	arr[2] = 0;
 775	arr[3] = 5;	/* less than 1.8" */
 776
 777	return 0x3c;
 778}
 779
 780/* Logical block provisioning VPD page (SBC-3) */
 781static int inquiry_evpd_b2(unsigned char *arr)
 782{
 783	memset(arr, 0, 0x4);
 784	arr[0] = 0;			/* threshold exponent */
 785
 786	if (scsi_debug_lbpu)
 787		arr[1] = 1 << 7;
 788
 789	if (scsi_debug_lbpws)
 790		arr[1] |= 1 << 6;
 791
 792	if (scsi_debug_lbpws10)
 793		arr[1] |= 1 << 5;
 794
 795	if (scsi_debug_lbprz)
 796		arr[1] |= 1 << 2;
 797
 798	return 0x4;
 799}
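/*
 * Example (illustrative): with lbpu=1 and the default lbprz=1, byte 1 of
 * this page becomes 0x84 (LBPU | LBPRZ), telling the initiator that UNMAP
 * is supported and that unmapped blocks read back as zeroes.
 */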
 800
 801#define SDEBUG_LONG_INQ_SZ 96
 802#define SDEBUG_MAX_INQ_ARR_SZ 584
 803
 804static int resp_inquiry(struct scsi_cmnd * scp, int target,
 805			struct sdebug_dev_info * devip)
 806{
 807	unsigned char pq_pdt;
 808	unsigned char * arr;
 809	unsigned char *cmd = (unsigned char *)scp->cmnd;
 810	int alloc_len, n, ret;
 811
 812	alloc_len = (cmd[3] << 8) + cmd[4];
 813	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
 814	if (! arr)
 815		return DID_REQUEUE << 16;
 816	if (devip->wlun)
 817		pq_pdt = 0x1e;	/* present, wlun */
 818	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
 819		pq_pdt = 0x7f;	/* not present, no device type */
 820	else
 821		pq_pdt = (scsi_debug_ptype & 0x1f);
 822	arr[0] = pq_pdt;
 823	if (0x2 & cmd[1]) {  /* CMDDT bit set */
 824		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
 825			       	0);
 826		kfree(arr);
 827		return check_condition_result;
 828	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
 829		int lu_id_num, port_group_id, target_dev_id, len;
 830		char lu_id_str[6];
 831		int host_no = devip->sdbg_host->shost->host_no;
 832		
 833		port_group_id = (((host_no + 1) & 0x7f) << 8) +
 834		    (devip->channel & 0x7f);
 835		if (0 == scsi_debug_vpd_use_hostno)
 836			host_no = 0;
 837		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
 838			    (devip->target * 1000) + devip->lun);
 839		target_dev_id = ((host_no + 1) * 2000) +
 840				 (devip->target * 1000) - 3;
 841		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
 842		if (0 == cmd[2]) { /* supported vital product data pages */
 843			arr[1] = cmd[2];	/*sanity */
 844			n = 4;
 845			arr[n++] = 0x0;   /* this page */
 846			arr[n++] = 0x80;  /* unit serial number */
 847			arr[n++] = 0x83;  /* device identification */
 848			arr[n++] = 0x84;  /* software interface ident. */
 849			arr[n++] = 0x85;  /* management network addresses */
 850			arr[n++] = 0x86;  /* extended inquiry */
 851			arr[n++] = 0x87;  /* mode page policy */
 852			arr[n++] = 0x88;  /* SCSI ports */
 853			arr[n++] = 0x89;  /* ATA information */
 854			arr[n++] = 0xb0;  /* Block limits (SBC) */
 855			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
 856			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
 857				arr[n++] = 0xb2;
 858			arr[3] = n - 4;	  /* number of supported VPD pages */
 859		} else if (0x80 == cmd[2]) { /* unit serial number */
 860			arr[1] = cmd[2];	/*sanity */
 861			arr[3] = len;
 862			memcpy(&arr[4], lu_id_str, len);
 863		} else if (0x83 == cmd[2]) { /* device identification */
 864			arr[1] = cmd[2];	/*sanity */
 865			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
 866						 target_dev_id, lu_id_num,
 867						 lu_id_str, len);
 868		} else if (0x84 == cmd[2]) { /* Software interface ident. */
 869			arr[1] = cmd[2];	/*sanity */
 870			arr[3] = inquiry_evpd_84(&arr[4]);
 871		} else if (0x85 == cmd[2]) { /* Management network addresses */
 872			arr[1] = cmd[2];	/*sanity */
 873			arr[3] = inquiry_evpd_85(&arr[4]);
 874		} else if (0x86 == cmd[2]) { /* extended inquiry */
 875			arr[1] = cmd[2];	/*sanity */
 876			arr[3] = 0x3c;	/* number of following entries */
 877			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
 878				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
 879			else if (scsi_debug_dif)
 880				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
 881			else
 882				arr[4] = 0x0;   /* no protection stuff */
 883			arr[5] = 0x7;   /* head of q, ordered + simple q's */
 884		} else if (0x87 == cmd[2]) { /* mode page policy */
 885			arr[1] = cmd[2];	/*sanity */
 886			arr[3] = 0x8;	/* number of following entries */
 887			arr[4] = 0x2;	/* disconnect-reconnect mp */
 888			arr[6] = 0x80;	/* mlus, shared */
 889			arr[8] = 0x18;	 /* protocol specific lu */
 890			arr[10] = 0x82;	 /* mlus, per initiator port */
 891		} else if (0x88 == cmd[2]) { /* SCSI Ports */
 892			arr[1] = cmd[2];	/*sanity */
 893			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
 894		} else if (0x89 == cmd[2]) { /* ATA information */
 895			arr[1] = cmd[2];        /*sanity */
 896			n = inquiry_evpd_89(&arr[4]);
 897			arr[2] = (n >> 8);
 898			arr[3] = (n & 0xff);
 899		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
 900			arr[1] = cmd[2];        /*sanity */
 901			arr[3] = inquiry_evpd_b0(&arr[4]);
 902		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
 903			arr[1] = cmd[2];        /*sanity */
 904			arr[3] = inquiry_evpd_b1(&arr[4]);
 905		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
 906			arr[1] = cmd[2];        /*sanity */
 907			arr[3] = inquiry_evpd_b2(&arr[4]);
 908		} else {
 909			/* Illegal request, invalid field in cdb */
 910			mk_sense_buffer(devip, ILLEGAL_REQUEST,
 911					INVALID_FIELD_IN_CDB, 0);
 912			kfree(arr);
 913			return check_condition_result;
 914		}
 915		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
 916		ret = fill_from_dev_buffer(scp, arr,
 917			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
 918		kfree(arr);
 919		return ret;
 920	}
 921	/* drops through here for a standard inquiry */
 922	arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0;	/* Removable disk */
 923	arr[2] = scsi_debug_scsi_level;
 924	arr[3] = 2;    /* response_data_format==2 */
 925	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
 926	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
 927	if (0 == scsi_debug_vpd_use_hostno)
 928		arr[5] = 0x10; /* claim: implicit TGPS */
 929	arr[6] = 0x10; /* claim: MultiP */
 930	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
 931	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
 932	memcpy(&arr[8], inq_vendor_id, 8);
 933	memcpy(&arr[16], inq_product_id, 16);
 934	memcpy(&arr[32], inq_product_rev, 4);
 935	/* version descriptors (2 bytes each) follow */
 936	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
 937	arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
 938	n = 62;
 939	if (scsi_debug_ptype == 0) {
 940		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
 941	} else if (scsi_debug_ptype == 1) {
 942		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
 943	}
 944	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
 945	ret = fill_from_dev_buffer(scp, arr,
 946			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
 947	kfree(arr);
 948	return ret;
 949}
 950
 951static int resp_requests(struct scsi_cmnd * scp,
 952			 struct sdebug_dev_info * devip)
 953{
 954	unsigned char * sbuff;
 955	unsigned char *cmd = (unsigned char *)scp->cmnd;
 956	unsigned char arr[SDEBUG_SENSE_LEN];
 957	int want_dsense;
 958	int len = 18;
 959
 960	memset(arr, 0, sizeof(arr));
 961	if (devip->reset == 1)
 962		mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
 963	want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
 964	sbuff = devip->sense_buff;
 965	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
 966		if (want_dsense) {
 967			arr[0] = 0x72;
 968			arr[1] = 0x0;		/* NO_SENSE in sense_key */
 969			arr[2] = THRESHOLD_EXCEEDED;
 970			arr[3] = 0xff;		/* TEST set and MRIE==6 */
 971		} else {
 972			arr[0] = 0x70;
 973			arr[2] = 0x0;		/* NO_SENSE in sense_key */
 974			arr[7] = 0xa;   	/* 18 byte sense buffer */
 975			arr[12] = THRESHOLD_EXCEEDED;
 976			arr[13] = 0xff;		/* TEST set and MRIE==6 */
 977		}
 978	} else {
 979		memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
 980		if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
 981			/* DESC bit set and sense_buff in fixed format */
 982			memset(arr, 0, sizeof(arr));
 983			arr[0] = 0x72;
 984			arr[1] = sbuff[2];     /* sense key */
 985			arr[2] = sbuff[12];    /* asc */
 986			arr[3] = sbuff[13];    /* ascq */
 987			len = 8;
 988		}
 989	}
 990	mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
 991	return fill_from_dev_buffer(scp, arr, len);
 992}
 993
 994static int resp_start_stop(struct scsi_cmnd * scp,
 995			   struct sdebug_dev_info * devip)
 996{
 997	unsigned char *cmd = (unsigned char *)scp->cmnd;
 998	int power_cond, errsts, start;
 999
1000	if ((errsts = check_readiness(scp, 1, devip)))
1001		return errsts;
1002	power_cond = (cmd[4] & 0xf0) >> 4;
1003	if (power_cond) {
1004		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1005			       	0);
1006		return check_condition_result;
1007	}
1008	start = cmd[4] & 1;
1009	if (start == devip->stopped)
1010		devip->stopped = !start;
1011	return 0;
1012}
1013
1014static sector_t get_sdebug_capacity(void)
1015{
1016	if (scsi_debug_virtual_gb > 0)
1017		return (sector_t)scsi_debug_virtual_gb *
1018			(1073741824 / scsi_debug_sector_size);
1019	else
1020		return sdebug_store_sectors;
1021}
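/*
 * Worked example (illustrative): with virtual_gb=4 and the default 512-byte
 * sector size, the reported capacity is 4 * (1073741824 / 512) = 8388608
 * sectors (4 GiB), independent of how much RAM backs the fake store.
 */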
1022
1023#define SDEBUG_READCAP_ARR_SZ 8
1024static int resp_readcap(struct scsi_cmnd * scp,
1025			struct sdebug_dev_info * devip)
1026{
1027	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1028	unsigned int capac;
1029	int errsts;
1030
1031	if ((errsts = check_readiness(scp, 1, devip)))
1032		return errsts;
1033	/* following just in case virtual_gb changed */
1034	sdebug_capacity = get_sdebug_capacity();
1035	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1036	if (sdebug_capacity < 0xffffffff) {
1037		capac = (unsigned int)sdebug_capacity - 1;
1038		arr[0] = (capac >> 24);
1039		arr[1] = (capac >> 16) & 0xff;
1040		arr[2] = (capac >> 8) & 0xff;
1041		arr[3] = capac & 0xff;
1042	} else {
1043		arr[0] = 0xff;
1044		arr[1] = 0xff;
1045		arr[2] = 0xff;
1046		arr[3] = 0xff;
1047	}
1048	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1049	arr[7] = scsi_debug_sector_size & 0xff;
1050	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1051}
1052
1053#define SDEBUG_READCAP16_ARR_SZ 32
1054static int resp_readcap16(struct scsi_cmnd * scp,
1055			  struct sdebug_dev_info * devip)
1056{
1057	unsigned char *cmd = (unsigned char *)scp->cmnd;
1058	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1059	unsigned long long capac;
1060	int errsts, k, alloc_len;
1061
1062	if ((errsts = check_readiness(scp, 1, devip)))
1063		return errsts;
1064	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1065		     + cmd[13]);
1066	/* following just in case virtual_gb changed */
1067	sdebug_capacity = get_sdebug_capacity();
1068	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1069	capac = sdebug_capacity - 1;
1070	for (k = 0; k < 8; ++k, capac >>= 8)
1071		arr[7 - k] = capac & 0xff;
1072	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1073	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1074	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1075	arr[11] = scsi_debug_sector_size & 0xff;
1076	arr[13] = scsi_debug_physblk_exp & 0xf;
1077	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1078
1079	if (scsi_debug_lbp()) {
1080		arr[14] |= 0x80; /* LBPME */
1081		if (scsi_debug_lbprz)
1082			arr[14] |= 0x40; /* LBPRZ */
1083	}
1084
1085	arr[15] = scsi_debug_lowest_aligned & 0xff;
1086
1087	if (scsi_debug_dif) {
1088		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1089		arr[12] |= 1; /* PROT_EN */
1090	}
1091
1092	return fill_from_dev_buffer(scp, arr,
1093				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1094}
1095
1096#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1097
1098static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1099			      struct sdebug_dev_info * devip)
1100{
1101	unsigned char *cmd = (unsigned char *)scp->cmnd;
1102	unsigned char * arr;
1103	int host_no = devip->sdbg_host->shost->host_no;
1104	int n, ret, alen, rlen;
1105	int port_group_a, port_group_b, port_a, port_b;
1106
1107	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1108		+ cmd[9]);
1109
1110	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1111	if (! arr)
1112		return DID_REQUEUE << 16;
1113	/*
1114	 * EVPD page 0x88 states we have two ports, one
1115	 * real and a fake port with no device connected.
1116	 * So we create two port groups with one port each
1117	 * and set the group with port B to unavailable.
1118	 */
1119	port_a = 0x1; /* relative port A */
1120	port_b = 0x2; /* relative port B */
1121	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1122	    (devip->channel & 0x7f);
1123	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1124	    (devip->channel & 0x7f) + 0x80;
1125
1126	/*
1127	 * The asymmetric access state is cycled according to the host_id.
1128	 */
1129	n = 4;
1130	if (0 == scsi_debug_vpd_use_hostno) {
1131	    arr[n++] = host_no % 3; /* Asymm access state */
1132	    arr[n++] = 0x0F; /* claim: all states are supported */
1133	} else {
1134	    arr[n++] = 0x0; /* Active/Optimized path */
1135	    arr[n++] = 0x01; /* claim: only support active/optimized paths */
1136	}
1137	arr[n++] = (port_group_a >> 8) & 0xff;
1138	arr[n++] = port_group_a & 0xff;
1139	arr[n++] = 0;    /* Reserved */
1140	arr[n++] = 0;    /* Status code */
1141	arr[n++] = 0;    /* Vendor unique */
1142	arr[n++] = 0x1;  /* One port per group */
1143	arr[n++] = 0;    /* Reserved */
1144	arr[n++] = 0;    /* Reserved */
1145	arr[n++] = (port_a >> 8) & 0xff;
1146	arr[n++] = port_a & 0xff;
1147	arr[n++] = 3;    /* Port unavailable */
1148	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1149	arr[n++] = (port_group_b >> 8) & 0xff;
1150	arr[n++] = port_group_b & 0xff;
1151	arr[n++] = 0;    /* Reserved */
1152	arr[n++] = 0;    /* Status code */
1153	arr[n++] = 0;    /* Vendor unique */
1154	arr[n++] = 0x1;  /* One port per group */
1155	arr[n++] = 0;    /* Reserved */
1156	arr[n++] = 0;    /* Reserved */
1157	arr[n++] = (port_b >> 8) & 0xff;
1158	arr[n++] = port_b & 0xff;
1159
1160	rlen = n - 4;
1161	arr[0] = (rlen >> 24) & 0xff;
1162	arr[1] = (rlen >> 16) & 0xff;
1163	arr[2] = (rlen >> 8) & 0xff;
1164	arr[3] = rlen & 0xff;
1165
1166	/*
1167	 * Return the smallest value of either
1168	 * - The allocated length
1169	 * - The constructed command length
1170	 * - The maximum array size
1171	 */
1172	rlen = min(alen,n);
1173	ret = fill_from_dev_buffer(scp, arr,
1174				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1175	kfree(arr);
1176	return ret;
1177}
1178
1179/* <<Following mode page info copied from ST318451LW>> */
1180
1181static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1182{	/* Read-Write Error Recovery page for mode_sense */
1183	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1184					5, 0, 0xff, 0xff};
1185
1186	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1187	if (1 == pcontrol)
1188		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1189	return sizeof(err_recov_pg);
1190}
1191
1192static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1193{ 	/* Disconnect-Reconnect page for mode_sense */
1194	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1195					 0, 0, 0, 0, 0, 0, 0, 0};
1196
1197	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1198	if (1 == pcontrol)
1199		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1200	return sizeof(disconnect_pg);
1201}
1202
1203static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1204{       /* Format device page for mode_sense */
1205	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1206				     0, 0, 0, 0, 0, 0, 0, 0,
1207				     0, 0, 0, 0, 0x40, 0, 0, 0};
1208
1209	memcpy(p, format_pg, sizeof(format_pg));
1210	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1211	p[11] = sdebug_sectors_per & 0xff;
1212	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1213	p[13] = scsi_debug_sector_size & 0xff;
1214	if (DEV_REMOVEABLE(target))
1215		p[20] |= 0x20; /* should agree with INQUIRY */
1216	if (1 == pcontrol)
1217		memset(p + 2, 0, sizeof(format_pg) - 2);
1218	return sizeof(format_pg);
1219}
1220
1221static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1222{ 	/* Caching page for mode_sense */
1223	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1224		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1225
1226	memcpy(p, caching_pg, sizeof(caching_pg));
1227	if (1 == pcontrol)
1228		memset(p + 2, 0, sizeof(caching_pg) - 2);
1229	return sizeof(caching_pg);
1230}
1231
1232static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1233{ 	/* Control mode page for mode_sense */
1234	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1235				        0, 0, 0, 0};
1236	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1237				     0, 0, 0x2, 0x4b};
1238
1239	if (scsi_debug_dsense)
1240		ctrl_m_pg[2] |= 0x4;
1241	else
1242		ctrl_m_pg[2] &= ~0x4;
1243
1244	if (scsi_debug_ato)
1245		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1246
1247	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1248	if (1 == pcontrol)
1249		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1250	else if (2 == pcontrol)
1251		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1252	return sizeof(ctrl_m_pg);
1253}
1254
1255
1256static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1257{	/* Informational Exceptions control mode page for mode_sense */
1258	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1259				       0, 0, 0x0, 0x0};
1260	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1261				      0, 0, 0x0, 0x0};
1262
1263	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1264	if (1 == pcontrol)
1265		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1266	else if (2 == pcontrol)
1267		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1268	return sizeof(iec_m_pg);
1269}
1270
1271static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1272{	/* SAS SSP mode page - short format for mode_sense */
1273	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1274		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1275
1276	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1277	if (1 == pcontrol)
1278		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1279	return sizeof(sas_sf_m_pg);
1280}
1281
1282
1283static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1284			      int target_dev_id)
1285{	/* SAS phy control and discover mode page for mode_sense */
1286	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1287		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1288		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1289		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1290		    0x2, 0, 0, 0, 0, 0, 0, 0,
1291		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1292		    0, 0, 0, 0, 0, 0, 0, 0,
1293		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1294		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1295		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1296		    0x3, 0, 0, 0, 0, 0, 0, 0,
1297		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1298		    0, 0, 0, 0, 0, 0, 0, 0,
1299		};
1300	int port_a, port_b;
1301
1302	port_a = target_dev_id + 1;
1303	port_b = port_a + 1;
1304	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1305	p[20] = (port_a >> 24);
1306	p[21] = (port_a >> 16) & 0xff;
1307	p[22] = (port_a >> 8) & 0xff;
1308	p[23] = port_a & 0xff;
1309	p[48 + 20] = (port_b >> 24);
1310	p[48 + 21] = (port_b >> 16) & 0xff;
1311	p[48 + 22] = (port_b >> 8) & 0xff;
1312	p[48 + 23] = port_b & 0xff;
1313	if (1 == pcontrol)
1314		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1315	return sizeof(sas_pcd_m_pg);
1316}
1317
1318static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1319{	/* SAS SSP shared protocol specific port mode subpage */
1320	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1321		    0, 0, 0, 0, 0, 0, 0, 0,
1322		};
1323
1324	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1325	if (1 == pcontrol)
1326		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1327	return sizeof(sas_sha_m_pg);
1328}
1329
1330#define SDEBUG_MAX_MSENSE_SZ 256
1331
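/*
 * MODE SENSE(6/10) response: build the mode parameter header, an optional
 * 8 or 16 byte block descriptor (disks only, unless DBD is set), then
 * append the requested mode page(s) at 'ap'.
 */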
1332static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1333			   struct sdebug_dev_info * devip)
1334{
1335	unsigned char dbd, llbaa;
1336	int pcontrol, pcode, subpcode, bd_len;
1337	unsigned char dev_spec;
1338	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1339	unsigned char * ap;
1340	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1341	unsigned char *cmd = (unsigned char *)scp->cmnd;
1342
1343	if ((errsts = check_readiness(scp, 1, devip)))
1344		return errsts;
1345	dbd = !!(cmd[1] & 0x8);
1346	pcontrol = (cmd[2] & 0xc0) >> 6;
1347	pcode = cmd[2] & 0x3f;
1348	subpcode = cmd[3];
1349	msense_6 = (MODE_SENSE == cmd[0]);
1350	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1351	if ((0 == scsi_debug_ptype) && (0 == dbd))
1352		bd_len = llbaa ? 16 : 8;
1353	else
1354		bd_len = 0;
1355	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1356	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1357	if (0x3 == pcontrol) {  /* Saving values not supported */
1358		mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1359			       	0);
1360		return check_condition_result;
1361	}
1362	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1363			(devip->target * 1000) - 3;
1364	/* set DPOFUA bit for disks */
1365	if (0 == scsi_debug_ptype)
1366		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1367	else
1368		dev_spec = 0x0;
1369	if (msense_6) {
1370		arr[2] = dev_spec;
1371		arr[3] = bd_len;
1372		offset = 4;
1373	} else {
1374		arr[3] = dev_spec;
1375		if (16 == bd_len)
1376			arr[4] = 0x1;	/* set LONGLBA bit */
1377		arr[7] = bd_len;	/* assume 255 or less */
1378		offset = 8;
1379	}
1380	ap = arr + offset;
1381	if ((bd_len > 0) && (!sdebug_capacity))
1382		sdebug_capacity = get_sdebug_capacity();
1383
1384	if (8 == bd_len) {
1385		if (sdebug_capacity > 0xfffffffe) {
1386			ap[0] = 0xff;
1387			ap[1] = 0xff;
1388			ap[2] = 0xff;
1389			ap[3] = 0xff;
1390		} else {
1391			ap[0] = (sdebug_capacity >> 24) & 0xff;
1392			ap[1] = (sdebug_capacity >> 16) & 0xff;
1393			ap[2] = (sdebug_capacity >> 8) & 0xff;
1394			ap[3] = sdebug_capacity & 0xff;
1395		}
1396		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1397		ap[7] = scsi_debug_sector_size & 0xff;
1398		offset += bd_len;
1399		ap = arr + offset;
1400	} else if (16 == bd_len) {
1401		unsigned long long capac = sdebug_capacity;
1402
1403        	for (k = 0; k < 8; ++k, capac >>= 8)
1404                	ap[7 - k] = capac & 0xff;
1405		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1406		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1407		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1408		ap[15] = scsi_debug_sector_size & 0xff;
1409		offset += bd_len;
1410		ap = arr + offset;
1411	}
1412
1413	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1414		/* TODO: Control Extension page */
1415		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1416			       	0);
1417		return check_condition_result;
1418	}
1419	switch (pcode) {
1420	case 0x1:	/* Read-Write error recovery page, direct access */
1421		len = resp_err_recov_pg(ap, pcontrol, target);
1422		offset += len;
1423		break;
1424	case 0x2:	/* Disconnect-Reconnect page, all devices */
1425		len = resp_disconnect_pg(ap, pcontrol, target);
1426		offset += len;
1427		break;
1428        case 0x3:       /* Format device page, direct access */
1429                len = resp_format_pg(ap, pcontrol, target);
1430                offset += len;
1431                break;
1432	case 0x8:	/* Caching page, direct access */
1433		len = resp_caching_pg(ap, pcontrol, target);
1434		offset += len;
1435		break;
1436	case 0xa:	/* Control Mode page, all devices */
1437		len = resp_ctrl_m_pg(ap, pcontrol, target);
1438		offset += len;
1439		break;
1440	case 0x19:	/* if spc==1 then sas phy, control+discover */
1441		if ((subpcode > 0x2) && (subpcode < 0xff)) {
1442		        mk_sense_buffer(devip, ILLEGAL_REQUEST,
1443					INVALID_FIELD_IN_CDB, 0);
1444			return check_condition_result;
1445	        }
1446		len = 0;
1447		if ((0x0 == subpcode) || (0xff == subpcode))
1448			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1449		if ((0x1 == subpcode) || (0xff == subpcode))
1450			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1451						  target_dev_id);
1452		if ((0x2 == subpcode) || (0xff == subpcode))
1453			len += resp_sas_sha_m_spg(ap + len, pcontrol);
1454		offset += len;
1455		break;
1456	case 0x1c:	/* Informational Exceptions Mode page, all devices */
1457		len = resp_iec_m_pg(ap, pcontrol, target);
1458		offset += len;
1459		break;
1460	case 0x3f:	/* Read all Mode pages */
1461		if ((0 == subpcode) || (0xff == subpcode)) {
1462			len = resp_err_recov_pg(ap, pcontrol, target);
1463			len += resp_disconnect_pg(ap + len, pcontrol, target);
1464			len += resp_format_pg(ap + len, pcontrol, target);
1465			len += resp_caching_pg(ap + len, pcontrol, target);
1466			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1467			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1468			if (0xff == subpcode) {
1469				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1470						  target, target_dev_id);
1471				len += resp_sas_sha_m_spg(ap + len, pcontrol);
1472			}
1473			len += resp_iec_m_pg(ap + len, pcontrol, target);
1474		} else {
1475			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1476					INVALID_FIELD_IN_CDB, 0);
1477			return check_condition_result;
1478                }
1479		offset += len;
1480		break;
1481	default:
1482		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1483			       	0);
1484		return check_condition_result;
1485	}
1486	if (msense_6)
1487		arr[0] = offset - 1;
1488	else {
1489		arr[0] = ((offset - 2) >> 8) & 0xff;
1490		arr[1] = (offset - 2) & 0xff;
1491	}
1492	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1493}
1494
1495#define SDEBUG_MAX_MSELECT_SZ 512
1496
1497static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1498			    struct sdebug_dev_info * devip)
1499{
1500	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1501	int param_len, res, errsts, mpage;
1502	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1503	unsigned char *cmd = (unsigned char *)scp->cmnd;
1504
1505	if ((errsts = check_readiness(scp, 1, devip)))
1506		return errsts;
1507	memset(arr, 0, sizeof(arr));
1508	pf = cmd[1] & 0x10;
1509	sp = cmd[1] & 0x1;
1510	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1511	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1512		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1513				INVALID_FIELD_IN_CDB, 0);
1514		return check_condition_result;
1515	}
1516        res = fetch_to_dev_buffer(scp, arr, param_len);
1517        if (-1 == res)
1518                return (DID_ERROR << 16);
1519        else if ((res < param_len) &&
1520                 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1521                printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1522                       " IO sent=%d bytes\n", param_len, res);
1523	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1524	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1525	if (md_len > 2) {
1526		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1527				INVALID_FIELD_IN_PARAM_LIST, 0);
1528		return check_condition_result;
1529	}
1530	off = bd_len + (mselect6 ? 4 : 8);
1531	mpage = arr[off] & 0x3f;
1532	ps = !!(arr[off] & 0x80);
1533	if (ps) {
1534		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1535				INVALID_FIELD_IN_PARAM_LIST, 0);
1536		return check_condition_result;
1537	}
1538	spf = !!(arr[off] & 0x40);
1539	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1540		       (arr[off + 1] + 2);
1541	if ((pg_len + off) > param_len) {
1542		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1543				PARAMETER_LIST_LENGTH_ERR, 0);
1544		return check_condition_result;
1545	}
1546	switch (mpage) {
1547	case 0xa:      /* Control Mode page */
1548		if (ctrl_m_pg[1] == arr[off + 1]) {
1549			memcpy(ctrl_m_pg + 2, arr + off + 2,
1550			       sizeof(ctrl_m_pg) - 2);
1551			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1552			return 0;
1553		}
1554		break;
1555	case 0x1c:      /* Informational Exceptions Mode page */
1556		if (iec_m_pg[1] == arr[off + 1]) {
1557			memcpy(iec_m_pg + 2, arr + off + 2,
1558			       sizeof(iec_m_pg) - 2);
1559			return 0;
1560		}
1561		break;
1562	default:
1563		break;
1564	}
1565	mk_sense_buffer(devip, ILLEGAL_REQUEST,
1566			INVALID_FIELD_IN_PARAM_LIST, 0);
1567	return check_condition_result;
1568}
1569
1570static int resp_temp_l_pg(unsigned char * arr)
1571{
1572	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1573				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
1574		};
1575
1576        memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1577        return sizeof(temp_l_pg);
1578}
1579
1580static int resp_ie_l_pg(unsigned char * arr)
1581{
1582	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1583		};
1584
1585        memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1586	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
1587		arr[4] = THRESHOLD_EXCEEDED;
1588		arr[5] = 0xff;
1589	}
1590        return sizeof(ie_l_pg);
1591}
1592
1593#define SDEBUG_MAX_LSENSE_SZ 512
1594
1595static int resp_log_sense(struct scsi_cmnd * scp,
1596                          struct sdebug_dev_info * devip)
1597{
1598	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1599	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1600	unsigned char *cmd = (unsigned char *)scp->cmnd;
1601
1602	if ((errsts = check_readiness(scp, 1, devip)))
1603		return errsts;
1604	memset(arr, 0, sizeof(arr));
1605	ppc = cmd[1] & 0x2;
1606	sp = cmd[1] & 0x1;
1607	if (ppc || sp) {
1608		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1609				INVALID_FIELD_IN_CDB, 0);
1610		return check_condition_result;
1611	}
1612	pcontrol = (cmd[2] & 0xc0) >> 6;
1613	pcode = cmd[2] & 0x3f;
1614	subpcode = cmd[3] & 0xff;
1615	alloc_len = (cmd[7] << 8) + cmd[8];
1616	arr[0] = pcode;
1617	if (0 == subpcode) {
1618		switch (pcode) {
1619		case 0x0:	/* Supported log pages log page */
1620			n = 4;
1621			arr[n++] = 0x0;		/* this page */
1622			arr[n++] = 0xd;		/* Temperature */
1623			arr[n++] = 0x2f;	/* Informational exceptions */
1624			arr[3] = n - 4;
1625			break;
1626		case 0xd:	/* Temperature log page */
1627			arr[3] = resp_temp_l_pg(arr + 4);
1628			break;
1629		case 0x2f:	/* Informational exceptions log page */
1630			arr[3] = resp_ie_l_pg(arr + 4);
1631			break;
1632		default:
1633			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1634					INVALID_FIELD_IN_CDB, 0);
1635			return check_condition_result;
1636		}
1637	} else if (0xff == subpcode) {
1638		arr[0] |= 0x40;
1639		arr[1] = subpcode;
1640		switch (pcode) {
1641		case 0x0:	/* Supported log pages and subpages log page */
1642			n = 4;
1643			arr[n++] = 0x0;
1644			arr[n++] = 0x0;		/* 0,0 page */
1645			arr[n++] = 0x0;
1646			arr[n++] = 0xff;	/* this page */
1647			arr[n++] = 0xd;
1648			arr[n++] = 0x0;		/* Temperature */
1649			arr[n++] = 0x2f;
1650			arr[n++] = 0x0;	/* Informational exceptions */
1651			arr[3] = n - 4;
1652			break;
1653		case 0xd:	/* Temperature subpages */
1654			n = 4;
1655			arr[n++] = 0xd;
1656			arr[n++] = 0x0;		/* Temperature */
1657			arr[3] = n - 4;
1658			break;
1659		case 0x2f:	/* Informational exceptions subpages */
1660			n = 4;
1661			arr[n++] = 0x2f;
1662			arr[n++] = 0x0;		/* Informational exceptions */
1663			arr[3] = n - 4;
1664			break;
1665		default:
1666			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1667					INVALID_FIELD_IN_CDB, 0);
1668			return check_condition_result;
1669		}
1670	} else {
1671		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1672				INVALID_FIELD_IN_CDB, 0);
1673		return check_condition_result;
1674	}
1675	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1676	return fill_from_dev_buffer(scp, arr,
1677		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1678}
1679
1680static int check_device_access_params(struct sdebug_dev_info *devi,
1681				      unsigned long long lba, unsigned int num)
1682{
1683	if (lba + num > sdebug_capacity) {
1684		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1685		return check_condition_result;
1686	}
1687	/* transfer length excessive (tie in to block limits VPD page) */
1688	if (num > sdebug_store_sectors) {
1689		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1690		return check_condition_result;
1691	}
1692	return 0;
1693}
1694
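/*
 * Copy 'num' blocks between the command's data buffer and the shared
 * ramdisk (fake_storep). The lba is reduced modulo sdebug_store_sectors,
 * so a transfer that runs past the end of the store wraps to the start
 * and is carried out as two copies.
 */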
1695static int do_device_access(struct scsi_cmnd *scmd,
1696			    struct sdebug_dev_info *devi,
1697			    unsigned long long lba, unsigned int num, int write)
1698{
1699	int ret;
1700	unsigned long long block, rest = 0;
1701	int (*func)(struct scsi_cmnd *, unsigned char *, int);
1702
1703	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1704
1705	block = do_div(lba, sdebug_store_sectors);
1706	if (block + num > sdebug_store_sectors)
1707		rest = block + num - sdebug_store_sectors;
1708
1709	ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1710		   (num - rest) * scsi_debug_sector_size);
1711	if (!ret && rest)
1712		ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1713
1714	return ret;
1715}
1716
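/*
 * DIF verification for reads: for each protected sector check the guard
 * tag (CRC or IP checksum, per scsi_debug_guard) and, for type 1/2
 * protection, the reference tag; then copy the 8 byte tuples from
 * dif_storep into the command's protection scatterlist. Sectors whose
 * application tag is 0xffff are skipped.
 */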
1717static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1718			    unsigned int sectors, u32 ei_lba)
1719{
1720	unsigned int i, resid;
1721	struct scatterlist *psgl;
1722	struct sd_dif_tuple *sdt;
1723	sector_t sector;
1724	sector_t tmp_sec = start_sec;
1725	void *paddr;
1726
1727	start_sec = do_div(tmp_sec, sdebug_store_sectors);
1728
1729	sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1730
1731	for (i = 0 ; i < sectors ; i++) {
1732		u16 csum;
1733
1734		if (sdt[i].app_tag == 0xffff)
1735			continue;
1736
1737		sector = start_sec + i;
1738
1739		switch (scsi_debug_guard) {
1740		case 1:
1741			csum = ip_compute_csum(fake_storep +
1742					       sector * scsi_debug_sector_size,
1743					       scsi_debug_sector_size);
1744			break;
1745		case 0:
1746			csum = crc_t10dif(fake_storep +
1747					  sector * scsi_debug_sector_size,
1748					  scsi_debug_sector_size);
1749			csum = cpu_to_be16(csum);
1750			break;
1751		default:
1752			BUG();
1753		}
1754
1755		if (sdt[i].guard_tag != csum) {
1756			printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1757			       " rcvd 0x%04x, data 0x%04x\n", __func__,
1758			       (unsigned long)sector,
1759			       be16_to_cpu(sdt[i].guard_tag),
1760			       be16_to_cpu(csum));
1761			dif_errors++;
1762			return 0x01;
1763		}
1764
1765		if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1766		    be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1767			printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1768			       __func__, (unsigned long)sector);
1769			dif_errors++;
1770			return 0x03;
1771		}
1772
1773		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1774		    be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1775			printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1776			       __func__, (unsigned long)sector);
1777			dif_errors++;
1778			return 0x03;
1779		}
1780
1781		ei_lba++;
1782	}
1783
1784	resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1785	sector = start_sec;
1786
1787	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1788		int len = min(psgl->length, resid);
1789
1790		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1791		memcpy(paddr, dif_storep + dif_offset(sector), len);
1792
1793		sector += len >> 3;
1794		if (sector >= sdebug_store_sectors) {
1795			/* Force wrap */
1796			tmp_sec = sector;
1797			sector = do_div(tmp_sec, sdebug_store_sectors);
1798		}
1799		resid -= len;
1800		kunmap_atomic(paddr);
1801	}
1802
1803	dix_reads++;
1804
1805	return 0;
1806}
1807
1808static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1809		     unsigned int num, struct sdebug_dev_info *devip,
1810		     u32 ei_lba)
1811{
1812	unsigned long iflags;
1813	int ret;
1814
1815	ret = check_device_access_params(devip, lba, num);
1816	if (ret)
1817		return ret;
1818
1819	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1820	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1821	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1822		/* claim unrecoverable read error */
1823		mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1824		/* set info field and valid bit for fixed descriptor */
1825		if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1826			devip->sense_buff[0] |= 0x80;	/* Valid bit */
1827			ret = (lba < OPT_MEDIUM_ERR_ADDR)
1828			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1829			devip->sense_buff[3] = (ret >> 24) & 0xff;
1830			devip->sense_buff[4] = (ret >> 16) & 0xff;
1831			devip->sense_buff[5] = (ret >> 8) & 0xff;
1832			devip->sense_buff[6] = ret & 0xff;
1833		}
1834	        scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1835		return check_condition_result;
1836	}
1837
1838	/* DIX + T10 DIF */
1839	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1840		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1841
1842		if (prot_ret) {
1843			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1844			return illegal_condition_result;
1845		}
1846	}
1847
1848	read_lock_irqsave(&atomic_rw, iflags);
1849	ret = do_device_access(SCpnt, devip, lba, num, 0);
1850	read_unlock_irqrestore(&atomic_rw, iflags);
1851	return ret;
1852}
1853
1854void dump_sector(unsigned char *buf, int len)
1855{
1856	int i, j;
1857
1858	printk(KERN_ERR ">>> Sector Dump <<<\n");
1859
1860	for (i = 0 ; i < len ; i += 16) {
1861		printk(KERN_ERR "%04d: ", i);
1862
1863		for (j = 0 ; j < 16 ; j++) {
1864			unsigned char c = buf[i+j];
1865			if (c >= 0x20 && c < 0x7e)
1866				printk(" %c ", buf[i+j]);
1867			else
1868				printk("%02x ", buf[i+j]);
1869		}
1870
1871		printk("\n");
1872	}
1873}
1874
1875static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1876			     unsigned int sectors, u32 ei_lba)
1877{
1878	int i, j, ret;
1879	struct sd_dif_tuple *sdt;
1880	struct scatterlist *dsgl = scsi_sglist(SCpnt);
1881	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1882	void *daddr, *paddr;
1883	sector_t tmp_sec = start_sec;
1884	sector_t sector;
1885	int ppage_offset;
1886	unsigned short csum;
1887
1888	sector = do_div(tmp_sec, sdebug_store_sectors);
1889
1890	BUG_ON(scsi_sg_count(SCpnt) == 0);
1891	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1892
1893	paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1894	ppage_offset = 0;
1895
1896	/* For each data page */
1897	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1898		daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1899
1900		/* For each sector-sized chunk in data page */
1901		for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1902
1903			/* If we're at the end of the current
1904			 * protection page advance to the next one
1905			 */
1906			if (ppage_offset >= psgl->length) {
1907				kunmap_atomic(paddr);
1908				psgl = sg_next(psgl);
1909				BUG_ON(psgl == NULL);
1910				paddr = kmap_atomic(sg_page(psgl))
1911					+ psgl->offset;
1912				ppage_offset = 0;
1913			}
1914
1915			sdt = paddr + ppage_offset;
1916
1917			switch (scsi_debug_guard) {
1918			case 1:
1919				csum = ip_compute_csum(daddr,
1920						       scsi_debug_sector_size);
1921				break;
1922			case 0:
1923				csum = cpu_to_be16(crc_t10dif(daddr,
1924						      scsi_debug_sector_size));
1925				break;
1926			default:
1927				BUG();
1928				ret = 0;
1929				goto out;
1930			}
1931
1932			if (sdt->guard_tag != csum) {
1933				printk(KERN_ERR
1934				       "%s: GUARD check failed on sector %lu " \
1935				       "rcvd 0x%04x, calculated 0x%04x\n",
1936				       __func__, (unsigned long)sector,
1937				       be16_to_cpu(sdt->guard_tag),
1938				       be16_to_cpu(csum));
1939				ret = 0x01;
1940				dump_sector(daddr, scsi_debug_sector_size);
1941				goto out;
1942			}
1943
1944			if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1945			    be32_to_cpu(sdt->ref_tag)
1946			    != (start_sec & 0xffffffff)) {
1947				printk(KERN_ERR
1948				       "%s: REF check failed on sector %lu\n",
1949				       __func__, (unsigned long)sector);
1950				ret = 0x03;
1951				dump_sector(daddr, scsi_debug_sector_size);
1952				goto out;
1953			}
1954
1955			if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1956			    be32_to_cpu(sdt->ref_tag) != ei_lba) {
1957				printk(KERN_ERR
1958				       "%s: REF check failed on sector %lu\n",
1959				       __func__, (unsigned long)sector);
1960				ret = 0x03;
1961				dump_sector(daddr, scsi_debug_sector_size);
1962				goto out;
1963			}
1964
1965			/* Would be great to copy this in bigger
1966			 * chunks.  However, for the sake of
1967			 * correctness we need to verify each sector
1968			 * before writing it to "stable" storage
1969			 */
1970			memcpy(dif_storep + dif_offset(sector), sdt, 8);
1971
1972			sector++;
1973
1974			if (sector == sdebug_store_sectors)
1975				sector = 0;	/* Force wrap */
1976
1977			start_sec++;
1978			ei_lba++;
1979			daddr += scsi_debug_sector_size;
1980			ppage_offset += sizeof(struct sd_dif_tuple);
1981		}
1982
1983		kunmap_atomic(daddr);
1984	}
1985
1986	kunmap_atomic(paddr);
1987
1988	dix_writes++;
1989
1990	return 0;
1991
1992out:
1993	dif_errors++;
1994	kunmap_atomic(daddr);
1995	kunmap_atomic(paddr);
1996	return ret;
1997}
1998
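/*
 * Logical block provisioning state is tracked in the map_storep bitmap,
 * one bit per unmap-granularity-sized group of blocks. map_state()
 * reports whether 'lba' is currently mapped and, via *num, how many
 * following blocks share that state.
 */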
1999static unsigned int map_state(sector_t lba, unsigned int *num)
2000{
2001	unsigned int granularity, alignment, mapped;
2002	sector_t block, next, end;
2003
2004	granularity = scsi_debug_unmap_granularity;
2005	alignment = granularity - scsi_debug_unmap_alignment;
2006	block = lba + alignment;
2007	do_div(block, granularity);
2008
2009	mapped = test_bit(block, map_storep);
2010
2011	if (mapped)
2012		next = find_next_zero_bit(map_storep, map_size, block);
2013	else
2014		next = find_next_bit(map_storep, map_size, block);
2015
2016	end = next * granularity - scsi_debug_unmap_alignment;
2017	*num = end - lba;
2018
2019	return mapped;
2020}
2021
2022static void map_region(sector_t lba, unsigned int len)
2023{
2024	unsigned int granularity, alignment;
2025	sector_t end = lba + len;
2026
2027	granularity = scsi_debug_unmap_granularity;
2028	alignment = granularity - scsi_debug_unmap_alignment;
2029
2030	while (lba < end) {
2031		sector_t block, rem;
2032
2033		block = lba + alignment;
2034		rem = do_div(block, granularity);
2035
2036		if (block < map_size)
2037			set_bit(block, map_storep);
2038
2039		lba += granularity - rem;
2040	}
2041}
2042
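/*
 * Clear provisioning bits for the given range. Only chunks that are
 * granularity aligned and wholly contained in the range are unmapped;
 * when lbprz is set their backing storage is also zeroed so later reads
 * return zeros.
 */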
2043static void unmap_region(sector_t lba, unsigned int len)
2044{
2045	unsigned int granularity, alignment;
2046	sector_t end = lba + len;
2047
2048	granularity = scsi_debug_unmap_granularity;
2049	alignment = granularity - scsi_debug_unmap_alignment;
2050
2051	while (lba < end) {
2052		sector_t block, rem;
2053
2054		block = lba + alignment;
2055		rem = do_div(block, granularity);
2056
2057		if (rem == 0 && lba + granularity <= end && block < map_size) {
2058			clear_bit(block, map_storep);
2059			if (scsi_debug_lbprz)
2060				memset(fake_storep +
2061				       block * scsi_debug_sector_size, 0,
2062				       scsi_debug_sector_size);
2063		}
2064		lba += granularity - rem;
2065	}
2066}
2067
2068static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2069		      unsigned int num, struct sdebug_dev_info *devip,
2070		      u32 ei_lba)
2071{
2072	unsigned long iflags;
2073	int ret;
2074
2075	ret = check_device_access_params(devip, lba, num);
2076	if (ret)
2077		return ret;
2078
2079	/* DIX + T10 DIF */
2080	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2081		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2082
2083		if (prot_ret) {
2084			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2085			return illegal_condition_result;
2086		}
2087	}
2088
2089	write_lock_irqsave(&atomic_rw, iflags);
2090	ret = do_device_access(SCpnt, devip, lba, num, 1);
2091	if (scsi_debug_unmap_granularity)
2092		map_region(lba, num);
2093	write_unlock_irqrestore(&atomic_rw, iflags);
2094	if (-1 == ret)
2095		return (DID_ERROR << 16);
2096	else if ((ret < (num * scsi_debug_sector_size)) &&
2097		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2098		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2099		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2100
2101	return 0;
2102}
2103
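/*
 * WRITE SAME: with the UNMAP bit set (and unmap support configured) the
 * range is simply unmapped; otherwise a single logical block is fetched
 * from the host and replicated across the remaining blocks of the range.
 */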
2104static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2105		      unsigned int num, struct sdebug_dev_info *devip,
2106			   u32 ei_lba, unsigned int unmap)
2107{
2108	unsigned long iflags;
2109	unsigned long long i;
2110	int ret;
2111
2112	ret = check_device_access_params(devip, lba, num);
2113	if (ret)
2114		return ret;
2115
2116	if (num > scsi_debug_write_same_length) {
2117		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2118				0);
2119		return check_condition_result;
2120	}
2121
2122	write_lock_irqsave(&atomic_rw, iflags);
2123
2124	if (unmap && scsi_debug_unmap_granularity) {
2125		unmap_region(lba, num);
2126		goto out;
2127	}
2128
2129	/* Else fetch one logical block */
2130	ret = fetch_to_dev_buffer(scmd,
2131				  fake_storep + (lba * scsi_debug_sector_size),
2132				  scsi_debug_sector_size);
2133
2134	if (-1 == ret) {
2135		write_unlock_irqrestore(&atomic_rw, iflags);
2136		return (DID_ERROR << 16);
2137	} else if ((ret < (num * scsi_debug_sector_size)) &&
2138		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2139		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2140		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2141
2142	/* Copy first sector to remaining blocks */
2143	for (i = 1 ; i < num ; i++)
2144		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2145		       fake_storep + (lba * scsi_debug_sector_size),
2146		       scsi_debug_sector_size);
2147
2148	if (scsi_debug_unmap_granularity)
2149		map_region(lba, num);
2150out:
2151	write_unlock_irqrestore(&atomic_rw, iflags);
2152
2153	return 0;
2154}
2155
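/*
 * UNMAP parameter list: an 8 byte header followed by 16 byte block
 * descriptors, each giving a starting LBA and a number of blocks.
 */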
2156struct unmap_block_desc {
2157	__be64	lba;
2158	__be32	blocks;
2159	__be32	__reserved;
2160};
2161
2162static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2163{
2164	unsigned char *buf;
2165	struct unmap_block_desc *desc;
2166	unsigned int i, payload_len, descriptors;
2167	int ret;
2168
2169	ret = check_readiness(scmd, 1, devip);
2170	if (ret)
2171		return ret;
2172
2173	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2174	BUG_ON(scsi_bufflen(scmd) != payload_len);
2175
2176	descriptors = (payload_len - 8) / 16;
2177
2178	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2179	if (!buf)
2180		return check_condition_result;
2181
2182	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2183
2184	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2185	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2186
2187	desc = (void *)&buf[8];
2188
2189	for (i = 0 ; i < descriptors ; i++) {
2190		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2191		unsigned int num = get_unaligned_be32(&desc[i].blocks);
2192
2193		ret = check_device_access_params(devip, lba, num);
2194		if (ret)
2195			goto out;
2196
2197		unmap_region(lba, num);
2198	}
2199
2200	ret = 0;
2201
2202out:
2203	kfree(buf);
2204
2205	return ret;
2206}
2207
2208#define SDEBUG_GET_LBA_STATUS_LEN 32
2209
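/*
 * GET LBA STATUS: return a single descriptor covering the run of blocks,
 * starting at the requested LBA, that share the same mapped/deallocated
 * state (as reported by map_state()).
 */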
2210static int resp_get_lba_status(struct scsi_cmnd * scmd,
2211			       struct sdebug_dev_info * devip)
2212{
2213	unsigned long long lba;
2214	unsigned int alloc_len, mapped, num;
2215	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2216	int ret;
2217
2218	ret = check_readiness(scmd, 1, devip);
2219	if (ret)
2220		return ret;
2221
2222	lba = get_unaligned_be64(&scmd->cmnd[2]);
2223	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2224
2225	if (alloc_len < 24)
2226		return 0;
2227
2228	ret = check_device_access_params(devip, lba, 1);
2229	if (ret)
2230		return ret;
2231
2232	mapped = map_state(lba, &num);
2233
2234	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2235	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
2236	put_unaligned_be64(lba, &arr[8]);	/* LBA */
2237	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
2238	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
2239
2240	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2241}
2242
2243#define SDEBUG_RLUN_ARR_SZ 256
2244
2245static int resp_report_luns(struct scsi_cmnd * scp,
2246			    struct sdebug_dev_info * devip)
2247{
2248	unsigned int alloc_len;
2249	int lun_cnt, i, upper, num, n, wlun, lun;
2250	unsigned char *cmd = (unsigned char *)scp->cmnd;
2251	int select_report = (int)cmd[2];
2252	struct scsi_lun *one_lun;
2253	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2254	unsigned char * max_addr;
2255
2256	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2257	if ((alloc_len < 4) || (select_report > 2)) {
2258		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2259			       	0);
2260		return check_condition_result;
2261	}
2262	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
2263	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2264	lun_cnt = scsi_debug_max_luns;
2265	if (1 == select_report)
2266		lun_cnt = 0;
2267	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2268		--lun_cnt;
2269	wlun = (select_report > 0) ? 1 : 0;
2270	num = lun_cnt + wlun;
2271	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2272	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2273	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2274			    sizeof(struct scsi_lun)), num);
2275	if (n < num) {
2276		wlun = 0;
2277		lun_cnt = n;
2278	}
2279	one_lun = (struct scsi_lun *) &arr[8];
2280	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2281	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2282             ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2283	     i++, lun++) {
2284		upper = (lun >> 8) & 0x3f;
2285		if (upper)
2286			one_lun[i].scsi_lun[0] =
2287			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2288		one_lun[i].scsi_lun[1] = lun & 0xff;
2289	}
2290	if (wlun) {
2291		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2292		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2293		i++;
2294	}
2295	alloc_len = (unsigned char *)(one_lun + i) - arr;
2296	return fill_from_dev_buffer(scp, arr,
2297				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2298}
2299
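/*
 * XDWRITEREAD(10): the write data fetched from the host is XORed into
 * the command's data-in (bidirectional) buffer, page by page.
 */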
2300static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2301			    unsigned int num, struct sdebug_dev_info *devip)
2302{
2303	int i, j, ret = -1;
2304	unsigned char *kaddr, *buf;
2305	unsigned int offset;
2306	struct scatterlist *sg;
2307	struct scsi_data_buffer *sdb = scsi_in(scp);
2308
2309	/* better not to use temporary buffer. */
2310	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2311	if (!buf)
2312		return ret;
2313
2314	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2315
2316	offset = 0;
2317	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2318		kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2319		if (!kaddr)
2320			goto out;
2321
2322		for (j = 0; j < sg->length; j++)
2323			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
2324
2325		offset += sg->length;
2326		kunmap_atomic(kaddr);
2327	}
2328	ret = 0;
2329out:
2330	kfree(buf);
2331
2332	return ret;
2333}
2334
2335/* When timer goes off this function is called. */
2336static void timer_intr_handler(unsigned long indx)
2337{
2338	struct sdebug_queued_cmd * sqcp;
2339	unsigned long iflags;
2340
2341	if (indx >= scsi_debug_max_queue) {
2342		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2343		       "large\n");
2344		return;
2345	}
2346	spin_lock_irqsave(&queued_arr_lock, iflags);
2347	sqcp = &queued_arr[(int)indx];
2348	if (! sqcp->in_use) {
2349		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2350		       "interrupt\n");
2351		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2352		return;
2353	}
2354	sqcp->in_use = 0;
2355	if (sqcp->done_funct) {
2356		sqcp->a_cmnd->result = sqcp->scsi_result;
2357		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2358	}
2359	sqcp->done_funct = NULL;
2360	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2361}
2362
2363
2364static struct sdebug_dev_info *
2365sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2366{
2367	struct sdebug_dev_info *devip;
2368
2369	devip = kzalloc(sizeof(*devip), flags);
2370	if (devip) {
2371		devip->sdbg_host = sdbg_host;
2372		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2373	}
2374	return devip;
2375}
2376
2377static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2378{
2379	struct sdebug_host_info * sdbg_host;
2380	struct sdebug_dev_info * open_devip = NULL;
2381	struct sdebug_dev_info * devip =
2382			(struct sdebug_dev_info *)sdev->hostdata;
2383
2384	if (devip)
2385		return devip;
2386	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2387	if (!sdbg_host) {
2388                printk(KERN_ERR "Host info NULL\n");
2389		return NULL;
2390        }
2391	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2392		if ((devip->used) && (devip->channel == sdev->channel) &&
2393                    (devip->target == sdev->id) &&
2394                    (devip->lun == sdev->lun))
2395                        return devip;
2396		else {
2397			if ((!devip->used) && (!open_devip))
2398				open_devip = devip;
2399		}
2400	}
2401	if (!open_devip) { /* try and make a new one */
2402		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2403		if (!open_devip) {
2404			printk(KERN_ERR "%s: out of memory at line %d\n",
2405				__func__, __LINE__);
2406			return NULL;
2407		}
2408	}
2409
2410	open_devip->channel = sdev->channel;
2411	open_devip->target = sdev->id;
2412	open_devip->lun = sdev->lun;
2413	open_devip->sdbg_host = sdbg_host;
2414	open_devip->reset = 1;
2415	open_devip->used = 1;
2416	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2417	if (scsi_debug_dsense)
2418		open_devip->sense_buff[0] = 0x72;
2419	else {
2420		open_devip->sense_buff[0] = 0x70;
2421		open_devip->sense_buff[7] = 0xa;
2422	}
2423	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2424		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2425
2426	return open_devip;
2427}
2428
2429static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2430{
2431	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2432		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2433		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2434	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2435	return 0;
2436}
2437
2438static int scsi_debug_slave_configure(struct scsi_device *sdp)
2439{
2440	struct sdebug_dev_info *devip;
2441
2442	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2443		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2444		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2445	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2446		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2447	devip = devInfoReg(sdp);
2448	if (NULL == devip)
2449		return 1;	/* no resources, will be marked offline */
2450	sdp->hostdata = devip;
2451	if (sdp->host->cmd_per_lun)
2452		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2453					sdp->host->cmd_per_lun);
2454	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2455	if (scsi_debug_no_uld)
2456		sdp->no_uld_attach = 1;
2457	return 0;
2458}
2459
2460static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2461{
2462	struct sdebug_dev_info *devip =
2463		(struct sdebug_dev_info *)sdp->hostdata;
2464
2465	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2466		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2467		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2468	if (devip) {
2469		/* make this slot available for re-use */
2470		devip->used = 0;
2471		sdp->hostdata = NULL;
2472	}
2473}
2474
2475/* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2476static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2477{
2478	unsigned long iflags;
2479	int k;
2480	struct sdebug_queued_cmd *sqcp;
2481
2482	spin_lock_irqsave(&queued_arr_lock, iflags);
2483	for (k = 0; k < scsi_debug_max_queue; ++k) {
2484		sqcp = &queued_arr[k];
2485		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2486			del_timer_sync(&sqcp->cmnd_timer);
2487			sqcp->in_use = 0;
2488			sqcp->a_cmnd = NULL;
2489			break;
2490		}
2491	}
2492	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2493	return (k < scsi_debug_max_queue) ? 1 : 0;
2494}
2495
2496/* Deletes (stops) timers of all queued commands */
2497static void stop_all_queued(void)
2498{
2499	unsigned long iflags;
2500	int k;
2501	struct sdebug_queued_cmd *sqcp;
2502
2503	spin_lock_irqsave(&queued_arr_lock, iflags);
2504	for (k = 0; k < scsi_debug_max_queue; ++k) {
2505		sqcp = &queued_arr[k];
2506		if (sqcp->in_use && sqcp->a_cmnd) {
2507			del_timer_sync(&sqcp->cmnd_timer);
2508			sqcp->in_use = 0;
2509			sqcp->a_cmnd = NULL;
2510		}
2511	}
2512	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2513}
2514
2515static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2516{
2517	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2518		printk(KERN_INFO "scsi_debug: abort\n");
2519	++num_aborts;
2520	stop_queued_cmnd(SCpnt);
2521	return SUCCESS;
2522}
2523
2524static int scsi_debug_biosparam(struct scsi_device *sdev,
2525		struct block_device * bdev, sector_t capacity, int *info)
2526{
2527	int res;
2528	unsigned char *buf;
2529
2530	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2531		printk(KERN_INFO "scsi_debug: biosparam\n");
2532	buf = scsi_bios_ptable(bdev);
2533	if (buf) {
2534		res = scsi_partsize(buf, capacity,
2535				    &info[2], &info[0], &info[1]);
2536		kfree(buf);
2537		if (! res)
2538			return res;
2539	}
2540	info[0] = sdebug_heads;
2541	info[1] = sdebug_sectors_per;
2542	info[2] = sdebug_cylinders_per;
2543	return 0;
2544}
2545
2546static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2547{
2548	struct sdebug_dev_info * devip;
2549
2550	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2551		printk(KERN_INFO "scsi_debug: device_reset\n");
2552	++num_dev_resets;
2553	if (SCpnt) {
2554		devip = devInfoReg(SCpnt->device);
2555		if (devip)
2556			devip->reset = 1;
2557	}
2558	return SUCCESS;
2559}
2560
2561static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2562{
2563	struct sdebug_host_info *sdbg_host;
2564        struct sdebug_dev_info * dev_info;
2565        struct scsi_device * sdp;
2566        struct Scsi_Host * hp;
2567
2568	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2569		printk(KERN_INFO "scsi_debug: bus_reset\n");
2570	++num_bus_resets;
2571	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2572		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2573		if (sdbg_host) {
2574			list_for_each_entry(dev_info,
2575                                            &sdbg_host->dev_info_list,
2576                                            dev_list)
2577				dev_info->reset = 1;
2578		}
2579	}
2580	return SUCCESS;
2581}
2582
2583static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2584{
2585	struct sdebug_host_info * sdbg_host;
2586        struct sdebug_dev_info * dev_info;
2587
2588	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2589		printk(KERN_INFO "scsi_debug: host_reset\n");
2590	++num_host_resets;
2591        spin_lock(&sdebug_host_list_lock);
2592        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2593                list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2594                                    dev_list)
2595                        dev_info->reset = 1;
2596        }
2597        spin_unlock(&sdebug_host_list_lock);
2598	stop_all_queued();
2599	return SUCCESS;
2600}
2601
2602/* Initializes timers in queued array */
2603static void __init init_all_queued(void)
2604{
2605	unsigned long iflags;
2606	int k;
2607	struct sdebug_queued_cmd * sqcp;
2608
2609	spin_lock_irqsave(&queued_arr_lock, iflags);
2610	for (k = 0; k < scsi_debug_max_queue; ++k) {
2611		sqcp = &queued_arr[k];
2612		init_timer(&sqcp->cmnd_timer);
2613		sqcp->in_use = 0;
2614		sqcp->a_cmnd = NULL;
2615	}
2616	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2617}
2618
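/*
 * Write a DOS partition table into the first sector of the ramdisk:
 * boot signature 0x55 0xAA at offset 510 and up to SDEBUG_MAX_PARTS
 * primary entries of type 0x83 (Linux), each aligned to a "cylinder"
 * boundary derived from the fake geometry.
 */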
2619static void __init sdebug_build_parts(unsigned char *ramp,
2620				      unsigned long store_size)
2621{
2622	struct partition * pp;
2623	int starts[SDEBUG_MAX_PARTS + 2];
2624	int sectors_per_part, num_sectors, k;
2625	int heads_by_sects, start_sec, end_sec;
2626
2627	/* assume partition table already zeroed */
2628	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2629		return;
2630	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2631		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2632		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2633				    "partitions to %d\n", SDEBUG_MAX_PARTS);
2634	}
2635	num_sectors = (int)sdebug_store_sectors;
2636	sectors_per_part = (num_sectors - sdebug_sectors_per)
2637			   / scsi_debug_num_parts;
2638	heads_by_sects = sdebug_heads * sdebug_sectors_per;
2639        starts[0] = sdebug_sectors_per;
2640	for (k = 1; k < scsi_debug_num_parts; ++k)
2641		starts[k] = ((k * sectors_per_part) / heads_by_sects)
2642			    * heads_by_sects;
2643	starts[scsi_debug_num_parts] = num_sectors;
2644	starts[scsi_debug_num_parts + 1] = 0;
2645
2646	ramp[510] = 0x55;	/* magic partition markings */
2647	ramp[511] = 0xAA;
2648	pp = (struct partition *)(ramp + 0x1be);
2649	for (k = 0; starts[k + 1]; ++k, ++pp) {
2650		start_sec = starts[k];
2651		end_sec = starts[k + 1] - 1;
2652		pp->boot_ind = 0;
2653
2654		pp->cyl = start_sec / heads_by_sects;
2655		pp->head = (start_sec - (pp->cyl * heads_by_sects))
2656			   / sdebug_sectors_per;
2657		pp->sector = (start_sec % sdebug_sectors_per) + 1;
2658
2659		pp->end_cyl = end_sec / heads_by_sects;
2660		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2661			       / sdebug_sectors_per;
2662		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2663
2664		pp->start_sect = start_sec;
2665		pp->nr_sects = end_sec - start_sec + 1;
2666		pp->sys_ind = 0x83;	/* plain Linux partition */
2667	}
2668}
2669
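/*
 * Complete a command either inline (delta_jiff <= 0) or by parking it in
 * queued_arr[] with a timer; timer_intr_handler() later calls the
 * mid-level done callback with the stored result. When CHECK CONDITION
 * is reported, the device's sense buffer is first copied into the
 * command (simulated autosense).
 */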
2670static int schedule_resp(struct scsi_cmnd * cmnd,
2671			 struct sdebug_dev_info * devip,
2672			 done_funct_t done, int scsi_result, int delta_jiff)
2673{
2674	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2675		if (scsi_result) {
2676			struct scsi_device * sdp = cmnd->device;
2677
2678			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
2679			       "non-zero result=0x%x\n", sdp->host->host_no,
2680			       sdp->channel, sdp->id, sdp->lun, scsi_result);
2681		}
2682	}
2683	if (cmnd && devip) {
2684		/* simulate autosense by this driver */
2685		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2686			memcpy(cmnd->sense_buffer, devip->sense_buff,
2687			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2688			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2689	}
2690	if (delta_jiff <= 0) {
2691		if (cmnd)
2692			cmnd->result = scsi_result;
2693		if (done)
2694			done(cmnd);
2695		return 0;
2696	} else {
2697		unsigned long iflags;
2698		int k;
2699		struct sdebug_queued_cmd * sqcp = NULL;
2700
2701		spin_lock_irqsave(&queued_arr_lock, iflags);
2702		for (k = 0; k < scsi_debug_max_queue; ++k) {
2703			sqcp = &queued_arr[k];
2704			if (! sqcp->in_use)
2705				break;
2706		}
2707		if (k >= scsi_debug_max_queue) {
2708			spin_unlock_irqrestore(&queued_arr_lock, iflags);
2709			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2710			return 1;	/* report busy to mid level */
2711		}
2712		sqcp->in_use = 1;
2713		sqcp->a_cmnd = cmnd;
2714		sqcp->scsi_result = scsi_result;
2715		sqcp->done_funct = done;
2716		sqcp->cmnd_timer.function = timer_intr_handler;
2717		sqcp->cmnd_timer.data = k;
2718		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2719		add_timer(&sqcp->cmnd_timer);
2720		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2721		if (cmnd)
2722			cmnd->result = 0;
2723		return 0;
2724	}
2725}
2726/* Note: The following macros create attribute files in the
2727   /sys/module/scsi_debug/parameters directory. Unfortunately this
2728   driver is unaware of a change and cannot trigger auxiliary actions
2729   as it can when the corresponding attribute in the
2730   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2731 */
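/*
 * For example, "echo 2 > /sys/module/scsi_debug/parameters/opts" changes
 * the value but, per the note above, triggers no side effects; writing
 * the corresponding attribute under /sys/bus/pseudo/drivers/scsi_debug
 * (see the DRIVER_ATTR() definitions below) does.
 */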
2732module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2733module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2734module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2735module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2736module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2737module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2738module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2739module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2740module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2741module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2742module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2743module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2744module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2745module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2746module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2747module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2748module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2749module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2750module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2751module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2752module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2753module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2754module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2755module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2756module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2757module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2758module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2759module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2760module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2761module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2762module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2763module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2764module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2765		   S_IRUGO | S_IWUSR);
2766module_param_named(write_same_length, scsi_debug_write_same_length, int,
2767		   S_IRUGO | S_IWUSR);
2768
2769MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2770MODULE_DESCRIPTION("SCSI debug adapter driver");
2771MODULE_LICENSE("GPL");
2772MODULE_VERSION(SCSI_DEBUG_VERSION);
2773
2774MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2775MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2776MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2777MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2778MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2779MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2780MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2781MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2782MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2783MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2784MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2785MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2786MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2787MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2788MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2789MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2790MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2791MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2792MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2793MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2794MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2795MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2796MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2797MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2798MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2799MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2800MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2801MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2802MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2803MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
2804MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2805MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2806MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2807MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
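/*
 * The descriptions above are what "modinfo scsi_debug" reports. Parameters
 * declared with S_IWUSR can normally also be changed at runtime through the
 * files under /sys/module/scsi_debug/parameters/ (e.g. opts), though writing
 * there only updates the variable; the driver attribute files created further
 * below can additionally trigger side effects.
 */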
2808
2809static char sdebug_info[256];
2810
2811static const char * scsi_debug_info(struct Scsi_Host * shp)
2812{
2813	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2814		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2815		scsi_debug_version_date, scsi_debug_dev_size_mb,
2816		scsi_debug_opts);
2817	return sdebug_info;
2818}
2819
2820/* scsi_debug_proc_info
2821 * Used if the driver currently has no support of its own for /proc/scsi
2822 */
2823static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2824				int length, int inout)
2825{
2826	int len, pos, begin;
2827	int orig_length;
2828
2829	orig_length = length;
2830
2831	if (inout == 1) {
2832		char arr[16];
2833		int minLen = length > 15 ? 15 : length;
2834
2835		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2836			return -EACCES;
2837		memcpy(arr, buffer, minLen);
2838		arr[minLen] = '\0';
2839		if (1 != sscanf(arr, "%d", &pos))
2840			return -EINVAL;
2841		scsi_debug_opts = pos;
2842		if (scsi_debug_every_nth != 0)
2843                        scsi_debug_cmnd_count = 0;
2844		return length;
2845	}
2846	begin = 0;
2847	pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2848	    "%s [%s]\n"
2849	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2850	    "every_nth=%d(curr:%d)\n"
2851	    "delay=%d, max_luns=%d, scsi_level=%d\n"
2852	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2853	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2854	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2855	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2856	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2857	    scsi_debug_cmnd_count, scsi_debug_delay,
2858	    scsi_debug_max_luns, scsi_debug_scsi_level,
2859	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2860	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2861	    num_host_resets, dix_reads, dix_writes, dif_errors);
2862	if (pos < offset) {
2863		len = 0;
2864		begin = pos;
2865	}
2866	*start = buffer + (offset - begin);	/* Start of wanted data */
2867	len -= (offset - begin);
2868	if (len > length)
2869		len = length;
2870	return len;
2871}
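/*
 * Illustrative use of the legacy proc interface above (assuming the proc
 * name resolves to "scsi_debug" and the simulated host is number 2):
 *
 *   cat /proc/scsi/scsi_debug/2        # dump version, geometry and counters
 *   echo 2 > /proc/scsi/scsi_debug/2   # set scsi_debug_opts to 2 (medium_err)
 *
 * The write path requires both CAP_SYS_ADMIN and CAP_SYS_RAWIO, and a
 * non-zero every_nth also resets the running command count.
 */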
2872
2873static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2874{
2875        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2876}
2877
2878static ssize_t sdebug_delay_store(struct device_driver * ddp,
2879				  const char * buf, size_t count)
2880{
2881        int delay;
2882	char work[20];
2883
2884        if (1 == sscanf(buf, "%10s", work)) {
2885		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2886			scsi_debug_delay = delay;
2887			return count;
2888		}
2889	}
2890	return -EINVAL;
2891}
2892DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2893	    sdebug_delay_store);
2894
2895static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2896{
2897        return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2898}
2899
2900static ssize_t sdebug_opts_store(struct device_driver * ddp,
2901				 const char * buf, size_t count)
2902{
2903        int opts;
2904	char work[20];
2905
2906        if (1 == sscanf(buf, "%10s", work)) {
2907		if (0 == strnicmp(work,"0x", 2)) {
2908			if (1 == sscanf(&work[2], "%x", &opts))
2909				goto opts_done;
2910		} else {
2911			if (1 == sscanf(work, "%d", &opts))
2912				goto opts_done;
2913		}
2914	}
2915	return -EINVAL;
2916opts_done:
2917	scsi_debug_opts = opts;
2918	scsi_debug_cmnd_count = 0;
2919	return count;
2920}
2921DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2922	    sdebug_opts_store);
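/*
 * Example (path per the sysfs note later in this file): either hex or
 * decimal input is accepted, so
 *
 *   echo 0x5 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * turns on the "noise" (1) and "timeout" (4) options and resets the
 * command counter.
 */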
2923
2924static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2925{
2926        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2927}
2928static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2929				  const char * buf, size_t count)
2930{
2931        int n;
2932
2933	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2934		scsi_debug_ptype = n;
2935		return count;
2936	}
2937	return -EINVAL;
2938}
2939DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2940
2941static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2942{
2943        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2944}
2945static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2946				  const char * buf, size_t count)
2947{
2948        int n;
2949
2950	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2951		scsi_debug_dsense = n;
2952		return count;
2953	}
2954	return -EINVAL;
2955}
2956DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2957	    sdebug_dsense_store);
2958
2959static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2960{
2961        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2962}
2963static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2964				    const char * buf, size_t count)
2965{
2966        int n;
2967
2968	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2969		scsi_debug_fake_rw = n;
2970		return count;
2971	}
2972	return -EINVAL;
2973}
2974DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2975	    sdebug_fake_rw_store);
2976
2977static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2978{
2979        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2980}
2981static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2982				     const char * buf, size_t count)
2983{
2984        int n;
2985
2986	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2987		scsi_debug_no_lun_0 = n;
2988		return count;
2989	}
2990	return -EINVAL;
2991}
2992DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2993	    sdebug_no_lun_0_store);
2994
2995static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2996{
2997        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2998}
2999static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
3000				     const char * buf, size_t count)
3001{
3002        int n;
3003
3004	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3005		scsi_debug_num_tgts = n;
3006		sdebug_max_tgts_luns();
3007		return count;
3008	}
3009	return -EINVAL;
3010}
3011DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
3012	    sdebug_num_tgts_store);
3013
3014static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3015{
3016        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3017}
3018DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3019
3020static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3021{
3022        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3023}
3024DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3025
3026static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3027{
3028        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3029}
3030static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3031				      const char * buf, size_t count)
3032{
3033        int nth;
3034
3035	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3036		scsi_debug_every_nth = nth;
3037		scsi_debug_cmnd_count = 0;
3038		return count;
3039	}
3040	return -EINVAL;
3041}
3042DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3043	    sdebug_every_nth_store);
3044
3045static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3046{
3047        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3048}
3049static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3050				     const char * buf, size_t count)
3051{
3052        int n;
3053
3054	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3055		scsi_debug_max_luns = n;
3056		sdebug_max_tgts_luns();
3057		return count;
3058	}
3059	return -EINVAL;
3060}
3061DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3062	    sdebug_max_luns_store);
3063
3064static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3065{
3066        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3067}
3068static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3069				      const char * buf, size_t count)
3070{
3071        int n;
3072
3073	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3074	    (n <= SCSI_DEBUG_CANQUEUE)) {
3075		scsi_debug_max_queue = n;
3076		return count;
3077	}
3078	return -EINVAL;
3079}
3080DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3081	    sdebug_max_queue_store);
3082
3083static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3084{
3085        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3086}
3087DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3088
3089static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3090{
3091        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3092}
3093DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3094
3095static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3096{
3097        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3098}
3099static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3100				       const char * buf, size_t count)
3101{
3102        int n;
3103
3104	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3105		scsi_debug_virtual_gb = n;
3106
3107		sdebug_capacity = get_sdebug_capacity();
3108
3109		return count;
3110	}
3111	return -EINVAL;
3112}
3113DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3114	    sdebug_virtual_gb_store);
3115
3116static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3117{
3118        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3119}
3120
3121static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3122				     const char * buf, size_t count)
3123{
3124	int delta_hosts;
3125
3126	if (sscanf(buf, "%d", &delta_hosts) != 1)
3127		return -EINVAL;
3128	if (delta_hosts > 0) {
3129		do {
3130			sdebug_add_adapter();
3131		} while (--delta_hosts);
3132	} else if (delta_hosts < 0) {
3133		do {
3134			sdebug_remove_adapter();
3135		} while (++delta_hosts);
3136	}
3137	return count;
3138}
3139DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3140	    sdebug_add_host_store);
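/*
 * Example: the value written to add_host is a delta (paths as per the
 * sysfs note later in this file):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host    # add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host   # remove one host
 */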
3141
3142static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3143					  char * buf)
3144{
3145	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3146}
3147static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3148					   const char * buf, size_t count)
3149{
3150	int n;
3151
3152	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3153		scsi_debug_vpd_use_hostno = n;
3154		return count;
3155	}
3156	return -EINVAL;
3157}
3158DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3159	    sdebug_vpd_use_hostno_store);
3160
3161static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3162{
3163	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3164}
3165DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3166
3167static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3168{
3169	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3170}
3171DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3172
3173static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3174{
3175	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3176}
3177DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3178
3179static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3180{
3181	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3182}
3183DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3184
3185static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3186{
3187	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3188}
3189DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3190
3191static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3192{
3193	ssize_t count;
3194
3195	if (!scsi_debug_lbp())
3196		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3197				 sdebug_store_sectors);
3198
3199	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3200
3201	buf[count++] = '\n';
3202	buf[count++] = 0;
3203
3204	return count;
3205}
3206DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
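/*
 * Reading the map attribute returns the provisioning bitmap as a range list,
 * or "0-<store sectors>" when logical block provisioning is disabled. For
 * example, on a freshly loaded module with lbpu=1 and num_parts=1 the only
 * mapped blocks are the two holding the partition table, so the output would
 * typically be "0-1".
 */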
3207
3208
3209/* Note: The following function creates attribute files in the
3210   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3211   files (over those found in the /sys/module/scsi_debug/parameters
3212   directory) is that auxiliary actions can be triggered when an attribute
3213   is changed. For example see: sdebug_add_host_store() above.
3214 */
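/* For instance, writing to /sys/bus/pseudo/drivers/scsi_debug/max_luns goes
   through sdebug_max_luns_store() and so also calls sdebug_max_tgts_luns()
   to resize the hosts that already exist, whereas writing the corresponding
   file under /sys/module/scsi_debug/parameters (when writable) only updates
   the variable.
 */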
3215static int do_create_driverfs_files(void)
3216{
3217	int ret;
3218
3219	ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3220	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3221	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3222	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3223	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3224	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3225	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3226	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3227	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3228	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3229	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3230	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3231	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3232	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3233	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3234	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3235	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3236	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3237	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3238	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3239	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3240	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3241	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3242	return ret;
3243}
3244
3245static void do_remove_driverfs_files(void)
3246{
3247	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3248	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3249	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3250	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3251	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3252	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3253	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3254	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3255	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3256	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3257	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3258	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3259	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3260	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3261	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3262	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3263	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3264	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3265	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3266	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3267	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3268	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3269	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3270}
3271
3272struct device *pseudo_primary;
3273
3274static int __init scsi_debug_init(void)
3275{
3276	unsigned long sz;
3277	int host_to_add;
3278	int k;
3279	int ret;
3280
3281	switch (scsi_debug_sector_size) {
3282	case  512:
3283	case 1024:
3284	case 2048:
3285	case 4096:
3286		break;
3287	default:
3288		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3289		       scsi_debug_sector_size);
3290		return -EINVAL;
3291	}
3292
3293	switch (scsi_debug_dif) {
3294
3295	case SD_DIF_TYPE0_PROTECTION:
3296	case SD_DIF_TYPE1_PROTECTION:
3297	case SD_DIF_TYPE2_PROTECTION:
3298	case SD_DIF_TYPE3_PROTECTION:
3299		break;
3300
3301	default:
3302		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3303		return -EINVAL;
3304	}
3305
3306	if (scsi_debug_guard > 1) {
3307		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3308		return -EINVAL;
3309	}
3310
3311	if (scsi_debug_ato > 1) {
3312		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3313		return -EINVAL;
3314	}
3315
3316	if (scsi_debug_physblk_exp > 15) {
3317		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3318		       scsi_debug_physblk_exp);
3319		return -EINVAL;
3320	}
3321
3322	if (scsi_debug_lowest_aligned > 0x3fff) {
3323		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3324		       scsi_debug_lowest_aligned);
3325		return -EINVAL;
3326	}
3327
3328	if (scsi_debug_dev_size_mb < 1)
3329		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3330	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3331	sdebug_store_sectors = sz / scsi_debug_sector_size;
3332	sdebug_capacity = get_sdebug_capacity();
3333
3334	/* play around with geometry, don't waste too much on track 0 */
3335	sdebug_heads = 8;
3336	sdebug_sectors_per = 32;
3337	if (scsi_debug_dev_size_mb >= 256)
3338		sdebug_heads = 64;
3339	else if (scsi_debug_dev_size_mb >= 16)
3340		sdebug_heads = 32;
3341	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3342			       (sdebug_sectors_per * sdebug_heads);
3343	if (sdebug_cylinders_per >= 1024) {
3344		/* other LLDs do this; implies >= 1GB ram disk ... */
3345		sdebug_heads = 255;
3346		sdebug_sectors_per = 63;
3347		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3348			       (sdebug_sectors_per * sdebug_heads);
3349	}
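	/*
	 * Worked example with the defaults (dev_size_mb=8, sector_size=512,
	 * virtual_gb=0 so capacity equals the store size): sz = 8 * 1048576,
	 * sdebug_store_sectors = 16384, heads = 8, sectors_per = 32, giving
	 * sdebug_cylinders_per = 16384 / (32 * 8) = 64.
	 */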
3350
3351	fake_storep = vmalloc(sz);
3352	if (NULL == fake_storep) {
3353		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3354		return -ENOMEM;
3355	}
3356	memset(fake_storep, 0, sz);
3357	if (scsi_debug_num_parts > 0)
3358		sdebug_build_parts(fake_storep, sz);
3359
3360	if (scsi_debug_dif) {
3361		int dif_size;
3362
3363		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3364		dif_storep = vmalloc(dif_size);
3365
3366		printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3367		       dif_size, dif_storep);
3368
3369		if (dif_storep == NULL) {
3370			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3371			ret = -ENOMEM;
3372			goto free_vm;
3373		}
3374
3375		memset(dif_storep, 0xff, dif_size);
3376	}
3377
3378	/* Logical Block Provisioning */
3379	if (scsi_debug_lbp()) {
3380		unsigned int map_bytes;
3381
3382		scsi_debug_unmap_max_blocks =
3383			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3384
3385		scsi_debug_unmap_max_desc =
3386			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3387
3388		scsi_debug_unmap_granularity =
3389			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3390
3391		if (scsi_debug_unmap_alignment &&
3392		    scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3393			printk(KERN_ERR
3394			       "%s: ERR: unmap_granularity < unmap_alignment\n",
3395			       __func__);
3396			ret = -EINVAL;
			goto free_vm;
3397		}
3398
3399		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3400		map_bytes = map_size >> 3;
3401		map_storep = vmalloc(map_bytes);
3402
3403		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3404		       map_size);
3405
3406		if (map_storep == NULL) {
3407			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3408			ret = -ENOMEM;
3409			goto free_vm;
3410		}
3411
3412		memset(map_storep, 0x0, map_bytes);
3413
3414		/* Map first 1KB for partition table */
3415		if (scsi_debug_num_parts)
3416			map_region(0, 2);
3417	}
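	/*
	 * Sizing sketch for the defaults above: with unmap_granularity=1 the
	 * map holds one bit per logical block, so a 16384 block store needs
	 * map_bytes = 16384 >> 3 = 2048 bytes, and map_region(0, 2) pre-maps
	 * the two blocks that hold the partition table.
	 */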
3418
3419	pseudo_primary = root_device_register("pseudo_0");
3420	if (IS_ERR(pseudo_primary)) {
3421		printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3422		ret = PTR_ERR(pseudo_primary);
3423		goto free_vm;
3424	}
3425	ret = bus_register(&pseudo_lld_bus);
3426	if (ret < 0) {
3427		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3428			ret);
3429		goto dev_unreg;
3430	}
3431	ret = driver_register(&sdebug_driverfs_driver);
3432	if (ret < 0) {
3433		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3434			ret);
3435		goto bus_unreg;
3436	}
3437	ret = do_create_driverfs_files();
3438	if (ret < 0) {
3439		printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3440			ret);
3441		goto del_files;
3442	}
3443
3444	init_all_queued();
3445
3446	host_to_add = scsi_debug_add_host;
3447        scsi_debug_add_host = 0;
3448
3449        for (k = 0; k < host_to_add; k++) {
3450                if (sdebug_add_adapter()) {
3451                        printk(KERN_ERR "scsi_debug_init: "
3452                               "sdebug_add_adapter failed k=%d\n", k);
3453                        break;
3454                }
3455        }
3456
3457	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3458		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3459		       scsi_debug_add_host);
3460	}
3461	return 0;
3462
3463del_files:
3464	do_remove_driverfs_files();
3465	driver_unregister(&sdebug_driverfs_driver);
3466bus_unreg:
3467	bus_unregister(&pseudo_lld_bus);
3468dev_unreg:
3469	root_device_unregister(pseudo_primary);
3470free_vm:
3471	if (map_storep)
3472		vfree(map_storep);
3473	if (dif_storep)
3474		vfree(dif_storep);
3475	vfree(fake_storep);
3476
3477	return ret;
3478}
3479
3480static void __exit scsi_debug_exit(void)
3481{
3482	int k = scsi_debug_add_host;
3483
3484	stop_all_queued();
3485	for (; k; k--)
3486		sdebug_remove_adapter();
3487	do_remove_driverfs_files();
3488	driver_unregister(&sdebug_driverfs_driver);
3489	bus_unregister(&pseudo_lld_bus);
3490	root_device_unregister(pseudo_primary);
3491
3492	if (dif_storep)
3493		vfree(dif_storep);
3494
	vfree(map_storep);	/* vfree(NULL) is a no-op */
3495	vfree(fake_storep);
3496}
3497
3498device_initcall(scsi_debug_init);
3499module_exit(scsi_debug_exit);
3500
3501static void sdebug_release_adapter(struct device * dev)
3502{
3503        struct sdebug_host_info *sdbg_host;
3504
3505	sdbg_host = to_sdebug_host(dev);
3506        kfree(sdbg_host);
3507}
3508
3509static int sdebug_add_adapter(void)
3510{
3511	int k, devs_per_host;
3512        int error = 0;
3513        struct sdebug_host_info *sdbg_host;
3514	struct sdebug_dev_info *sdbg_devinfo, *tmp;
3515
3516        sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3517        if (NULL == sdbg_host) {
3518                printk(KERN_ERR "%s: out of memory at line %d\n",
3519                       __func__, __LINE__);
3520                return -ENOMEM;
3521        }
3522
3523        INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3524
3525	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3526        for (k = 0; k < devs_per_host; k++) {
3527		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3528		if (!sdbg_devinfo) {
3529                        printk(KERN_ERR "%s: out of memory at line %d\n",
3530                               __func__, __LINE__);
3531                        error = -ENOMEM;
3532			goto clean;
3533                }
3534        }
3535
3536        spin_lock(&sdebug_host_list_lock);
3537        list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3538        spin_unlock(&sdebug_host_list_lock);
3539
3540        sdbg_host->dev.bus = &pseudo_lld_bus;
3541        sdbg_host->dev.parent = pseudo_primary;
3542        sdbg_host->dev.release = &sdebug_release_adapter;
3543        dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3544
3545        error = device_register(&sdbg_host->dev);
3546
3547        if (error)
3548		goto clean;
3549
3550	++scsi_debug_add_host;
3551        return error;
3552
3553clean:
3554	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3555				 dev_list) {
3556		list_del(&sdbg_devinfo->dev_list);
3557		kfree(sdbg_devinfo);
3558	}
3559
3560	kfree(sdbg_host);
3561        return error;
3562}
3563
3564static void sdebug_remove_adapter(void)
3565{
3566        struct sdebug_host_info * sdbg_host = NULL;
3567
3568        spin_lock(&sdebug_host_list_lock);
3569        if (!list_empty(&sdebug_host_list)) {
3570                sdbg_host = list_entry(sdebug_host_list.prev,
3571                                       struct sdebug_host_info, host_list);
3572		list_del(&sdbg_host->host_list);
3573	}
3574        spin_unlock(&sdebug_host_list_lock);
3575
3576	if (!sdbg_host)
3577		return;
3578
3579        device_unregister(&sdbg_host->dev);
3580        --scsi_debug_add_host;
3581}
3582
3583static
3584int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3585{
3586	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3587	int len, k;
3588	unsigned int num;
3589	unsigned long long lba;
3590	u32 ei_lba;
3591	int errsts = 0;
3592	int target = SCpnt->device->id;
3593	struct sdebug_dev_info *devip = NULL;
3594	int inj_recovered = 0;
3595	int inj_transport = 0;
3596	int inj_dif = 0;
3597	int inj_dix = 0;
3598	int delay_override = 0;
3599	int unmap = 0;
3600
3601	scsi_set_resid(SCpnt, 0);
3602	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3603		printk(KERN_INFO "scsi_debug: cmd ");
3604		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3605			printk("%02x ", (int)cmd[k]);
3606		printk("\n");
3607	}
3608
3609	if (target == SCpnt->device->host->hostt->this_id) {
3610		printk(KERN_INFO "scsi_debug: initiator's id used as "
3611		       "target!\n");
3612		return schedule_resp(SCpnt, NULL, done,
3613				     DID_NO_CONNECT << 16, 0);
3614	}
3615
3616	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3617	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3618		return schedule_resp(SCpnt, NULL, done,
3619				     DID_NO_CONNECT << 16, 0);
3620	devip = devInfoReg(SCpnt->device);
3621	if (NULL == devip)
3622		return schedule_resp(SCpnt, NULL, done,
3623				     DID_NO_CONNECT << 16, 0);
3624
3625	if ((scsi_debug_every_nth != 0) &&
3626	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3627		scsi_debug_cmnd_count = 0;
3628		if (scsi_debug_every_nth < -1)
3629			scsi_debug_every_nth = -1;
3630		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3631			return 0; /* ignore command causing timeout */
3632		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3633			 scsi_medium_access_command(SCpnt))
3634			return 0; /* time out reads and writes */
3635		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3636			inj_recovered = 1; /* to reads and writes below */
3637		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3638			inj_transport = 1; /* to reads and writes below */
3639		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3640			inj_dif = 1; /* to reads and writes below */
3641		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3642			inj_dix = 1; /* to reads and writes below */
3643	}
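	/*
	 * Example: loading with every_nth=100 and opts=4 (the "timeout" bit in
	 * the opts description) makes every 100th command be silently dropped
	 * here, so the mid-layer eventually times it out and the error handler
	 * entry points below get exercised.
	 */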
3644
3645	if (devip->wlun) {
3646		switch (*cmd) {
3647		case INQUIRY:
3648		case REQUEST_SENSE:
3649		case TEST_UNIT_READY:
3650		case REPORT_LUNS:
3651			break;  /* only allowable wlun commands */
3652		default:
3653			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3654				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3655				       "not supported for wlun\n", *cmd);
3656			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3657					INVALID_OPCODE, 0);
3658			errsts = check_condition_result;
3659			return schedule_resp(SCpnt, devip, done, errsts,
3660					     0);
3661		}
3662	}
3663
3664	switch (*cmd) {
3665	case INQUIRY:     /* mandatory, ignore unit attention */
3666		delay_override = 1;
3667		errsts = resp_inquiry(SCpnt, target, devip);
3668		break;
3669	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
3670		delay_override = 1;
3671		errsts = resp_requests(SCpnt, devip);
3672		break;
3673	case REZERO_UNIT:	/* actually this is REWIND for SSC */
3674	case START_STOP:
3675		errsts = resp_start_stop(SCpnt, devip);
3676		break;
3677	case ALLOW_MEDIUM_REMOVAL:
3678		errsts = check_readiness(SCpnt, 1, devip);
3679		if (errsts)
3680			break;
3681		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3682			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3683			       cmd[4] ? "inhibited" : "enabled");
3684		break;
3685	case SEND_DIAGNOSTIC:     /* mandatory */
3686		errsts = check_readiness(SCpnt, 1, devip);
3687		break;
3688	case TEST_UNIT_READY:     /* mandatory */
3689		delay_override = 1;
3690		errsts = check_readiness(SCpnt, 0, devip);
3691		break;
3692	case RESERVE:
3693		errsts = check_readiness(SCpnt, 1, devip);
3694		break;
3695	case RESERVE_10:
3696		errsts = check_readiness(SCpnt, 1, devip);
3697		break;
3698	case RELEASE:
3699		errsts = check_readiness(SCpnt, 1, devip);
3700		break;
3701	case RELEASE_10:
3702		errsts = check_readiness(SCpnt, 1, devip);
3703		break;
3704	case READ_CAPACITY:
3705		errsts = resp_readcap(SCpnt, devip);
3706		break;
3707	case SERVICE_ACTION_IN:
3708		if (cmd[1] == SAI_READ_CAPACITY_16)
3709			errsts = resp_readcap16(SCpnt, devip);
3710		else if (cmd[1] == SAI_GET_LBA_STATUS) {
3711
3712			if (scsi_debug_lbp() == 0) {
3713				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3714						INVALID_COMMAND_OPCODE, 0);
3715				errsts = check_condition_result;
3716			} else
3717				errsts = resp_get_lba_status(SCpnt, devip);
3718		} else {
3719			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3720					INVALID_OPCODE, 0);
3721			errsts = check_condition_result;
3722		}
3723		break;
3724	case MAINTENANCE_IN:
3725		if (MI_REPORT_TARGET_PGS != cmd[1]) {
3726			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3727					INVALID_OPCODE, 0);
3728			errsts = check_condition_result;
3729			break;
3730		}
3731		errsts = resp_report_tgtpgs(SCpnt, devip);
3732		break;
3733	case READ_16:
3734	case READ_12:
3735	case READ_10:
3736		/* READ{10,12,16} and DIF Type 2 are natural enemies */
3737		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3738		    cmd[1] & 0xe0) {
3739			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3740					INVALID_COMMAND_OPCODE, 0);
3741			errsts = check_condition_result;
3742			break;
3743		}
3744
3745		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3746		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3747		    (cmd[1] & 0xe0) == 0)
3748			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3749
3750		/* fall through */
3751	case READ_6:
3752read:
3753		errsts = check_readiness(SCpnt, 0, devip);
3754		if (errsts)
3755			break;
3756		if (scsi_debug_fake_rw)
3757			break;
3758		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3759		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3760		if (inj_recovered && (0 == errsts)) {
3761			mk_sense_buffer(devip, RECOVERED_ERROR,
3762					THRESHOLD_EXCEEDED, 0);
3763			errsts = check_condition_result;
3764		} else if (inj_transport && (0 == errsts)) {
3765			mk_sense_buffer(devip, ABORTED_COMMAND,
3766					TRANSPORT_PROBLEM, ACK_NAK_TO);
3767			errsts = check_condition_result;
3768		} else if (inj_dif && (0 == errsts)) {
3769			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3770			errsts = illegal_condition_result;
3771		} else if (inj_dix && (0 == errsts)) {
3772			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3773			errsts = illegal_condition_result;
3774		}
3775		break;
3776	case REPORT_LUNS:	/* mandatory, ignore unit attention */
3777		delay_override = 1;
3778		errsts = resp_report_luns(SCpnt, devip);
3779		break;
3780	case VERIFY:		/* 10 byte SBC-2 command */
3781		errsts = check_readiness(SCpnt, 0, devip);
3782		break;
3783	case WRITE_16:
3784	case WRITE_12:
3785	case WRITE_10:
3786		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3787		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3788		    cmd[1] & 0xe0) {
3789			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3790					INVALID_COMMAND_OPCODE, 0);
3791			errsts = check_condition_result;
3792			break;
3793		}
3794
3795		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3796		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3797		    (cmd[1] & 0xe0) == 0)
3798			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3799
3800		/* fall through */
3801	case WRITE_6:
3802write:
3803		errsts = check_readiness(SCpnt, 0, devip);
3804		if (errsts)
3805			break;
3806		if (scsi_debug_fake_rw)
3807			break;
3808		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3809		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3810		if (inj_recovered && (0 == errsts)) {
3811			mk_sense_buffer(devip, RECOVERED_ERROR,
3812					THRESHOLD_EXCEEDED, 0);
3813			errsts = check_condition_result;
3814		} else if (inj_dif && (0 == errsts)) {
3815			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3816			errsts = illegal_condition_result;
3817		} else if (inj_dix && (0 == errsts)) {
3818			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3819			errsts = illegal_condition_result;
3820		}
3821		break;
3822	case WRITE_SAME_16:
3823	case WRITE_SAME:
3824		if (cmd[1] & 0x8) {
3825			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3826			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3827				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3828						INVALID_FIELD_IN_CDB, 0);
3829				errsts = check_condition_result;
3830			} else
3831				unmap = 1;
3832		}
3833		if (errsts)
3834			break;
3835		errsts = check_readiness(SCpnt, 0, devip);
3836		if (errsts)
3837			break;
3838		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3839		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3840		break;
3841	case UNMAP:
3842		errsts = check_readiness(SCpnt, 0, devip);
3843		if (errsts)
3844			break;
3845
3846		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3847			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3848					INVALID_COMMAND_OPCODE, 0);
3849			errsts = check_condition_result;
3850		} else
3851			errsts = resp_unmap(SCpnt, devip);
3852		break;
3853	case MODE_SENSE:
3854	case MODE_SENSE_10:
3855		errsts = resp_mode_sense(SCpnt, target, devip);
3856		break;
3857	case MODE_SELECT:
3858		errsts = resp_mode_select(SCpnt, 1, devip);
3859		break;
3860	case MODE_SELECT_10:
3861		errsts = resp_mode_select(SCpnt, 0, devip);
3862		break;
3863	case LOG_SENSE:
3864		errsts = resp_log_sense(SCpnt, devip);
3865		break;
3866	case SYNCHRONIZE_CACHE:
3867		delay_override = 1;
3868		errsts = check_readiness(SCpnt, 0, devip);
3869		break;
3870	case WRITE_BUFFER:
3871		errsts = check_readiness(SCpnt, 1, devip);
3872		break;
3873	case XDWRITEREAD_10:
3874		if (!scsi_bidi_cmnd(SCpnt)) {
3875			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3876					INVALID_FIELD_IN_CDB, 0);
3877			errsts = check_condition_result;
3878			break;
3879		}
3880
3881		errsts = check_readiness(SCpnt, 0, devip);
3882		if (errsts)
3883			break;
3884		if (scsi_debug_fake_rw)
3885			break;
3886		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3887		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3888		if (errsts)
3889			break;
3890		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3891		if (errsts)
3892			break;
3893		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3894		break;
3895	case VARIABLE_LENGTH_CMD:
3896		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3897
3898			if ((cmd[10] & 0xe0) == 0)
3899				printk(KERN_ERR
3900				       "Unprotected RD/WR to DIF device\n");
3901
3902			if (cmd[9] == READ_32) {
3903				BUG_ON(SCpnt->cmd_len < 32);
3904				goto read;
3905			}
3906
3907			if (cmd[9] == WRITE_32) {
3908				BUG_ON(SCpnt->cmd_len < 32);
3909				goto write;
3910			}
3911		}
3912
3913		mk_sense_buffer(devip, ILLEGAL_REQUEST,
3914				INVALID_FIELD_IN_CDB, 0);
3915		errsts = check_condition_result;
3916		break;
3917
3918	default:
3919		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3920			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3921			       "supported\n", *cmd);
3922		errsts = check_readiness(SCpnt, 1, devip);
3923		if (errsts)
3924			break;	/* Unit attention takes precedence */
3925		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3926		errsts = check_condition_result;
3927		break;
3928	}
3929	return schedule_resp(SCpnt, devip, done, errsts,
3930			     (delay_override ? 0 : scsi_debug_delay));
3931}
3932
3933static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3934
3935static struct scsi_host_template sdebug_driver_template = {
3936	.proc_info =		scsi_debug_proc_info,
3937	.proc_name =		sdebug_proc_name,
3938	.name =			"SCSI DEBUG",
3939	.info =			scsi_debug_info,
3940	.slave_alloc =		scsi_debug_slave_alloc,
3941	.slave_configure =	scsi_debug_slave_configure,
3942	.slave_destroy =	scsi_debug_slave_destroy,
3943	.ioctl =		scsi_debug_ioctl,
3944	.queuecommand =		scsi_debug_queuecommand,
3945	.eh_abort_handler =	scsi_debug_abort,
3946	.eh_bus_reset_handler = scsi_debug_bus_reset,
3947	.eh_device_reset_handler = scsi_debug_device_reset,
3948	.eh_host_reset_handler = scsi_debug_host_reset,
3949	.bios_param =		scsi_debug_biosparam,
3950	.can_queue =		SCSI_DEBUG_CANQUEUE,
3951	.this_id =		7,
3952	.sg_tablesize =		256,
3953	.cmd_per_lun =		16,
3954	.max_sectors =		0xffff,
3955	.use_clustering = 	DISABLE_CLUSTERING,
3956	.module =		THIS_MODULE,
3957};
3958
3959static int sdebug_driver_probe(struct device * dev)
3960{
3961        int error = 0;
3962        struct sdebug_host_info *sdbg_host;
3963        struct Scsi_Host *hpnt;
3964	int host_prot;
3965
3966	sdbg_host = to_sdebug_host(dev);
3967
3968	sdebug_driver_template.can_queue = scsi_debug_max_queue;
3969	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3970	if (NULL == hpnt) {
3971		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3972		error = -ENODEV;
3973		return error;
3974	}
3975
3976        sdbg_host->shost = hpnt;
3977	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3978	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3979		hpnt->max_id = scsi_debug_num_tgts + 1;
3980	else
3981		hpnt->max_id = scsi_debug_num_tgts;
3982	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
3983
3984	host_prot = 0;
3985
3986	switch (scsi_debug_dif) {
3987
3988	case SD_DIF_TYPE1_PROTECTION:
3989		host_prot = SHOST_DIF_TYPE1_PROTECTION;
3990		if (scsi_debug_dix)
3991			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3992		break;
3993
3994	case SD_DIF_TYPE2_PROTECTION:
3995		host_prot = SHOST_DIF_TYPE2_PROTECTION;
3996		if (scsi_debug_dix)
3997			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3998		break;
3999
4000	case SD_DIF_TYPE3_PROTECTION:
4001		host_prot = SHOST_DIF_TYPE3_PROTECTION;
4002		if (scsi_debug_dix)
4003			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4004		break;
4005
4006	default:
4007		if (scsi_debug_dix)
4008			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4009		break;
4010	}
4011
4012	scsi_host_set_prot(hpnt, host_prot);
4013
4014	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4015	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4016	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4017	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4018	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4019	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4020	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4021	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4022
4023	if (scsi_debug_guard == 1)
4024		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4025	else
4026		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
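	/*
	 * For example, loading with dif=1 dix=1 guard=0 yields host_prot =
	 * SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION with a CRC
	 * guard, and the printk above reports " DIF1 DIX1".
	 */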
4027
4028        error = scsi_add_host(hpnt, &sdbg_host->dev);
4029        if (error) {
4030                printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4031                error = -ENODEV;
4032		scsi_host_put(hpnt);
4033        } else
4034		scsi_scan_host(hpnt);
4035
4036
4037        return error;
4038}
4039
4040static int sdebug_driver_remove(struct device * dev)
4041{
4042        struct sdebug_host_info *sdbg_host;
4043	struct sdebug_dev_info *sdbg_devinfo, *tmp;
4044
4045	sdbg_host = to_sdebug_host(dev);
4046
4047	if (!sdbg_host) {
4048		printk(KERN_ERR "%s: Unable to locate host info\n",
4049		       __func__);
4050		return -ENODEV;
4051	}
4052
4053        scsi_remove_host(sdbg_host->shost);
4054
4055	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4056				 dev_list) {
4057                list_del(&sdbg_devinfo->dev_list);
4058                kfree(sdbg_devinfo);
4059        }
4060
4061        scsi_host_put(sdbg_host->shost);
4062        return 0;
4063}
4064
4065static int pseudo_lld_bus_match(struct device *dev,
4066				struct device_driver *dev_driver)
4067{
4068	return 1;
4069}
4070
4071static struct bus_type pseudo_lld_bus = {
4072	.name = "pseudo",
4073	.match = pseudo_lld_bus_match,
4074	.probe = sdebug_driver_probe,
4075	.remove = sdebug_driver_remove,
4076};