   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   4 *  Copyright (C) 1992  Eric Youngdale
   5 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   6 *  to make sure that we are not getting blocks mixed up, and PANIC if
   7 *  anything out of the ordinary is seen.
   8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   9 *
  10 * Copyright (C) 2001 - 2021 Douglas Gilbert
  11 *
  12 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
  13 */
  14
  15
  16#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  17
  18#include <linux/module.h>
  19#include <linux/align.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/jiffies.h>
  23#include <linux/slab.h>
  24#include <linux/types.h>
  25#include <linux/string.h>
  26#include <linux/fs.h>
  27#include <linux/init.h>
  28#include <linux/proc_fs.h>
  29#include <linux/vmalloc.h>
  30#include <linux/moduleparam.h>
  31#include <linux/scatterlist.h>
  32#include <linux/blkdev.h>
  33#include <linux/crc-t10dif.h>
  34#include <linux/spinlock.h>
  35#include <linux/interrupt.h>
  36#include <linux/atomic.h>
  37#include <linux/hrtimer.h>
  38#include <linux/uuid.h>
  39#include <linux/t10-pi.h>
  40#include <linux/msdos_partition.h>
  41#include <linux/random.h>
  42#include <linux/xarray.h>
  43#include <linux/prefetch.h>
  44#include <linux/debugfs.h>
  45#include <linux/async.h>
  46#include <linux/cleanup.h>
  47
  48#include <net/checksum.h>
  49
  50#include <asm/unaligned.h>
  51
  52#include <scsi/scsi.h>
  53#include <scsi/scsi_cmnd.h>
  54#include <scsi/scsi_device.h>
  55#include <scsi/scsi_host.h>
  56#include <scsi/scsicam.h>
  57#include <scsi/scsi_eh.h>
  58#include <scsi/scsi_tcq.h>
  59#include <scsi/scsi_dbg.h>
  60
  61#include "sd.h"
  62#include "scsi_logging.h"
  63
  64/* make sure inq_product_rev string corresponds to this version */
  65#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
  66static const char *sdebug_version_date = "20210520";
  67
  68#define MY_NAME "scsi_debug"
  69
  70/* Additional Sense Code (ASC) */
  71#define NO_ADDITIONAL_SENSE 0x0
  72#define LOGICAL_UNIT_NOT_READY 0x4
  73#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
  74#define UNRECOVERED_READ_ERR 0x11
  75#define PARAMETER_LIST_LENGTH_ERR 0x1a
  76#define INVALID_OPCODE 0x20
  77#define LBA_OUT_OF_RANGE 0x21
  78#define INVALID_FIELD_IN_CDB 0x24
  79#define INVALID_FIELD_IN_PARAM_LIST 0x26
  80#define WRITE_PROTECTED 0x27
  81#define UA_RESET_ASC 0x29
  82#define UA_CHANGED_ASC 0x2a
  83#define TARGET_CHANGED_ASC 0x3f
  84#define LUNS_CHANGED_ASCQ 0x0e
  85#define INSUFF_RES_ASC 0x55
  86#define INSUFF_RES_ASCQ 0x3
  87#define POWER_ON_RESET_ASCQ 0x0
  88#define POWER_ON_OCCURRED_ASCQ 0x1
  89#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
  90#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
  91#define CAPACITY_CHANGED_ASCQ 0x9
  92#define SAVING_PARAMS_UNSUP 0x39
  93#define TRANSPORT_PROBLEM 0x4b
  94#define THRESHOLD_EXCEEDED 0x5d
  95#define LOW_POWER_COND_ON 0x5e
  96#define MISCOMPARE_VERIFY_ASC 0x1d
  97#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
  98#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
  99#define WRITE_ERROR_ASC 0xc
 100#define UNALIGNED_WRITE_ASCQ 0x4
 101#define WRITE_BOUNDARY_ASCQ 0x5
 102#define READ_INVDATA_ASCQ 0x6
 103#define READ_BOUNDARY_ASCQ 0x7
 104#define ATTEMPT_ACCESS_GAP 0x9
 105#define INSUFF_ZONE_ASCQ 0xe
 106
 107/* Additional Sense Code Qualifier (ASCQ) */
 108#define ACK_NAK_TO 0x3
 109
 110/* Default values for driver parameters */
 111#define DEF_NUM_HOST   1
 112#define DEF_NUM_TGTS   1
 113#define DEF_MAX_LUNS   1
 114/* With these defaults, this driver will make 1 host with 1 target
 115 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 116 */
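/*
 * For example, assuming the usual scsi_debug module parameter names
 * (registered further down in this file), a hypothetical invocation such as
 *	modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4
 * would instead create 2 hosts, each with 2 targets of 4 logical units.
 */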
 117#define DEF_ATO 1
 118#define DEF_CDB_LEN 10
 119#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
 120#define DEF_DEV_SIZE_PRE_INIT   0
 121#define DEF_DEV_SIZE_MB   8
 122#define DEF_ZBC_DEV_SIZE_MB   128
 123#define DEF_DIF 0
 124#define DEF_DIX 0
 125#define DEF_PER_HOST_STORE false
 126#define DEF_D_SENSE   0
 127#define DEF_EVERY_NTH   0
 128#define DEF_FAKE_RW	0
 129#define DEF_GUARD 0
 130#define DEF_HOST_LOCK 0
 131#define DEF_LBPU 0
 132#define DEF_LBPWS 0
 133#define DEF_LBPWS10 0
 134#define DEF_LBPRZ 1
 135#define DEF_LOWEST_ALIGNED 0
 136#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
 137#define DEF_NO_LUN_0   0
 138#define DEF_NUM_PARTS   0
 139#define DEF_OPTS   0
 140#define DEF_OPT_BLKS 1024
 141#define DEF_PHYSBLK_EXP 0
 142#define DEF_OPT_XFERLEN_EXP 0
 143#define DEF_PTYPE   TYPE_DISK
 144#define DEF_RANDOM false
 145#define DEF_REMOVABLE false
 146#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
 147#define DEF_SECTOR_SIZE 512
 148#define DEF_UNMAP_ALIGNMENT 0
 149#define DEF_UNMAP_GRANULARITY 1
 150#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 151#define DEF_UNMAP_MAX_DESC 256
 152#define DEF_VIRTUAL_GB   0
 153#define DEF_VPD_USE_HOSTNO 1
 154#define DEF_WRITESAME_LENGTH 0xFFFF
 155#define DEF_STRICT 0
 156#define DEF_STATISTICS false
 157#define DEF_SUBMIT_QUEUES 1
 158#define DEF_TUR_MS_TO_READY 0
 159#define DEF_UUID_CTL 0
 160#define JDELAY_OVERRIDDEN -9999
 161
 162/* Default parameters for ZBC drives */
 163#define DEF_ZBC_ZONE_SIZE_MB	128
 164#define DEF_ZBC_MAX_OPEN_ZONES	8
 165#define DEF_ZBC_NR_CONV_ZONES	1
 166
 167#define SDEBUG_LUN_0_VAL 0
 168
 169/* bit mask values for sdebug_opts */
 170#define SDEBUG_OPT_NOISE		1
 171#define SDEBUG_OPT_MEDIUM_ERR		2
 172#define SDEBUG_OPT_TIMEOUT		4
 173#define SDEBUG_OPT_RECOVERED_ERR	8
 174#define SDEBUG_OPT_TRANSPORT_ERR	16
 175#define SDEBUG_OPT_DIF_ERR		32
 176#define SDEBUG_OPT_DIX_ERR		64
 177#define SDEBUG_OPT_MAC_TIMEOUT		128
 178#define SDEBUG_OPT_SHORT_TRANSFER	0x100
 179#define SDEBUG_OPT_Q_NOISE		0x200
 180#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
 181#define SDEBUG_OPT_RARE_TSF		0x800
 182#define SDEBUG_OPT_N_WCE		0x1000
 183#define SDEBUG_OPT_RESET_NOISE		0x2000
 184#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
 185#define SDEBUG_OPT_HOST_BUSY		0x8000
 186#define SDEBUG_OPT_CMD_ABORT		0x10000
 187#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
 188			      SDEBUG_OPT_RESET_NOISE)
 189#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
 190				  SDEBUG_OPT_TRANSPORT_ERR | \
 191				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
 192				  SDEBUG_OPT_SHORT_TRANSFER | \
 193				  SDEBUG_OPT_HOST_BUSY | \
 194				  SDEBUG_OPT_CMD_ABORT)
 195#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
 196				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
 197
 198/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 199 * priority order. In the subset implemented here lower numbers have higher
 200 * priority. The UA numbers should be a sequence starting from 0 with
 201 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
 202#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
 203#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
 204#define SDEBUG_UA_BUS_RESET 2
 205#define SDEBUG_UA_MODE_CHANGED 3
 206#define SDEBUG_UA_CAPACITY_CHANGED 4
 207#define SDEBUG_UA_LUNS_CHANGED 5
 208#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
 209#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
 210#define SDEBUG_NUM_UAS 8
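/*
 * A pending UA is flagged by setting its bit in the per-device uas_bm
 * bitmap (see struct sdebug_dev_info below); make_ua() then reports the
 * lowest numbered (highest priority) pending UA first. A minimal sketch:
 *
 *	set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
 *	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *
 * k is then SDEBUG_UA_BUS_RESET unless a lower numbered UA such as
 * SDEBUG_UA_POR is also pending.
 */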
 211
 212/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 213 * sector on read commands: */
 214#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 215#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
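/*
 * A minimal sketch of triggering this from user space: load the module
 * with the SDEBUG_OPT_MEDIUM_ERR bit set in opts ("modprobe scsi_debug
 * opts=2") and read the magic sector, e.g.
 *	dd if=/dev/sdX of=/dev/null bs=512 skip=4660 count=1
 * (device name hypothetical); the read should fail with sense data of
 * MEDIUM ERROR, UNRECOVERED READ ERROR.
 */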
 216
 217/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 218 * (for response) per submit queue at one time. Can be reduced by max_queue
 219 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 220 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 221 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 222 * but cannot exceed SDEBUG_CANQUEUE .
 223 */
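/*
 * For example, to reduce the queue depth of a single simulated device:
 *	echo 4 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 * (the 0:0:0:0 h:c:t:l tuple is illustrative).
 */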
 224#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
 225#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
 226#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
 227
 228/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
 229#define F_D_IN			1	/* Data-in command (e.g. READ) */
 230#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
 231#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
 232#define F_D_UNKN		8
 233#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
 234#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
 235#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
 236#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
 237#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
 238#define F_INV_OP		0x200	/* invalid opcode (not supported) */
 239#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
 240#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
 241#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
 242#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
 243
 244/* Useful combinations of the above flags */
 245#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
 246#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
 247#define FF_SA (F_SA_HIGH | F_SA_LOW)
 248#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
 249
 250#define SDEBUG_MAX_PARTS 4
 251
 252#define SDEBUG_MAX_CMD_LEN 32
 253
 254#define SDEB_XA_NOT_IN_USE XA_MARK_1
 255
 256static struct kmem_cache *queued_cmd_cache;
 257
 258#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
 259#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
 260
 261/* Zone types (zbcr05 table 25) */
 262enum sdebug_z_type {
 263	ZBC_ZTYPE_CNV	= 0x1,
 264	ZBC_ZTYPE_SWR	= 0x2,
 265	ZBC_ZTYPE_SWP	= 0x3,
 266	/* ZBC_ZTYPE_SOBR = 0x4, */
 267	ZBC_ZTYPE_GAP	= 0x5,
 268};
 269
 270/* enumeration names taken from table 26, zbcr05 */
 271enum sdebug_z_cond {
 272	ZBC_NOT_WRITE_POINTER	= 0x0,
 273	ZC1_EMPTY		= 0x1,
 274	ZC2_IMPLICIT_OPEN	= 0x2,
 275	ZC3_EXPLICIT_OPEN	= 0x3,
 276	ZC4_CLOSED		= 0x4,
 277	ZC6_READ_ONLY		= 0xd,
 278	ZC5_FULL		= 0xe,
 279	ZC7_OFFLINE		= 0xf,
 280};
 281
 282struct sdeb_zone_state {	/* ZBC: per zone state */
 283	enum sdebug_z_type z_type;
 284	enum sdebug_z_cond z_cond;
 285	bool z_non_seq_resource;
 286	unsigned int z_size;
 287	sector_t z_start;
 288	sector_t z_wp;
 289};
 290
 291enum sdebug_err_type {
 292	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
 293	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
 294					/* queuecmd return failed */
 295	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
 296					/* queuecmd return succeed but */
 297					/* with errors set in scsi_cmnd */
 298	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
 299					/* scsi_debug_abort() */
 300	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
  301					/* scsi_debug_device_reset() */
 302};
 303
 304struct sdebug_err_inject {
 305	int type;
 306	struct list_head list;
 307	int cnt;
 308	unsigned char cmd;
 309	struct rcu_head rcu;
 310
 311	union {
 312		/*
 313		 * For ERR_FAIL_QUEUE_CMD
 314		 */
 315		int queuecmd_ret;
 316
 317		/*
 318		 * For ERR_FAIL_CMD
 319		 */
 320		struct {
 321			unsigned char host_byte;
 322			unsigned char driver_byte;
 323			unsigned char status_byte;
 324			unsigned char sense_key;
 325			unsigned char asc;
 326			unsigned char asq;
 327		};
 328	};
 329};
 330
 331struct sdebug_dev_info {
 332	struct list_head dev_list;
 333	unsigned int channel;
 334	unsigned int target;
 335	u64 lun;
 336	uuid_t lu_name;
 337	struct sdebug_host_info *sdbg_host;
 338	unsigned long uas_bm[1];
 339	atomic_t stopped;	/* 1: by SSU, 2: device start */
 340	bool used;
 341
 342	/* For ZBC devices */
 343	bool zoned;
 344	unsigned int zcap;
 345	unsigned int zsize;
 346	unsigned int zsize_shift;
 347	unsigned int nr_zones;
 348	unsigned int nr_conv_zones;
 349	unsigned int nr_seq_zones;
 350	unsigned int nr_imp_open;
 351	unsigned int nr_exp_open;
 352	unsigned int nr_closed;
 353	unsigned int max_open;
 354	ktime_t create_ts;	/* time since bootup that this device was created */
 355	struct sdeb_zone_state *zstate;
 356
 357	struct dentry *debugfs_entry;
 358	struct spinlock list_lock;
 359	struct list_head inject_err_list;
 360};
 361
 362struct sdebug_target_info {
 363	bool reset_fail;
 364	struct dentry *debugfs_entry;
 365};
 366
 367struct sdebug_host_info {
 368	struct list_head host_list;
 369	int si_idx;	/* sdeb_store_info (per host) xarray index */
 370	struct Scsi_Host *shost;
 371	struct device dev;
 372	struct list_head dev_info_list;
 373};
 374
 375/* There is an xarray of pointers to this struct's objects, one per host */
 376struct sdeb_store_info {
 377	rwlock_t macc_lck;	/* for atomic media access on this store */
 378	u8 *storep;		/* user data storage (ram) */
 379	struct t10_pi_tuple *dif_storep; /* protection info */
 380	void *map_storep;	/* provisioning map */
 381};
 382
 383#define dev_to_sdebug_host(d)	\
 384	container_of(d, struct sdebug_host_info, dev)
 385
 386#define shost_to_sdebug_host(shost)	\
 387	dev_to_sdebug_host(shost->dma_dev)
 388
 389enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
 390		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
 391
 392struct sdebug_defer {
 393	struct hrtimer hrt;
 394	struct execute_work ew;
 395	ktime_t cmpl_ts;/* time since boot to complete this cmd */
 396	int issuing_cpu;
 397	bool aborted;	/* true when blk_abort_request() already called */
 398	enum sdeb_defer_type defer_t;
 399};
 400
 401struct sdebug_queued_cmd {
 402	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
 403	 * instance indicates this slot is in use.
 404	 */
 405	struct sdebug_defer sd_dp;
 406	struct scsi_cmnd *scmd;
 407};
 408
 409struct sdebug_scsi_cmd {
 410	spinlock_t   lock;
 411};
 412
 413static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
 414static atomic_t sdebug_completions;  /* count of deferred completions */
 415static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 416static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
 417static atomic_t sdeb_inject_pending;
 418static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
 419
 420struct opcode_info_t {
 421	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
 422				/* for terminating element */
 423	u8 opcode;		/* if num_attached > 0, preferred */
 424	u16 sa;			/* service action */
 425	u32 flags;		/* OR-ed set of SDEB_F_* */
 426	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
 427	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
 428	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
 429				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
 430};
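/*
 * Worked example, taken from the INQUIRY entry in opcode_info_arr[] below:
 *	{6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}
 * len_mask[0] == 6 gives the cdb length; len_mask[1] == 0xe3 means only
 * bits 7..5, 1 and 0 may be set in cdb[1]. With the strict parameter set,
 * other bits set there are rejected with ILLEGAL REQUEST, INVALID FIELD
 * IN CDB.
 */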
 431
 432/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
 433enum sdeb_opcode_index {
 434	SDEB_I_INVALID_OPCODE =	0,
 435	SDEB_I_INQUIRY = 1,
 436	SDEB_I_REPORT_LUNS = 2,
 437	SDEB_I_REQUEST_SENSE = 3,
 438	SDEB_I_TEST_UNIT_READY = 4,
 439	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
 440	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
 441	SDEB_I_LOG_SENSE = 7,
 442	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
 443	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
 444	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
 445	SDEB_I_START_STOP = 11,
 446	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
 447	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
 448	SDEB_I_MAINT_IN = 14,
 449	SDEB_I_MAINT_OUT = 15,
 450	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
 451	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
 452	SDEB_I_RESERVE = 18,		/* 6, 10 */
 453	SDEB_I_RELEASE = 19,		/* 6, 10 */
 454	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
 455	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
 456	SDEB_I_ATA_PT = 22,		/* 12, 16 */
 457	SDEB_I_SEND_DIAG = 23,
 458	SDEB_I_UNMAP = 24,
 459	SDEB_I_WRITE_BUFFER = 25,
 460	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
 461	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
 462	SDEB_I_COMP_WRITE = 28,
 463	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
 464	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
 465	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
 466	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
 467};
 468
 469
 470static const unsigned char opcode_ind_arr[256] = {
 471/* 0x0; 0x0->0x1f: 6 byte cdbs */
 472	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
 473	    0, 0, 0, 0,
 474	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 475	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 476	    SDEB_I_RELEASE,
 477	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
 478	    SDEB_I_ALLOW_REMOVAL, 0,
 479/* 0x20; 0x20->0x3f: 10 byte cdbs */
 480	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
 481	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
 482	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
 483	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
 484/* 0x40; 0x40->0x5f: 10 byte cdbs */
 485	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
 486	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
 487	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 488	    SDEB_I_RELEASE,
 489	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
 490/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
 491	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 492	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 493	0, SDEB_I_VARIABLE_LEN,
 494/* 0x80; 0x80->0x9f: 16 byte cdbs */
 495	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
 496	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
 497	0, 0, 0, SDEB_I_VERIFY,
 498	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
 499	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
 500	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
 501/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
 502	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
 503	     SDEB_I_MAINT_OUT, 0, 0, 0,
 504	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
 505	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
 506	0, 0, 0, 0, 0, 0, 0, 0,
 507	0, 0, 0, 0, 0, 0, 0, 0,
 508/* 0xc0; 0xc0->0xff: vendor specific */
 509	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 510	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 511	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 512	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 513};
 514
 515/*
 516 * The following "response" functions return the SCSI mid-level's 4 byte
 517 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 518 * command completion, they can mask their return value with
 519 * SDEG_RES_IMMED_MASK .
 520 */
 521#define SDEG_RES_IMMED_MASK 0x40000000
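/*
 * A minimal sketch: the status byte occupies bits 7..0 and the host byte
 * bits 23..16 of that int (cf. illegal_condition_result further below),
 * so a response function might end with, for example:
 *	return check_condition_result;		(SAM_STAT_CHECK_CONDITION)
 *	return DID_ERROR << 16;			(host-level error)
 *	return res | SDEG_RES_IMMED_MASK;	(IMMED bit set: finish early)
 */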
 522
 523static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
 524static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
 525static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
 526static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 527static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
 528static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 529static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
 530static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 531static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 532static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
 533static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
 534static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
 535static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
 536static int resp_get_stream_status(struct scsi_cmnd *scp,
 537				  struct sdebug_dev_info *devip);
 538static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
 539static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
 540static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
 541static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
 542static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
 543static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 544static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
 545static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 546static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 547static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
 548static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
 549static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
 550static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
 551static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
 552static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
 553static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
 554
 555static int sdebug_do_add_host(bool mk_new_store);
 556static int sdebug_add_host_helper(int per_host_idx);
 557static void sdebug_do_remove_host(bool the_end);
 558static int sdebug_add_store(void);
 559static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
 560static void sdebug_erase_all_stores(bool apart_from_first);
 561
 562static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
 563
 564/*
 565 * The following are overflow arrays for cdbs that "hit" the same index in
 566 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 567 * should be placed in opcode_info_arr[], the others should be placed here.
 568 */
 569static const struct opcode_info_t msense_iarr[] = {
 570	{0, 0x1a, 0, F_D_IN, NULL, NULL,
 571	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 572};
 573
 574static const struct opcode_info_t mselect_iarr[] = {
 575	{0, 0x15, 0, F_D_OUT, NULL, NULL,
 576	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 577};
 578
 579static const struct opcode_info_t read_iarr[] = {
 580	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
 581	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
 582	     0, 0, 0, 0} },
 583	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
 584	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 585	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
 586	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
 587	     0xc7, 0, 0, 0, 0} },
 588};
 589
 590static const struct opcode_info_t write_iarr[] = {
 591	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
 592	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
 593		   0, 0, 0, 0, 0, 0} },
 594	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
 595	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
 596		   0, 0, 0} },
 597	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
 598	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 599		   0xbf, 0xc7, 0, 0, 0, 0} },
 600};
 601
 602static const struct opcode_info_t verify_iarr[] = {
 603	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
 604	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
 605		   0, 0, 0, 0, 0, 0} },
 606};
 607
 608static const struct opcode_info_t sa_in_16_iarr[] = {
 609	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
 610	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 611	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
 612	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
 613	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
 614	     0, 0} },	/* GET STREAM STATUS */
 615};
 616
 617static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
 618	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
 619	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
 620		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
 621	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
 622	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
 623		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
 624};
 625
 626static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
 627	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
 628	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
 629	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
 630	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
 631	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
  632	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
 633};
 634
 635static const struct opcode_info_t write_same_iarr[] = {
 636	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
 637	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 638	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
 639};
 640
 641static const struct opcode_info_t reserve_iarr[] = {
 642	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
 643	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 644};
 645
 646static const struct opcode_info_t release_iarr[] = {
 647	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
 648	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 649};
 650
 651static const struct opcode_info_t sync_cache_iarr[] = {
 652	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
 653	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 654	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
 655};
 656
 657static const struct opcode_info_t pre_fetch_iarr[] = {
 658	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
 659	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 660	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
 661};
 662
 663static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
 664	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
 665	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 666	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
 667	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
 668	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 669	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
 670	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
 671	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 672	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
 673};
 674
 675static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
 676	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
 677	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 678	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
 679};
 680
 681
 682/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 683 * plus the terminating elements for logic that scans this table such as
 684 * REPORT SUPPORTED OPERATION CODES. */
 685static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
 686/* 0 */
 687	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
 688	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 689	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
 690	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 691	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
 692	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 693	     0, 0} },					/* REPORT LUNS */
 694	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
 695	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 696	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
 697	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 698/* 5 */
 699	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
 700	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
 701		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 702	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
 703	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
 704		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 705	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
 706	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
 707	     0, 0, 0} },
 708	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
 709	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
 710	     0, 0} },
 711	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
 712	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
 713	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
 714/* 10 */
 715	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
 716	    resp_write_dt0, write_iarr,			/* WRITE(16) */
 717		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 718		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
 719	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
 720	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 721	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
 722	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
 723		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 724		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
 725	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
 726	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
 727	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
 728	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
 729	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
 730		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
 731				0xff, 0, 0xc7, 0, 0, 0, 0} },
 732/* 15 */
 733	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 734	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 735	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
 736	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
 737	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 738			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
 739	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
 740	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
 741	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
 742	     0xff, 0xff} },
 743	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
 744	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
 745	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 746	     0} },
 747	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
 748	    NULL, release_iarr, /* RELEASE(10) <no response function> */
 749	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 750	     0} },
 751/* 20 */
 752	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
 753	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 754	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 755	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 756	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
 757	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
  758	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
 759	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 760	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
 761	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 762/* 25 */
 763	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
 764	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
 765	     0, 0, 0, 0} },			/* WRITE_BUFFER */
 766	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
 767	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
 768		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
 769		 0, 0, 0, 0, 0} },
 770	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
 771	    resp_sync_cache, sync_cache_iarr,
 772	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
 773	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
 774	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
 775	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
 776	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
 777	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
 778	    resp_pre_fetch, pre_fetch_iarr,
 779	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
 780	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
 781
 782/* 30 */
 783	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
  784	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
 785		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 786		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
 787	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
  788	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
 789		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 790		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
 791/* sentinel */
 792	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
 793	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 794};
 795
 796static int sdebug_num_hosts;
 797static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
 798static int sdebug_ato = DEF_ATO;
 799static int sdebug_cdb_len = DEF_CDB_LEN;
 800static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
 801static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
 802static int sdebug_dif = DEF_DIF;
 803static int sdebug_dix = DEF_DIX;
 804static int sdebug_dsense = DEF_D_SENSE;
 805static int sdebug_every_nth = DEF_EVERY_NTH;
 806static int sdebug_fake_rw = DEF_FAKE_RW;
 807static unsigned int sdebug_guard = DEF_GUARD;
 808static int sdebug_host_max_queue;	/* per host */
 809static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
 810static int sdebug_max_luns = DEF_MAX_LUNS;
 811static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
 812static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
 813static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
 814static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
 815static int sdebug_no_lun_0 = DEF_NO_LUN_0;
 816static int sdebug_no_uld;
 817static int sdebug_num_parts = DEF_NUM_PARTS;
 818static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 819static int sdebug_opt_blks = DEF_OPT_BLKS;
 820static int sdebug_opts = DEF_OPTS;
 821static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
 822static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
 823static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
 824static int sdebug_scsi_level = DEF_SCSI_LEVEL;
 825static int sdebug_sector_size = DEF_SECTOR_SIZE;
 826static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
 827static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
 828static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 829static unsigned int sdebug_lbpu = DEF_LBPU;
 830static unsigned int sdebug_lbpws = DEF_LBPWS;
 831static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
 832static unsigned int sdebug_lbprz = DEF_LBPRZ;
 833static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 834static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 835static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 836static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 837static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
 838static int sdebug_uuid_ctl = DEF_UUID_CTL;
 839static bool sdebug_random = DEF_RANDOM;
 840static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
 841static bool sdebug_removable = DEF_REMOVABLE;
 842static bool sdebug_clustering;
 843static bool sdebug_host_lock = DEF_HOST_LOCK;
 844static bool sdebug_strict = DEF_STRICT;
 845static bool sdebug_any_injecting_opt;
 846static bool sdebug_no_rwlock;
 847static bool sdebug_verbose;
 848static bool have_dif_prot;
 849static bool write_since_sync;
 850static bool sdebug_statistics = DEF_STATISTICS;
 851static bool sdebug_wp;
 852static bool sdebug_allow_restart;
 853static enum {
 854	BLK_ZONED_NONE	= 0,
 855	BLK_ZONED_HA	= 1,
 856	BLK_ZONED_HM	= 2,
 857} sdeb_zbc_model = BLK_ZONED_NONE;
 858static char *sdeb_zbc_model_s;
 859
 860enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
 861			  SAM_LUN_AM_FLAT = 0x1,
 862			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
 863			  SAM_LUN_AM_EXTENDED = 0x3};
 864static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
 865static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
 866
 867static unsigned int sdebug_store_sectors;
 868static sector_t sdebug_capacity;	/* in sectors */
 869
 870/* old BIOS stuff, kernel may get rid of them but some mode sense pages
 871   may still need them */
 872static int sdebug_heads;		/* heads per disk */
 873static int sdebug_cylinders_per;	/* cylinders per surface */
 874static int sdebug_sectors_per;		/* sectors per cylinder */
 875
 876static LIST_HEAD(sdebug_host_list);
 877static DEFINE_MUTEX(sdebug_host_list_mutex);
 878
 879static struct xarray per_store_arr;
 880static struct xarray *per_store_ap = &per_store_arr;
 881static int sdeb_first_idx = -1;		/* invalid index ==> none created */
 882static int sdeb_most_recent_idx = -1;
 883static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
 884
 885static unsigned long map_size;
 886static int num_aborts;
 887static int num_dev_resets;
 888static int num_target_resets;
 889static int num_bus_resets;
 890static int num_host_resets;
 891static int dix_writes;
 892static int dix_reads;
 893static int dif_errors;
 894
 895/* ZBC global data */
 896static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
 897static int sdeb_zbc_zone_cap_mb;
 898static int sdeb_zbc_zone_size_mb;
 899static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
 900static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
 901
 902static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
  903static int poll_queues; /* io_uring iopoll interface. */
 904
 905static atomic_long_t writes_by_group_number[64];
 906
 907static char sdebug_proc_name[] = MY_NAME;
 908static const char *my_name = MY_NAME;
 909
 910static const struct bus_type pseudo_lld_bus;
 911
 912static struct device_driver sdebug_driverfs_driver = {
 913	.name 		= sdebug_proc_name,
 914	.bus		= &pseudo_lld_bus,
 915};
 916
 917static const int check_condition_result =
 918	SAM_STAT_CHECK_CONDITION;
 919
 920static const int illegal_condition_result =
 921	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
 922
 923static const int device_qfull_result =
 924	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
 925
 926static const int condition_met_result = SAM_STAT_CONDITION_MET;
 927
 928static struct dentry *sdebug_debugfs_root;
 929
 930static void sdebug_err_free(struct rcu_head *head)
 931{
 932	struct sdebug_err_inject *inject =
 933		container_of(head, typeof(*inject), rcu);
 934
 935	kfree(inject);
 936}
 937
 938static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
 939{
 940	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
 941	struct sdebug_err_inject *err;
 942
 943	spin_lock(&devip->list_lock);
 944	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
 945		if (err->type == new->type && err->cmd == new->cmd) {
 946			list_del_rcu(&err->list);
 947			call_rcu(&err->rcu, sdebug_err_free);
 948		}
 949	}
 950
 951	list_add_tail_rcu(&new->list, &devip->inject_err_list);
 952	spin_unlock(&devip->list_lock);
 953}
 954
 955static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
 956{
 957	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
 958	struct sdebug_err_inject *err;
 959	int type;
 960	unsigned char cmd;
 961
 962	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
 963		kfree(buf);
 964		return -EINVAL;
 965	}
 966
 967	spin_lock(&devip->list_lock);
 968	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
 969		if (err->type == type && err->cmd == cmd) {
 970			list_del_rcu(&err->list);
 971			call_rcu(&err->rcu, sdebug_err_free);
 972			spin_unlock(&devip->list_lock);
 973			kfree(buf);
 974			return count;
 975		}
 976	}
 977	spin_unlock(&devip->list_lock);
 978
 979	kfree(buf);
 980	return -EINVAL;
 981}
 982
 983static int sdebug_error_show(struct seq_file *m, void *p)
 984{
 985	struct scsi_device *sdev = (struct scsi_device *)m->private;
 986	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
 987	struct sdebug_err_inject *err;
 988
 989	seq_puts(m, "Type\tCount\tCommand\n");
 990
 991	rcu_read_lock();
 992	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
 993		switch (err->type) {
 994		case ERR_TMOUT_CMD:
 995		case ERR_ABORT_CMD_FAILED:
 996		case ERR_LUN_RESET_FAILED:
 997			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
 998				err->cmd);
 999		break;
1000
1001		case ERR_FAIL_QUEUE_CMD:
1002			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
1003				err->cnt, err->cmd, err->queuecmd_ret);
1004		break;
1005
1006		case ERR_FAIL_CMD:
1007			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1008				err->type, err->cnt, err->cmd,
1009				err->host_byte, err->driver_byte,
1010				err->status_byte, err->sense_key,
1011				err->asc, err->asq);
1012		break;
1013		}
1014	}
1015	rcu_read_unlock();
1016
1017	return 0;
1018}
1019
1020static int sdebug_error_open(struct inode *inode, struct file *file)
1021{
1022	return single_open(file, sdebug_error_show, inode->i_private);
1023}
1024
1025static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1026		size_t count, loff_t *ppos)
1027{
1028	char *buf;
1029	unsigned int inject_type;
1030	struct sdebug_err_inject *inject;
1031	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1032
1033	buf = kzalloc(count + 1, GFP_KERNEL);
1034	if (!buf)
1035		return -ENOMEM;
1036
1037	if (copy_from_user(buf, ubuf, count)) {
1038		kfree(buf);
1039		return -EFAULT;
1040	}
1041
1042	if (buf[0] == '-')
1043		return sdebug_err_remove(sdev, buf, count);
1044
1045	if (sscanf(buf, "%d", &inject_type) != 1) {
1046		kfree(buf);
1047		return -EINVAL;
1048	}
1049
1050	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1051	if (!inject) {
1052		kfree(buf);
1053		return -ENOMEM;
1054	}
1055
1056	switch (inject_type) {
1057	case ERR_TMOUT_CMD:
1058	case ERR_ABORT_CMD_FAILED:
1059	case ERR_LUN_RESET_FAILED:
1060		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1061			   &inject->cmd) != 3)
1062			goto out_error;
1063	break;
1064
1065	case ERR_FAIL_QUEUE_CMD:
1066		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1067			   &inject->cmd, &inject->queuecmd_ret) != 4)
1068			goto out_error;
1069	break;
1070
1071	case ERR_FAIL_CMD:
1072		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1073			   &inject->type, &inject->cnt, &inject->cmd,
1074			   &inject->host_byte, &inject->driver_byte,
1075			   &inject->status_byte, &inject->sense_key,
1076			   &inject->asc, &inject->asq) != 9)
1077			goto out_error;
1078	break;
1079
1080	default:
1081		goto out_error;
1082	break;
1083	}
1084
1085	kfree(buf);
1086	sdebug_err_add(sdev, inject);
1087
1088	return count;
1089
1090out_error:
1091	kfree(buf);
1092	kfree(inject);
1093	return -EINVAL;
1094}
1095
1096static const struct file_operations sdebug_error_fops = {
1097	.open	= sdebug_error_open,
1098	.read	= seq_read,
1099	.write	= sdebug_error_write,
1100	.release = single_release,
1101};
1102
1103static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1104{
1105	struct scsi_target *starget = (struct scsi_target *)m->private;
1106	struct sdebug_target_info *targetip =
1107		(struct sdebug_target_info *)starget->hostdata;
1108
1109	if (targetip)
1110		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1111
1112	return 0;
1113}
1114
1115static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1116{
1117	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1118}
1119
1120static ssize_t sdebug_target_reset_fail_write(struct file *file,
1121		const char __user *ubuf, size_t count, loff_t *ppos)
1122{
1123	int ret;
1124	struct scsi_target *starget =
1125		(struct scsi_target *)file->f_inode->i_private;
1126	struct sdebug_target_info *targetip =
1127		(struct sdebug_target_info *)starget->hostdata;
1128
1129	if (targetip) {
1130		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1131		return ret < 0 ? ret : count;
1132	}
1133	return -ENODEV;
1134}
1135
1136static const struct file_operations sdebug_target_reset_fail_fops = {
1137	.open	= sdebug_target_reset_fail_open,
1138	.read	= seq_read,
1139	.write	= sdebug_target_reset_fail_write,
1140	.release = single_release,
1141};
1142
1143static int sdebug_target_alloc(struct scsi_target *starget)
1144{
1145	struct sdebug_target_info *targetip;
1146
1147	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1148	if (!targetip)
1149		return -ENOMEM;
1150
1151	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1152				sdebug_debugfs_root);
1153
1154	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1155				&sdebug_target_reset_fail_fops);
1156
1157	starget->hostdata = targetip;
1158
1159	return 0;
1160}
1161
 1162static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
1163{
1164	struct sdebug_target_info *targetip = data;
1165
1166	debugfs_remove(targetip->debugfs_entry);
1167	kfree(targetip);
1168}
1169
1170static void sdebug_target_destroy(struct scsi_target *starget)
1171{
1172	struct sdebug_target_info *targetip;
1173
1174	targetip = (struct sdebug_target_info *)starget->hostdata;
1175	if (targetip) {
1176		starget->hostdata = NULL;
 1177		async_schedule(sdebug_target_cleanup_async, targetip);
1178	}
1179}
1180
1181/* Only do the extra work involved in logical block provisioning if one or
1182 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1183 * real reads and writes (i.e. not skipping them for speed).
1184 */
1185static inline bool scsi_debug_lbp(void)
1186{
1187	return 0 == sdebug_fake_rw &&
1188		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1189}
1190
1191static void *lba2fake_store(struct sdeb_store_info *sip,
1192			    unsigned long long lba)
1193{
1194	struct sdeb_store_info *lsip = sip;
1195
1196	lba = do_div(lba, sdebug_store_sectors);
1197	if (!sip || !sip->storep) {
1198		WARN_ON_ONCE(true);
1199		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1200	}
1201	return lsip->storep + lba * sdebug_sector_size;
1202}
1203
1204static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1205				      sector_t sector)
1206{
1207	sector = sector_div(sector, sdebug_store_sectors);
1208
1209	return sip->dif_storep + sector;
1210}
1211
1212static void sdebug_max_tgts_luns(void)
1213{
1214	struct sdebug_host_info *sdbg_host;
1215	struct Scsi_Host *hpnt;
1216
1217	mutex_lock(&sdebug_host_list_mutex);
1218	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1219		hpnt = sdbg_host->shost;
1220		if ((hpnt->this_id >= 0) &&
1221		    (sdebug_num_tgts > hpnt->this_id))
1222			hpnt->max_id = sdebug_num_tgts + 1;
1223		else
1224			hpnt->max_id = sdebug_num_tgts;
1225		/* sdebug_max_luns; */
1226		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1227	}
1228	mutex_unlock(&sdebug_host_list_mutex);
1229}
1230
1231enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1232
1233/* Set in_bit to -1 to indicate no bit position of invalid field */
1234static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1235				 enum sdeb_cmd_data c_d,
1236				 int in_byte, int in_bit)
1237{
1238	unsigned char *sbuff;
1239	u8 sks[4];
1240	int sl, asc;
1241
1242	sbuff = scp->sense_buffer;
1243	if (!sbuff) {
1244		sdev_printk(KERN_ERR, scp->device,
1245			    "%s: sense_buffer is NULL\n", __func__);
1246		return;
1247	}
1248	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1249	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1250	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1251	memset(sks, 0, sizeof(sks));
1252	sks[0] = 0x80;
1253	if (c_d)
1254		sks[0] |= 0x40;
1255	if (in_bit >= 0) {
1256		sks[0] |= 0x8;
1257		sks[0] |= 0x7 & in_bit;
1258	}
1259	put_unaligned_be16(in_byte, sks + 1);
1260	if (sdebug_dsense) {
1261		sl = sbuff[7] + 8;
1262		sbuff[7] = sl;
1263		sbuff[sl] = 0x2;
1264		sbuff[sl + 1] = 0x6;
1265		memcpy(sbuff + sl + 4, sks, 3);
1266	} else
1267		memcpy(sbuff + 15, sks, 3);
1268	if (sdebug_verbose)
1269		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
1270			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1271			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1272}
1273
1274static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1275{
1276	if (!scp->sense_buffer) {
1277		sdev_printk(KERN_ERR, scp->device,
1278			    "%s: sense_buffer is NULL\n", __func__);
1279		return;
1280	}
1281	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1282
1283	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1284
1285	if (sdebug_verbose)
1286		sdev_printk(KERN_INFO, scp->device,
1287			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1288			    my_name, key, asc, asq);
1289}
1290
1291static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1292{
1293	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1294}
1295
1296static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1297			    void __user *arg)
1298{
1299	if (sdebug_verbose) {
1300		if (0x1261 == cmd)
1301			sdev_printk(KERN_INFO, dev,
1302				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1303		else if (0x5331 == cmd)
1304			sdev_printk(KERN_INFO, dev,
1305				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1306				    __func__);
1307		else
1308			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1309				    __func__, cmd);
1310	}
1311	return -EINVAL;
1312	/* return -ENOTTY; // correct return but upsets fdisk */
1313}
1314
1315static void config_cdb_len(struct scsi_device *sdev)
1316{
1317	switch (sdebug_cdb_len) {
1318	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1319		sdev->use_10_for_rw = false;
1320		sdev->use_16_for_rw = false;
1321		sdev->use_10_for_ms = false;
1322		break;
1323	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1324		sdev->use_10_for_rw = true;
1325		sdev->use_16_for_rw = false;
1326		sdev->use_10_for_ms = false;
1327		break;
1328	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1329		sdev->use_10_for_rw = true;
1330		sdev->use_16_for_rw = false;
1331		sdev->use_10_for_ms = true;
1332		break;
1333	case 16:
1334		sdev->use_10_for_rw = false;
1335		sdev->use_16_for_rw = true;
1336		sdev->use_10_for_ms = true;
1337		break;
1338	case 32: /* No knobs to suggest this so same as 16 for now */
1339		sdev->use_10_for_rw = false;
1340		sdev->use_16_for_rw = true;
1341		sdev->use_10_for_ms = true;
1342		break;
1343	default:
1344		pr_warn("unexpected cdb_len=%d, force to 10\n",
1345			sdebug_cdb_len);
1346		sdev->use_10_for_rw = true;
1347		sdev->use_16_for_rw = false;
1348		sdev->use_10_for_ms = false;
1349		sdebug_cdb_len = 10;
1350		break;
1351	}
1352}
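/*
 * For example, loading the module with "modprobe scsi_debug cdb_len=16"
 * (the cdb_len parameter is registered later in this file) makes attached
 * devices prefer READ(16)/WRITE(16) and the 10 byte MODE SENSE/MODE SELECT
 * variants, per the case 16 branch above.
 */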
1353
1354static void all_config_cdb_len(void)
1355{
1356	struct sdebug_host_info *sdbg_host;
1357	struct Scsi_Host *shost;
1358	struct scsi_device *sdev;
1359
1360	mutex_lock(&sdebug_host_list_mutex);
1361	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1362		shost = sdbg_host->shost;
1363		shost_for_each_device(sdev, shost) {
1364			config_cdb_len(sdev);
1365		}
1366	}
1367	mutex_unlock(&sdebug_host_list_mutex);
1368}
1369
1370static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1371{
1372	struct sdebug_host_info *sdhp = devip->sdbg_host;
1373	struct sdebug_dev_info *dp;
1374
1375	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1376		if ((devip->sdbg_host == dp->sdbg_host) &&
1377		    (devip->target == dp->target)) {
1378			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1379		}
1380	}
1381}
1382
1383static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1384{
1385	int k;
1386
1387	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1388	if (k != SDEBUG_NUM_UAS) {
1389		const char *cp = NULL;
1390
1391		switch (k) {
1392		case SDEBUG_UA_POR:
1393			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1394					POWER_ON_RESET_ASCQ);
1395			if (sdebug_verbose)
1396				cp = "power on reset";
1397			break;
1398		case SDEBUG_UA_POOCCUR:
1399			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1400					POWER_ON_OCCURRED_ASCQ);
1401			if (sdebug_verbose)
1402				cp = "power on occurred";
1403			break;
1404		case SDEBUG_UA_BUS_RESET:
1405			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1406					BUS_RESET_ASCQ);
1407			if (sdebug_verbose)
1408				cp = "bus reset";
1409			break;
1410		case SDEBUG_UA_MODE_CHANGED:
1411			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1412					MODE_CHANGED_ASCQ);
1413			if (sdebug_verbose)
1414				cp = "mode parameters changed";
1415			break;
1416		case SDEBUG_UA_CAPACITY_CHANGED:
1417			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1418					CAPACITY_CHANGED_ASCQ);
1419			if (sdebug_verbose)
1420				cp = "capacity data changed";
1421			break;
1422		case SDEBUG_UA_MICROCODE_CHANGED:
1423			mk_sense_buffer(scp, UNIT_ATTENTION,
1424					TARGET_CHANGED_ASC,
1425					MICROCODE_CHANGED_ASCQ);
1426			if (sdebug_verbose)
1427				cp = "microcode has been changed";
1428			break;
1429		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1430			mk_sense_buffer(scp, UNIT_ATTENTION,
1431					TARGET_CHANGED_ASC,
1432					MICROCODE_CHANGED_WO_RESET_ASCQ);
1433			if (sdebug_verbose)
1434				cp = "microcode has been changed without reset";
1435			break;
1436		case SDEBUG_UA_LUNS_CHANGED:
1437			/*
1438			 * SPC-3 behavior is to report a UNIT ATTENTION with
1439			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1440			 * on the target, until a REPORT LUNS command is
1441			 * received.  SPC-4 behavior is to report it only once.
1442			 * NOTE:  sdebug_scsi_level does not use the same
1443			 * values as struct scsi_device->scsi_level.
1444			 */
1445			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1446				clear_luns_changed_on_target(devip);
1447			mk_sense_buffer(scp, UNIT_ATTENTION,
1448					TARGET_CHANGED_ASC,
1449					LUNS_CHANGED_ASCQ);
1450			if (sdebug_verbose)
1451				cp = "reported luns data has changed";
1452			break;
1453		default:
1454			pr_warn("unexpected unit attention code=%d\n", k);
1455			if (sdebug_verbose)
1456				cp = "unknown";
1457			break;
1458		}
1459		clear_bit(k, devip->uas_bm);
1460		if (sdebug_verbose)
1461			sdev_printk(KERN_INFO, scp->device,
1462				   "%s reports: Unit attention: %s\n",
1463				   my_name, cp);
1464		return check_condition_result;
1465	}
1466	return 0;
1467}
1468
1469/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1470static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1471				int arr_len)
1472{
1473	int act_len;
1474	struct scsi_data_buffer *sdb = &scp->sdb;
1475
1476	if (!sdb->length)
1477		return 0;
1478	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1479		return DID_ERROR << 16;
1480
1481	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1482				      arr, arr_len);
1483	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1484
1485	return 0;
1486}
1487
1488/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1489 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1490 * calls, not required to write in ascending offset order. Assumes resid
1491 * set to scsi_bufflen() prior to any calls.
1492 */
1493static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1494				  int arr_len, unsigned int off_dst)
1495{
1496	unsigned int act_len, n;
1497	struct scsi_data_buffer *sdb = &scp->sdb;
1498	off_t skip = off_dst;
1499
1500	if (sdb->length <= off_dst)
1501		return 0;
1502	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1503		return DID_ERROR << 16;
1504
1505	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1506				       arr, arr_len, skip);
1507	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1508		 __func__, off_dst, scsi_bufflen(scp), act_len,
1509		 scsi_get_resid(scp));
1510	n = scsi_bufflen(scp) - (off_dst + act_len);
1511	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1512	return 0;
1513}
1514
1515/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1516 * 'arr' or -1 if error.
1517 */
1518static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1519			       int arr_len)
1520{
1521	if (!scsi_bufflen(scp))
1522		return 0;
1523	if (scp->sc_data_direction != DMA_TO_DEVICE)
1524		return -1;
1525
1526	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1527}
1528
1529
1530static char sdebug_inq_vendor_id[9] = "Linux   ";
1531static char sdebug_inq_product_id[17] = "scsi_debug      ";
1532static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1533/* Use some locally assigned NAAs for SAS addresses. */
1534static const u64 naa3_comp_a = 0x3222222000000000ULL;
1535static const u64 naa3_comp_b = 0x3333333000000000ULL;
1536static const u64 naa3_comp_c = 0x3111111000000000ULL;
1537
1538/* Device identification VPD page. Returns number of bytes placed in arr */
1539static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1540			  int target_dev_id, int dev_id_num,
1541			  const char *dev_id_str, int dev_id_str_len,
1542			  const uuid_t *lu_name)
1543{
1544	int num, port_a;
1545	char b[32];
1546
1547	port_a = target_dev_id + 1;
1548	/* T10 vendor identifier field format (faked) */
1549	arr[0] = 0x2;	/* ASCII */
1550	arr[1] = 0x1;
1551	arr[2] = 0x0;
1552	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1553	memcpy(&arr[12], sdebug_inq_product_id, 16);
1554	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1555	num = 8 + 16 + dev_id_str_len;
1556	arr[3] = num;
1557	num += 4;
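	/* 'num' now includes the 4-byte designator header of the entry above */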
1558	if (dev_id_num >= 0) {
1559		if (sdebug_uuid_ctl) {
1560			/* Locally assigned UUID */
1561			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1562			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1563			arr[num++] = 0x0;
1564			arr[num++] = 0x12;
1565			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1566			arr[num++] = 0x0;
1567			memcpy(arr + num, lu_name, 16);
1568			num += 16;
1569		} else {
1570			/* NAA-3, Logical unit identifier (binary) */
1571			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1572			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1573			arr[num++] = 0x0;
1574			arr[num++] = 0x8;
1575			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1576			num += 8;
1577		}
1578		/* Target relative port number */
1579		arr[num++] = 0x61;	/* proto=sas, binary */
1580		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1581		arr[num++] = 0x0;	/* reserved */
1582		arr[num++] = 0x4;	/* length */
1583		arr[num++] = 0x0;	/* reserved */
1584		arr[num++] = 0x0;	/* reserved */
1585		arr[num++] = 0x0;
1586		arr[num++] = 0x1;	/* relative port A */
1587	}
1588	/* NAA-3, Target port identifier */
1589	arr[num++] = 0x61;	/* proto=sas, binary */
1590	arr[num++] = 0x93;	/* piv=1, target port, naa */
1591	arr[num++] = 0x0;
1592	arr[num++] = 0x8;
1593	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1594	num += 8;
1595	/* NAA-3, Target port group identifier */
1596	arr[num++] = 0x61;	/* proto=sas, binary */
1597	arr[num++] = 0x95;	/* piv=1, target port group id */
1598	arr[num++] = 0x0;
1599	arr[num++] = 0x4;
1600	arr[num++] = 0;
1601	arr[num++] = 0;
1602	put_unaligned_be16(port_group_id, arr + num);
1603	num += 2;
1604	/* NAA-3, Target device identifier */
1605	arr[num++] = 0x61;	/* proto=sas, binary */
1606	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1607	arr[num++] = 0x0;
1608	arr[num++] = 0x8;
1609	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1610	num += 8;
1611	/* SCSI name string: Target device identifier */
1612	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1613	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1614	arr[num++] = 0x0;
1615	arr[num++] = 24;
1616	memcpy(arr + num, "naa.32222220", 12);
1617	num += 12;
1618	snprintf(b, sizeof(b), "%08X", target_dev_id);
1619	memcpy(arr + num, b, 8);
1620	num += 8;
1621	memset(arr + num, 0, 4);
1622	num += 4;
1623	return num;
1624}
1625
1626static unsigned char vpd84_data[] = {
1627/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1628    0x22,0x22,0x22,0x0,0xbb,0x1,
1629    0x22,0x22,0x22,0x0,0xbb,0x2,
1630};
1631
1632/*  Software interface identification VPD page */
1633static int inquiry_vpd_84(unsigned char *arr)
1634{
1635	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1636	return sizeof(vpd84_data);
1637}
1638
1639/* Management network addresses VPD page */
1640static int inquiry_vpd_85(unsigned char *arr)
1641{
1642	int num = 0;
1643	const char *na1 = "https://www.kernel.org/config";
1644	const char *na2 = "http://www.kernel.org/log";
1645	int plen, olen;
1646
1647	arr[num++] = 0x1;	/* lu, storage config */
1648	arr[num++] = 0x0;	/* reserved */
1649	arr[num++] = 0x0;
1650	olen = strlen(na1);
1651	plen = olen + 1;
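	/* round up to a multiple of 4; e.g. olen = 29 gives plen = 32 */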
1652	if (plen % 4)
1653		plen = ((plen / 4) + 1) * 4;
1654	arr[num++] = plen;	/* length, null terminated, padded */
1655	memcpy(arr + num, na1, olen);
1656	memset(arr + num + olen, 0, plen - olen);
1657	num += plen;
1658
1659	arr[num++] = 0x4;	/* lu, logging */
1660	arr[num++] = 0x0;	/* reserved */
1661	arr[num++] = 0x0;
1662	olen = strlen(na2);
1663	plen = olen + 1;
1664	if (plen % 4)
1665		plen = ((plen / 4) + 1) * 4;
1666	arr[num++] = plen;	/* length, null terminated, padded */
1667	memcpy(arr + num, na2, olen);
1668	memset(arr + num + olen, 0, plen - olen);
1669	num += plen;
1670
1671	return num;
1672}
1673
1674/* SCSI ports VPD page */
1675static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1676{
1677	int num = 0;
1678	int port_a, port_b;
1679
1680	port_a = target_dev_id + 1;
1681	port_b = port_a + 1;
1682	arr[num++] = 0x0;	/* reserved */
1683	arr[num++] = 0x0;	/* reserved */
1684	arr[num++] = 0x0;
1685	arr[num++] = 0x1;	/* relative port 1 (primary) */
1686	memset(arr + num, 0, 6);
1687	num += 6;
1688	arr[num++] = 0x0;
1689	arr[num++] = 12;	/* length tp descriptor */
1690	/* naa-5 target port identifier (A) */
1691	arr[num++] = 0x61;	/* proto=sas, binary */
1692	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1693	arr[num++] = 0x0;	/* reserved */
1694	arr[num++] = 0x8;	/* length */
1695	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1696	num += 8;
1697	arr[num++] = 0x0;	/* reserved */
1698	arr[num++] = 0x0;	/* reserved */
1699	arr[num++] = 0x0;
1700	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1701	memset(arr + num, 0, 6);
1702	num += 6;
1703	arr[num++] = 0x0;
1704	arr[num++] = 12;	/* length tp descriptor */
1705	/* naa-5 target port identifier (B) */
1706	arr[num++] = 0x61;	/* proto=sas, binary */
1707	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1708	arr[num++] = 0x0;	/* reserved */
1709	arr[num++] = 0x8;	/* length */
1710	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1711	num += 8;
1712
1713	return num;
1714}
1715
1716
1717static unsigned char vpd89_data[] = {
1718/* from 4th byte */ 0,0,0,0,
1719'l','i','n','u','x',' ',' ',' ',
1720'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1721'1','2','3','4',
17220x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
17230xec,0,0,0,
17240x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
17250,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
17260x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
17270x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
17280x53,0x41,
17290x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
17300x20,0x20,
17310x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
17320x10,0x80,
17330,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
17340x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
17350x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
17360,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
17370x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
17380x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
17390,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
17400,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17410,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17420,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17430x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
17440,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
17450xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
17460,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
17470,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17480,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17490,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17500,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17510,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17520,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17530,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17540,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17550,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17560,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17570,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
17580,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1759};
1760
1761/* ATA Information VPD page */
1762static int inquiry_vpd_89(unsigned char *arr)
1763{
1764	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1765	return sizeof(vpd89_data);
1766}
1767
1768
1769static unsigned char vpdb0_data[] = {
1770	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1771	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1772	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1773	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1774};
1775
1776/* Block limits VPD page (SBC-3) */
1777static int inquiry_vpd_b0(unsigned char *arr)
1778{
1779	unsigned int gran;
1780
1781	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1782
1783	/* Optimal transfer length granularity */
1784	if (sdebug_opt_xferlen_exp != 0 &&
1785	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1786		gran = 1 << sdebug_opt_xferlen_exp;
1787	else
1788		gran = 1 << sdebug_physblk_exp;
1789	put_unaligned_be16(gran, arr + 2);
1790
1791	/* Maximum Transfer Length */
1792	if (sdebug_store_sectors > 0x400)
1793		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1794
1795	/* Optimal Transfer Length */
1796	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1797
1798	if (sdebug_lbpu) {
1799		/* Maximum Unmap LBA Count */
1800		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1801
1802		/* Maximum Unmap Block Descriptor Count */
1803		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1804	}
1805
1806	/* Unmap Granularity Alignment */
1807	if (sdebug_unmap_alignment) {
1808		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1809		arr[28] |= 0x80; /* UGAVALID */
1810	}
1811
1812	/* Optimal Unmap Granularity */
1813	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1814
1815	/* Maximum WRITE SAME Length */
1816	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1817
1818	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1819}
1820
1821/* Block device characteristics VPD page (SBC-3) */
1822static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1823{
1824	memset(arr, 0, 0x3c);
1825	arr[0] = 0;
1826	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1827	arr[2] = 0;
1828	arr[3] = 5;	/* less than 1.8" */
1829
1830	return 0x3c;
1831}
1832
1833/* Logical block provisioning VPD page (SBC-4) */
1834static int inquiry_vpd_b2(unsigned char *arr)
1835{
1836	memset(arr, 0, 0x4);
1837	arr[0] = 0;			/* threshold exponent */
1838	if (sdebug_lbpu)
1839		arr[1] = 1 << 7;
1840	if (sdebug_lbpws)
1841		arr[1] |= 1 << 6;
1842	if (sdebug_lbpws10)
1843		arr[1] |= 1 << 5;
1844	if (sdebug_lbprz && scsi_debug_lbp())
1845		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1846	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1847	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1848	/* threshold_percentage=0 */
1849	return 0x4;
1850}
1851
1852/* Zoned block device characteristics VPD page (ZBC mandatory) */
1853static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1854{
1855	memset(arr, 0, 0x3c);
1856	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1857	/*
1858	 * Set Optimal number of open sequential write preferred zones and
1859	 * Optimal number of non-sequentially written sequential write
1860	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1861	 * fields set to zero, apart from Max. number of open swrz_s field.
1862	 */
1863	put_unaligned_be32(0xffffffff, &arr[4]);
1864	put_unaligned_be32(0xffffffff, &arr[8]);
1865	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1866		put_unaligned_be32(devip->max_open, &arr[12]);
1867	else
1868		put_unaligned_be32(0xffffffff, &arr[12]);
1869	if (devip->zcap < devip->zsize) {
1870		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1871		put_unaligned_be64(devip->zsize, &arr[20]);
1872	} else {
1873		arr[19] = 0;
1874	}
1875	return 0x3c;
1876}
1877
1878#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */
1879
1880enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1881
1882/* Block limits extension VPD page (SBC-4) */
1883static int inquiry_vpd_b7(unsigned char *arrb4)
1884{
1885	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1886	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1887	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1888	return SDEBUG_BLE_LEN_AFTER_B4;
1889}
1890
1891#define SDEBUG_LONG_INQ_SZ 96
1892#define SDEBUG_MAX_INQ_ARR_SZ 584
1893
1894static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1895{
1896	unsigned char pq_pdt;
1897	unsigned char *arr;
1898	unsigned char *cmd = scp->cmnd;
1899	u32 alloc_len, n;
1900	int ret;
1901	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1902
1903	alloc_len = get_unaligned_be16(cmd + 3);
1904	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1905	if (!arr)
1906		return DID_REQUEUE << 16;
1907	is_disk = (sdebug_ptype == TYPE_DISK);
1908	is_zbc = devip->zoned;
1909	is_disk_zbc = (is_disk || is_zbc);
1910	have_wlun = scsi_is_wlun(scp->device->lun);
1911	if (have_wlun)
1912		pq_pdt = TYPE_WLUN;	/* present, wlun */
1913	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1914		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1915	else
1916		pq_pdt = (sdebug_ptype & 0x1f);
1917	arr[0] = pq_pdt;
1918	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1919		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1920		kfree(arr);
1921		return check_condition_result;
1922	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1923		int lu_id_num, port_group_id, target_dev_id;
1924		u32 len;
1925		char lu_id_str[6];
1926		int host_no = devip->sdbg_host->shost->host_no;
1927
1928		arr[1] = cmd[2];
1929		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1930		    (devip->channel & 0x7f);
1931		if (sdebug_vpd_use_hostno == 0)
1932			host_no = 0;
1933		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1934			    (devip->target * 1000) + devip->lun);
1935		target_dev_id = ((host_no + 1) * 2000) +
1936				 (devip->target * 1000) - 3;
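		/*
		 * lu_id_num and target_dev_id are synthesized from the host,
		 * target and lun numbers so each simulated device exposes
		 * distinct VPD identifiers.
		 */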
1937		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1938		if (0 == cmd[2]) { /* supported vital product data pages */
1939			n = 4;
1940			arr[n++] = 0x0;   /* this page */
1941			arr[n++] = 0x80;  /* unit serial number */
1942			arr[n++] = 0x83;  /* device identification */
1943			arr[n++] = 0x84;  /* software interface ident. */
1944			arr[n++] = 0x85;  /* management network addresses */
1945			arr[n++] = 0x86;  /* extended inquiry */
1946			arr[n++] = 0x87;  /* mode page policy */
1947			arr[n++] = 0x88;  /* SCSI ports */
1948			if (is_disk_zbc) {	  /* SBC or ZBC */
1949				arr[n++] = 0x89;  /* ATA information */
1950				arr[n++] = 0xb0;  /* Block limits */
1951				arr[n++] = 0xb1;  /* Block characteristics */
1952				if (is_disk)
1953					arr[n++] = 0xb2;  /* LB Provisioning */
1954				if (is_zbc)
1955					arr[n++] = 0xb6;  /* ZB dev. char. */
1956				arr[n++] = 0xb7;  /* Block limits extension */
1957			}
1958			arr[3] = n - 4;	  /* number of supported VPD pages */
1959		} else if (0x80 == cmd[2]) { /* unit serial number */
1960			arr[3] = len;
1961			memcpy(&arr[4], lu_id_str, len);
1962		} else if (0x83 == cmd[2]) { /* device identification */
1963			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1964						target_dev_id, lu_id_num,
1965						lu_id_str, len,
1966						&devip->lu_name);
1967		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1968			arr[3] = inquiry_vpd_84(&arr[4]);
1969		} else if (0x85 == cmd[2]) { /* Management network addresses */
1970			arr[3] = inquiry_vpd_85(&arr[4]);
1971		} else if (0x86 == cmd[2]) { /* extended inquiry */
1972			arr[3] = 0x3c;	/* number of following entries */
1973			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1974				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1975			else if (have_dif_prot)
1976				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1977			else
1978				arr[4] = 0x0;   /* no protection stuff */
1979			/*
1980			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
1981			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
1982			 */
1983			arr[5] = 0x17;
1984		} else if (0x87 == cmd[2]) { /* mode page policy */
1985			arr[3] = 0x8;	/* number of following entries */
1986			arr[4] = 0x2;	/* disconnect-reconnect mp */
1987			arr[6] = 0x80;	/* mlus, shared */
1988			arr[8] = 0x18;	 /* protocol specific lu */
1989			arr[10] = 0x82;	 /* mlus, per initiator port */
1990		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1991			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1992		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1993			n = inquiry_vpd_89(&arr[4]);
1994			put_unaligned_be16(n, arr + 2);
1995		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1996			arr[3] = inquiry_vpd_b0(&arr[4]);
1997		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1998			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1999		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2000			arr[3] = inquiry_vpd_b2(&arr[4]);
2001		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2002			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2003		} else if (cmd[2] == 0xb7) { /* block limits extension page */
2004			arr[3] = inquiry_vpd_b7(&arr[4]);
2005		} else {
2006			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2007			kfree(arr);
2008			return check_condition_result;
2009		}
2010		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2011		ret = fill_from_dev_buffer(scp, arr,
2012			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2013		kfree(arr);
2014		return ret;
2015	}
2016	/* drops through here for a standard inquiry */
2017	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
2018	arr[2] = sdebug_scsi_level;
2019	arr[3] = 2;    /* response_data_format==2 */
2020	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2021	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
2022	if (sdebug_vpd_use_hostno == 0)
2023		arr[5] |= 0x10; /* claim: implicit TPGS */
2024	arr[6] = 0x10; /* claim: MultiP */
2025	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2026	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2027	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2028	memcpy(&arr[16], sdebug_inq_product_id, 16);
2029	memcpy(&arr[32], sdebug_inq_product_rev, 4);
2030	/* Use Vendor Specific area to place driver date in ASCII */
2031	memcpy(&arr[36], sdebug_version_date, 8);
2032	/* version descriptors (2 bytes each) follow */
2033	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2034	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2035	n = 62;
2036	if (is_disk) {		/* SBC-4 no version claimed */
2037		put_unaligned_be16(0x600, arr + n);
2038		n += 2;
2039	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
2040		put_unaligned_be16(0x525, arr + n);
2041		n += 2;
2042	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
2043		put_unaligned_be16(0x624, arr + n);
2044		n += 2;
2045	}
2046	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
2047	ret = fill_from_dev_buffer(scp, arr,
2048			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2049	kfree(arr);
2050	return ret;
2051}
2052
2053/* See resp_iec_m_pg() for how this data is manipulated */
2054static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2055				   0, 0, 0x0, 0x0};
2056
2057static int resp_requests(struct scsi_cmnd *scp,
2058			 struct sdebug_dev_info *devip)
2059{
2060	unsigned char *cmd = scp->cmnd;
2061	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
2062	bool dsense = !!(cmd[1] & 1);
2063	u32 alloc_len = cmd[4];
2064	u32 len = 18;
2065	int stopped_state = atomic_read(&devip->stopped);
2066
2067	memset(arr, 0, sizeof(arr));
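	/*
	 * Three cases: device stopped (LOGICAL UNIT NOT READY), informational
	 * exceptions TEST mode with MRIE=6 (THRESHOLD EXCEEDED), otherwise
	 * there is nothing to report (NO SENSE).
	 */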
2068	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
2069		if (dsense) {
2070			arr[0] = 0x72;
2071			arr[1] = NOT_READY;
2072			arr[2] = LOGICAL_UNIT_NOT_READY;
2073			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2074			len = 8;
2075		} else {
2076			arr[0] = 0x70;
2077			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
2078			arr[7] = 0xa;			/* 18 byte sense buffer */
2079			arr[12] = LOGICAL_UNIT_NOT_READY;
2080			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2081		}
2082	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2083		/* Information exceptions control mode page: TEST=1, MRIE=6 */
2084		if (dsense) {
2085			arr[0] = 0x72;
2086			arr[1] = 0x0;		/* NO_SENSE in sense_key */
2087			arr[2] = THRESHOLD_EXCEEDED;
2088			arr[3] = 0xff;		/* Failure prediction(false) */
2089			len = 8;
2090		} else {
2091			arr[0] = 0x70;
2092			arr[2] = 0x0;		/* NO_SENSE in sense_key */
2093			arr[7] = 0xa;		/* 18 byte sense buffer */
2094			arr[12] = THRESHOLD_EXCEEDED;
2095			arr[13] = 0xff;		/* Failure prediction(false) */
2096		}
2097	} else {	/* nothing to report */
2098		if (dsense) {
2099			len = 8;
2100			memset(arr, 0, len);
2101			arr[0] = 0x72;
2102		} else {
2103			memset(arr, 0, len);
2104			arr[0] = 0x70;
2105			arr[7] = 0xa;
2106		}
2107	}
2108	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2109}
2110
2111static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2112{
2113	unsigned char *cmd = scp->cmnd;
2114	int power_cond, want_stop, stopped_state;
2115	bool changing;
2116
2117	power_cond = (cmd[4] & 0xf0) >> 4;
2118	if (power_cond) {
2119		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2120		return check_condition_result;
2121	}
2122	want_stop = !(cmd[4] & 1);
2123	stopped_state = atomic_read(&devip->stopped);
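	/* stopped_state == 2: not ready until the tur_ms_to_ready delay elapses */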
2124	if (stopped_state == 2) {
2125		ktime_t now_ts = ktime_get_boottime();
2126
2127		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2128			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2129
2130			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2131				/* tur_ms_to_ready timer extinguished */
2132				atomic_set(&devip->stopped, 0);
2133				stopped_state = 0;
2134			}
2135		}
2136		if (stopped_state == 2) {
2137			if (want_stop) {
2138				stopped_state = 1;	/* dummy up success */
2139			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
2140				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2141				return check_condition_result;
2142			}
2143		}
2144	}
2145	changing = (stopped_state != want_stop);
2146	if (changing)
2147		atomic_xchg(&devip->stopped, want_stop);
2148	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
2149		return SDEG_RES_IMMED_MASK;
2150	else
2151		return 0;
2152}
2153
2154static sector_t get_sdebug_capacity(void)
2155{
2156	static const unsigned int gibibyte = 1073741824;
2157
2158	if (sdebug_virtual_gb > 0)
2159		return (sector_t)sdebug_virtual_gb *
2160			(gibibyte / sdebug_sector_size);
2161	else
2162		return sdebug_store_sectors;
2163}
2164
2165#define SDEBUG_READCAP_ARR_SZ 8
2166static int resp_readcap(struct scsi_cmnd *scp,
2167			struct sdebug_dev_info *devip)
2168{
2169	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2170	unsigned int capac;
2171
2172	/* following just in case virtual_gb changed */
2173	sdebug_capacity = get_sdebug_capacity();
2174	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2175	if (sdebug_capacity < 0xffffffff) {
2176		capac = (unsigned int)sdebug_capacity - 1;
2177		put_unaligned_be32(capac, arr + 0);
2178	} else
2179		put_unaligned_be32(0xffffffff, arr + 0);
2180	put_unaligned_be16(sdebug_sector_size, arr + 6);
2181	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2182}
2183
2184#define SDEBUG_READCAP16_ARR_SZ 32
2185static int resp_readcap16(struct scsi_cmnd *scp,
2186			  struct sdebug_dev_info *devip)
2187{
2188	unsigned char *cmd = scp->cmnd;
2189	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2190	u32 alloc_len;
2191
2192	alloc_len = get_unaligned_be32(cmd + 10);
2193	/* following just in case virtual_gb changed */
2194	sdebug_capacity = get_sdebug_capacity();
2195	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2196	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2197	put_unaligned_be32(sdebug_sector_size, arr + 8);
2198	arr[13] = sdebug_physblk_exp & 0xf;
2199	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2200
2201	if (scsi_debug_lbp()) {
2202		arr[14] |= 0x80; /* LBPME */
2203		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2204		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2205		 * in the wider field maps to 0 in this field.
2206		 */
2207		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2208			arr[14] |= 0x40;
2209	}
2210
2211	/*
2212	 * Since the scsi_debug READ CAPACITY implementation always reports the
2213	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2214	 */
2215	if (devip->zoned)
2216		arr[12] |= 1 << 4;
2217
2218	arr[15] = sdebug_lowest_aligned & 0xff;
2219
2220	if (have_dif_prot) {
2221		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2222		arr[12] |= 1; /* PROT_EN */
2223	}
2224
2225	return fill_from_dev_buffer(scp, arr,
2226			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2227}
2228
2229#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2230
2231static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2232			      struct sdebug_dev_info *devip)
2233{
2234	unsigned char *cmd = scp->cmnd;
2235	unsigned char *arr;
2236	int host_no = devip->sdbg_host->shost->host_no;
2237	int port_group_a, port_group_b, port_a, port_b;
2238	u32 alen, n, rlen;
2239	int ret;
2240
2241	alen = get_unaligned_be32(cmd + 6);
2242	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2243	if (!arr)
2244		return DID_REQUEUE << 16;
2245	/*
2246	 * EVPD page 0x88 states we have two ports, one
2247	 * real and a fake port with no device connected.
2248	 * So we create two port groups with one port each
2249	 * and set the group with port B to unavailable.
2250	 */
2251	port_a = 0x1; /* relative port A */
2252	port_b = 0x2; /* relative port B */
2253	port_group_a = (((host_no + 1) & 0x7f) << 8) +
2254			(devip->channel & 0x7f);
2255	port_group_b = (((host_no + 1) & 0x7f) << 8) +
2256			(devip->channel & 0x7f) + 0x80;
2257
2258	/*
2259	 * The asymmetric access state is cycled according to the host_id.
2260	 */
2261	n = 4;
2262	if (sdebug_vpd_use_hostno == 0) {
2263		arr[n++] = host_no % 3; /* Asymm access state */
2264		arr[n++] = 0x0F; /* claim: all states are supported */
2265	} else {
2266		arr[n++] = 0x0; /* Active/Optimized path */
2267		arr[n++] = 0x01; /* only support active/optimized paths */
2268	}
2269	put_unaligned_be16(port_group_a, arr + n);
2270	n += 2;
2271	arr[n++] = 0;    /* Reserved */
2272	arr[n++] = 0;    /* Status code */
2273	arr[n++] = 0;    /* Vendor unique */
2274	arr[n++] = 0x1;  /* One port per group */
2275	arr[n++] = 0;    /* Reserved */
2276	arr[n++] = 0;    /* Reserved */
2277	put_unaligned_be16(port_a, arr + n);
2278	n += 2;
2279	arr[n++] = 3;    /* Port unavailable */
2280	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2281	put_unaligned_be16(port_group_b, arr + n);
2282	n += 2;
2283	arr[n++] = 0;    /* Reserved */
2284	arr[n++] = 0;    /* Status code */
2285	arr[n++] = 0;    /* Vendor unique */
2286	arr[n++] = 0x1;  /* One port per group */
2287	arr[n++] = 0;    /* Reserved */
2288	arr[n++] = 0;    /* Reserved */
2289	put_unaligned_be16(port_b, arr + n);
2290	n += 2;
2291
2292	rlen = n - 4;
2293	put_unaligned_be32(rlen, arr + 0);
2294
2295	/*
2296	 * Return the smallest of:
2297	 * - the allocation length from the CDB,
2298	 * - the constructed response length,
2299	 * - the maximum array size.
2300	 */
2301	rlen = min(alen, n);
2302	ret = fill_from_dev_buffer(scp, arr,
2303			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2304	kfree(arr);
2305	return ret;
2306}
2307
2308static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2309			     struct sdebug_dev_info *devip)
2310{
2311	bool rctd;
2312	u8 reporting_opts, req_opcode, sdeb_i, supp;
2313	u16 req_sa, u;
2314	u32 alloc_len, a_len;
2315	int k, offset, len, errsts, count, bump, na;
2316	const struct opcode_info_t *oip;
2317	const struct opcode_info_t *r_oip;
2318	u8 *arr;
2319	u8 *cmd = scp->cmnd;
2320
2321	rctd = !!(cmd[2] & 0x80);
2322	reporting_opts = cmd[2] & 0x7;
2323	req_opcode = cmd[3];
2324	req_sa = get_unaligned_be16(cmd + 4);
2325	alloc_len = get_unaligned_be32(cmd + 6);
2326	if (alloc_len < 4 || alloc_len > 0xffff) {
2327		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2328		return check_condition_result;
2329	}
2330	if (alloc_len > 8192)
2331		a_len = 8192;
2332	else
2333		a_len = alloc_len;
2334	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2335	if (NULL == arr) {
2336		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2337				INSUFF_RES_ASCQ);
2338		return check_condition_result;
2339	}
2340	switch (reporting_opts) {
2341	case 0:	/* all commands */
2342		/* count number of commands */
2343		for (count = 0, oip = opcode_info_arr;
2344		     oip->num_attached != 0xff; ++oip) {
2345			if (F_INV_OP & oip->flags)
2346				continue;
2347			count += (oip->num_attached + 1);
2348		}
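		/* each descriptor is 8 bytes; RCTD adds a 12-byte timeouts descriptor */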
2349		bump = rctd ? 20 : 8;
2350		put_unaligned_be32(count * bump, arr);
2351		for (offset = 4, oip = opcode_info_arr;
2352		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2353			if (F_INV_OP & oip->flags)
2354				continue;
2355			na = oip->num_attached;
2356			arr[offset] = oip->opcode;
2357			put_unaligned_be16(oip->sa, arr + offset + 2);
2358			if (rctd)
2359				arr[offset + 5] |= 0x2;
2360			if (FF_SA & oip->flags)
2361				arr[offset + 5] |= 0x1;
2362			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2363			if (rctd)
2364				put_unaligned_be16(0xa, arr + offset + 8);
2365			r_oip = oip;
2366			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2367				if (F_INV_OP & oip->flags)
2368					continue;
2369				offset += bump;
2370				arr[offset] = oip->opcode;
2371				put_unaligned_be16(oip->sa, arr + offset + 2);
2372				if (rctd)
2373					arr[offset + 5] |= 0x2;
2374				if (FF_SA & oip->flags)
2375					arr[offset + 5] |= 0x1;
2376				put_unaligned_be16(oip->len_mask[0],
2377						   arr + offset + 6);
2378				if (rctd)
2379					put_unaligned_be16(0xa,
2380							   arr + offset + 8);
2381			}
2382			oip = r_oip;
2383			offset += bump;
2384		}
2385		break;
2386	case 1:	/* one command: opcode only */
2387	case 2:	/* one command: opcode plus service action */
2388	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2389		sdeb_i = opcode_ind_arr[req_opcode];
2390		oip = &opcode_info_arr[sdeb_i];
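		/* SUPPORT values: 1 = not supported, 3 = supported in conformance with the standard */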
2391		if (F_INV_OP & oip->flags) {
2392			supp = 1;
2393			offset = 4;
2394		} else {
2395			if (1 == reporting_opts) {
2396				if (FF_SA & oip->flags) {
2397					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2398							     2, 2);
2399					kfree(arr);
2400					return check_condition_result;
2401				}
2402				req_sa = 0;
2403			} else if (2 == reporting_opts &&
2404				   0 == (FF_SA & oip->flags)) {
2405				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2406				kfree(arr);
2407				return check_condition_result;
2408			}
2409			if (0 == (FF_SA & oip->flags) &&
2410			    req_opcode == oip->opcode)
2411				supp = 3;
2412			else if (0 == (FF_SA & oip->flags)) {
2413				na = oip->num_attached;
2414				for (k = 0, oip = oip->arrp; k < na;
2415				     ++k, ++oip) {
2416					if (req_opcode == oip->opcode)
2417						break;
2418				}
2419				supp = (k >= na) ? 1 : 3;
2420			} else if (req_sa != oip->sa) {
2421				na = oip->num_attached;
2422				for (k = 0, oip = oip->arrp; k < na;
2423				     ++k, ++oip) {
2424					if (req_sa == oip->sa)
2425						break;
2426				}
2427				supp = (k >= na) ? 1 : 3;
2428			} else
2429				supp = 3;
2430			if (3 == supp) {
2431				u = oip->len_mask[0];
2432				put_unaligned_be16(u, arr + 2);
2433				arr[4] = oip->opcode;
2434				for (k = 1; k < u; ++k)
2435					arr[4 + k] = (k < 16) ?
2436						 oip->len_mask[k] : 0xff;
2437				offset = 4 + u;
2438			} else
2439				offset = 4;
2440		}
2441		arr[1] = (rctd ? 0x80 : 0) | supp;
2442		if (rctd) {
2443			put_unaligned_be16(0xa, arr + offset);
2444			offset += 12;
2445		}
2446		break;
2447	default:
2448		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2449		kfree(arr);
2450		return check_condition_result;
2451	}
2452	offset = (offset < a_len) ? offset : a_len;
2453	len = (offset < alloc_len) ? offset : alloc_len;
2454	errsts = fill_from_dev_buffer(scp, arr, len);
2455	kfree(arr);
2456	return errsts;
2457}
2458
2459static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2460			  struct sdebug_dev_info *devip)
2461{
2462	bool repd;
2463	u32 alloc_len, len;
2464	u8 arr[16];
2465	u8 *cmd = scp->cmnd;
2466
2467	memset(arr, 0, sizeof(arr));
2468	repd = !!(cmd[2] & 0x80);
2469	alloc_len = get_unaligned_be32(cmd + 6);
2470	if (alloc_len < 4) {
2471		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2472		return check_condition_result;
2473	}
2474	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2475	arr[1] = 0x1;		/* ITNRS */
2476	if (repd) {
2477		arr[3] = 0xc;
2478		len = 16;
2479	} else
2480		len = 4;
2481
2482	len = (len < alloc_len) ? len : alloc_len;
2483	return fill_from_dev_buffer(scp, arr, len);
2484}
2485
2486/* <<Following mode page info copied from ST318451LW>> */
2487
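/*
 * For the resp_*_pg() mode page helpers below, pcontrol selects which
 * values are returned: 0 = current, 1 = changeable (mask), 2 = default.
 * pcontrol == 3 (saved values) is rejected earlier in resp_mode_sense().
 */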
2488static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2489{	/* Read-Write Error Recovery page for mode_sense */
2490	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2491					5, 0, 0xff, 0xff};
2492
2493	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2494	if (1 == pcontrol)
2495		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2496	return sizeof(err_recov_pg);
2497}
2498
2499static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2500{ 	/* Disconnect-Reconnect page for mode_sense */
2501	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2502					 0, 0, 0, 0, 0, 0, 0, 0};
2503
2504	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2505	if (1 == pcontrol)
2506		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2507	return sizeof(disconnect_pg);
2508}
2509
2510static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2511{       /* Format device page for mode_sense */
2512	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2513				     0, 0, 0, 0, 0, 0, 0, 0,
2514				     0, 0, 0, 0, 0x40, 0, 0, 0};
2515
2516	memcpy(p, format_pg, sizeof(format_pg));
2517	put_unaligned_be16(sdebug_sectors_per, p + 10);
2518	put_unaligned_be16(sdebug_sector_size, p + 12);
2519	if (sdebug_removable)
2520		p[20] |= 0x20; /* should agree with INQUIRY */
2521	if (1 == pcontrol)
2522		memset(p + 2, 0, sizeof(format_pg) - 2);
2523	return sizeof(format_pg);
2524}
2525
2526static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2527				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2528				     0, 0, 0, 0};
2529
2530static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2531{ 	/* Caching page for mode_sense */
2532	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2533		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2534	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2535		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2536
2537	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2538		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2539	memcpy(p, caching_pg, sizeof(caching_pg));
2540	if (1 == pcontrol)
2541		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2542	else if (2 == pcontrol)
2543		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2544	return sizeof(caching_pg);
2545}
2546
2547static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2548				    0, 0, 0x2, 0x4b};
2549
2550static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2551{ 	/* Control mode page for mode_sense */
2552	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2553					0, 0, 0, 0};
2554	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2555				     0, 0, 0x2, 0x4b};
2556
2557	if (sdebug_dsense)
2558		ctrl_m_pg[2] |= 0x4;
2559	else
2560		ctrl_m_pg[2] &= ~0x4;
2561
2562	if (sdebug_ato)
2563		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2564
2565	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2566	if (1 == pcontrol)
2567		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2568	else if (2 == pcontrol)
2569		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2570	return sizeof(ctrl_m_pg);
2571}
2572
2573/* IO Advice Hints Grouping mode page */
2574static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2575{
2576	/* IO Advice Hints Grouping mode page */
2577	struct grouping_m_pg {
2578		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
2579		u8 subpage_code;
2580		__be16 page_length;
2581		u8 reserved[12];
2582		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2583	};
2584	static const struct grouping_m_pg gr_m_pg = {
2585		.page_code = 0xa | 0x40,
2586		.subpage_code = 5,
2587		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2588		.descr = {
2589			{ .st_enble = 1 },
2590			{ .st_enble = 1 },
2591			{ .st_enble = 1 },
2592			{ .st_enble = 1 },
2593			{ .st_enble = 1 },
2594			{ .st_enble = 0 },
2595		}
2596	};
2597
2598	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2599		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2600	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2601	if (1 == pcontrol) {
2602		/* There are no changeable values so clear from byte 4 on. */
2603		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2604	}
2605	return sizeof(gr_m_pg);
2606}
2607
2608static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2609{	/* Informational Exceptions control mode page for mode_sense */
2610	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2611				       0, 0, 0x0, 0x0};
2612	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2613				      0, 0, 0x0, 0x0};
2614
2615	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2616	if (1 == pcontrol)
2617		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2618	else if (2 == pcontrol)
2619		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2620	return sizeof(iec_m_pg);
2621}
2622
2623static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2624{	/* SAS SSP mode page - short format for mode_sense */
2625	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2626		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2627
2628	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2629	if (1 == pcontrol)
2630		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2631	return sizeof(sas_sf_m_pg);
2632}
2633
2634
2635static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2636			      int target_dev_id)
2637{	/* SAS phy control and discover mode page for mode_sense */
2638	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2639		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2640		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2641		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2642		    0x2, 0, 0, 0, 0, 0, 0, 0,
2643		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2644		    0, 0, 0, 0, 0, 0, 0, 0,
2645		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2646		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2647		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2648		    0x3, 0, 0, 0, 0, 0, 0, 0,
2649		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2650		    0, 0, 0, 0, 0, 0, 0, 0,
2651		};
2652	int port_a, port_b;
2653
2654	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2655	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2656	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2657	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2658	port_a = target_dev_id + 1;
2659	port_b = port_a + 1;
2660	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2661	put_unaligned_be32(port_a, p + 20);
2662	put_unaligned_be32(port_b, p + 48 + 20);
2663	if (1 == pcontrol)
2664		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2665	return sizeof(sas_pcd_m_pg);
2666}
2667
2668static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2669{	/* SAS SSP shared protocol specific port mode subpage */
2670	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2671		    0, 0, 0, 0, 0, 0, 0, 0,
2672		};
2673
2674	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2675	if (1 == pcontrol)
2676		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2677	return sizeof(sas_sha_m_pg);
2678}
2679
2680/* PAGE_SIZE is more than necessary but provides room for future expansion. */
2681#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2682
2683static int resp_mode_sense(struct scsi_cmnd *scp,
2684			   struct sdebug_dev_info *devip)
2685{
2686	int pcontrol, pcode, subpcode, bd_len;
2687	unsigned char dev_spec;
2688	u32 alloc_len, offset, len;
2689	int target_dev_id;
2690	int target = scp->device->id;
2691	unsigned char *ap;
2692	unsigned char *arr __free(kfree);
2693	unsigned char *cmd = scp->cmnd;
2694	bool dbd, llbaa, msense_6, is_disk, is_zbc;
2695
2696	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2697	if (!arr)
2698		return -ENOMEM;
2699	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2700	pcontrol = (cmd[2] & 0xc0) >> 6;
2701	pcode = cmd[2] & 0x3f;
2702	subpcode = cmd[3];
2703	msense_6 = (MODE_SENSE == cmd[0]);
2704	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2705	is_disk = (sdebug_ptype == TYPE_DISK);
2706	is_zbc = devip->zoned;
2707	if ((is_disk || is_zbc) && !dbd)
2708		bd_len = llbaa ? 16 : 8;
2709	else
2710		bd_len = 0;
2711	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2712	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2713	if (0x3 == pcontrol) {  /* Saving values not supported */
2714		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2715		return check_condition_result;
2716	}
2717	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2718			(devip->target * 1000) - 3;
2719	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2720	if (is_disk || is_zbc) {
2721		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2722		if (sdebug_wp)
2723			dev_spec |= 0x80;
2724	} else
2725		dev_spec = 0x0;
2726	if (msense_6) {
2727		arr[2] = dev_spec;
2728		arr[3] = bd_len;
2729		offset = 4;
2730	} else {
2731		arr[3] = dev_spec;
2732		if (16 == bd_len)
2733			arr[4] = 0x1;	/* set LONGLBA bit */
2734		arr[7] = bd_len;	/* assume 255 or less */
2735		offset = 8;
2736	}
2737	ap = arr + offset;
2738	if ((bd_len > 0) && (!sdebug_capacity))
2739		sdebug_capacity = get_sdebug_capacity();
2740
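	/* append a short (8 byte) or, with LLBAA, a long (16 byte) block descriptor */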
2741	if (8 == bd_len) {
2742		if (sdebug_capacity > 0xfffffffe)
2743			put_unaligned_be32(0xffffffff, ap + 0);
2744		else
2745			put_unaligned_be32(sdebug_capacity, ap + 0);
2746		put_unaligned_be16(sdebug_sector_size, ap + 6);
2747		offset += bd_len;
2748		ap = arr + offset;
2749	} else if (16 == bd_len) {
2750		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2751		put_unaligned_be32(sdebug_sector_size, ap + 12);
2752		offset += bd_len;
2753		ap = arr + offset;
2754	}
2755
2756	/*
2757	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2758	 *        len += resp_*_pg(ap + len, pcontrol, target);
2759	 */
2760	switch (pcode) {
2761	case 0x1:	/* Read-Write error recovery page, direct access */
2762		if (subpcode > 0x0 && subpcode < 0xff)
2763			goto bad_subpcode;
2764		len = resp_err_recov_pg(ap, pcontrol, target);
2765		offset += len;
2766		break;
2767	case 0x2:	/* Disconnect-Reconnect page, all devices */
2768		if (subpcode > 0x0 && subpcode < 0xff)
2769			goto bad_subpcode;
2770		len = resp_disconnect_pg(ap, pcontrol, target);
2771		offset += len;
2772		break;
2773	case 0x3:       /* Format device page, direct access */
2774		if (subpcode > 0x0 && subpcode < 0xff)
2775			goto bad_subpcode;
2776		if (is_disk) {
2777			len = resp_format_pg(ap, pcontrol, target);
2778			offset += len;
2779		} else {
2780			goto bad_pcode;
2781		}
2782		break;
2783	case 0x8:	/* Caching page, direct access */
2784		if (subpcode > 0x0 && subpcode < 0xff)
2785			goto bad_subpcode;
2786		if (is_disk || is_zbc) {
2787			len = resp_caching_pg(ap, pcontrol, target);
2788			offset += len;
2789		} else {
2790			goto bad_pcode;
2791		}
2792		break;
2793	case 0xa:	/* Control Mode page, all devices */
2794		switch (subpcode) {
2795		case 0:
2796			len = resp_ctrl_m_pg(ap, pcontrol, target);
2797			break;
2798		case 0x05:
2799			len = resp_grouping_m_pg(ap, pcontrol, target);
2800			break;
2801		case 0xff:
2802			len = resp_ctrl_m_pg(ap, pcontrol, target);
2803			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2804			break;
2805		default:
2806			goto bad_subpcode;
2807		}
2808		offset += len;
2809		break;
2810	case 0x19:	/* if spc==1 then sas phy, control+discover */
2811		if (subpcode > 0x2 && subpcode < 0xff)
2812			goto bad_subpcode;
2813		len = 0;
2814		if ((0x0 == subpcode) || (0xff == subpcode))
2815			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2816		if ((0x1 == subpcode) || (0xff == subpcode))
2817			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2818						  target_dev_id);
2819		if ((0x2 == subpcode) || (0xff == subpcode))
2820			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2821		offset += len;
2822		break;
2823	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2824		if (subpcode > 0x0 && subpcode < 0xff)
2825			goto bad_subpcode;
2826		len = resp_iec_m_pg(ap, pcontrol, target);
2827		offset += len;
2828		break;
2829	case 0x3f:	/* Read all Mode pages */
2830		if (subpcode > 0x0 && subpcode < 0xff)
2831			goto bad_subpcode;
2832		len = resp_err_recov_pg(ap, pcontrol, target);
2833		len += resp_disconnect_pg(ap + len, pcontrol, target);
2834		if (is_disk) {
2835			len += resp_format_pg(ap + len, pcontrol, target);
2836			len += resp_caching_pg(ap + len, pcontrol, target);
2837		} else if (is_zbc) {
2838			len += resp_caching_pg(ap + len, pcontrol, target);
2839		}
2840		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2841		if (0xff == subpcode)
2842			len += resp_grouping_m_pg(ap + len, pcontrol, target);
2843		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2844		if (0xff == subpcode) {
2845			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2846						  target_dev_id);
2847			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2848		}
2849		len += resp_iec_m_pg(ap + len, pcontrol, target);
2850		offset += len;
2851		break;
2852	default:
2853		goto bad_pcode;
2854	}
2855	if (msense_6)
2856		arr[0] = offset - 1;
2857	else
2858		put_unaligned_be16((offset - 2), arr + 0);
2859	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2860
2861bad_pcode:
2862	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2863	return check_condition_result;
2864
2865bad_subpcode:
2866	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2867	return check_condition_result;
2868}
2869
2870#define SDEBUG_MAX_MSELECT_SZ 512
2871
2872static int resp_mode_select(struct scsi_cmnd *scp,
2873			    struct sdebug_dev_info *devip)
2874{
2875	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2876	int param_len, res, mpage;
2877	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2878	unsigned char *cmd = scp->cmnd;
2879	int mselect6 = (MODE_SELECT == cmd[0]);
2880
2881	memset(arr, 0, sizeof(arr));
2882	pf = cmd[1] & 0x10;
2883	sp = cmd[1] & 0x1;
2884	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2885	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2886		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2887		return check_condition_result;
2888	}
2889	res = fetch_to_dev_buffer(scp, arr, param_len);
2890	if (-1 == res)
2891		return DID_ERROR << 16;
2892	else if (sdebug_verbose && (res < param_len))
2893		sdev_printk(KERN_INFO, scp->device,
2894			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2895			    __func__, param_len, res);
2896	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2897	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2898	off = bd_len + (mselect6 ? 4 : 8);
2899	if (md_len > 2 || off >= res) {
2900		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2901		return check_condition_result;
2902	}
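	/* 'off' now indexes the first mode page, past the header and any block descriptors */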
2903	mpage = arr[off] & 0x3f;
2904	ps = !!(arr[off] & 0x80);
2905	if (ps) {
2906		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2907		return check_condition_result;
2908	}
2909	spf = !!(arr[off] & 0x40);
2910	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2911		       (arr[off + 1] + 2);
2912	if ((pg_len + off) > param_len) {
2913		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2914				PARAMETER_LIST_LENGTH_ERR, 0);
2915		return check_condition_result;
2916	}
2917	switch (mpage) {
2918	case 0x8:      /* Caching Mode page */
2919		if (caching_pg[1] == arr[off + 1]) {
2920			memcpy(caching_pg + 2, arr + off + 2,
2921			       sizeof(caching_pg) - 2);
2922			goto set_mode_changed_ua;
2923		}
2924		break;
2925	case 0xa:      /* Control Mode page */
2926		if (ctrl_m_pg[1] == arr[off + 1]) {
2927			memcpy(ctrl_m_pg + 2, arr + off + 2,
2928			       sizeof(ctrl_m_pg) - 2);
2929			if (ctrl_m_pg[4] & 0x8)
2930				sdebug_wp = true;
2931			else
2932				sdebug_wp = false;
2933			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2934			goto set_mode_changed_ua;
2935		}
2936		break;
2937	case 0x1c:      /* Informational Exceptions Mode page */
2938		if (iec_m_pg[1] == arr[off + 1]) {
2939			memcpy(iec_m_pg + 2, arr + off + 2,
2940			       sizeof(iec_m_pg) - 2);
2941			goto set_mode_changed_ua;
2942		}
2943		break;
2944	default:
2945		break;
2946	}
2947	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2948	return check_condition_result;
2949set_mode_changed_ua:
2950	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2951	return 0;
2952}
2953
2954static int resp_temp_l_pg(unsigned char *arr)
2955{
2956	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2957				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2958		};
2959
2960	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2961	return sizeof(temp_l_pg);
2962}
2963
2964static int resp_ie_l_pg(unsigned char *arr)
2965{
2966	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2967		};
2968
2969	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2970	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2971		arr[4] = THRESHOLD_EXCEEDED;
2972		arr[5] = 0xff;
2973	}
2974	return sizeof(ie_l_pg);
2975}
2976
2977static int resp_env_rep_l_spg(unsigned char *arr)
2978{
2979	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2980					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2981					 0x1, 0x0, 0x23, 0x8,
2982					 0x0, 55, 72, 35, 55, 45, 0, 0,
2983		};
2984
2985	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2986	return sizeof(env_rep_l_spg);
2987}
2988
2989#define SDEBUG_MAX_LSENSE_SZ 512
2990
2991static int resp_log_sense(struct scsi_cmnd *scp,
2992			  struct sdebug_dev_info *devip)
2993{
2994	int ppc, sp, pcode, subpcode;
2995	u32 alloc_len, len, n;
2996	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2997	unsigned char *cmd = scp->cmnd;
2998
2999	memset(arr, 0, sizeof(arr));
3000	ppc = cmd[1] & 0x2;
3001	sp = cmd[1] & 0x1;
3002	if (ppc || sp) {
3003		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3004		return check_condition_result;
3005	}
3006	pcode = cmd[2] & 0x3f;
3007	subpcode = cmd[3] & 0xff;
3008	alloc_len = get_unaligned_be16(cmd + 7);
3009	arr[0] = pcode;
3010	if (0 == subpcode) {
3011		switch (pcode) {
3012		case 0x0:	/* Supported log pages log page */
3013			n = 4;
3014			arr[n++] = 0x0;		/* this page */
3015			arr[n++] = 0xd;		/* Temperature */
3016			arr[n++] = 0x2f;	/* Informational exceptions */
3017			arr[3] = n - 4;
3018			break;
3019		case 0xd:	/* Temperature log page */
3020			arr[3] = resp_temp_l_pg(arr + 4);
3021			break;
3022		case 0x2f:	/* Informational exceptions log page */
3023			arr[3] = resp_ie_l_pg(arr + 4);
3024			break;
3025		default:
3026			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3027			return check_condition_result;
3028		}
3029	} else if (0xff == subpcode) {
3030		arr[0] |= 0x40;
3031		arr[1] = subpcode;
3032		switch (pcode) {
3033		case 0x0:	/* Supported log pages and subpages log page */
3034			n = 4;
3035			arr[n++] = 0x0;
3036			arr[n++] = 0x0;		/* 0,0 page */
3037			arr[n++] = 0x0;
3038			arr[n++] = 0xff;	/* this page */
3039			arr[n++] = 0xd;
3040			arr[n++] = 0x0;		/* Temperature */
3041			arr[n++] = 0xd;
3042			arr[n++] = 0x1;		/* Environment reporting */
3043			arr[n++] = 0xd;
3044			arr[n++] = 0xff;	/* all 0xd subpages */
3045			arr[n++] = 0x2f;
3046			arr[n++] = 0x0;	/* Informational exceptions */
3047			arr[n++] = 0x2f;
3048			arr[n++] = 0xff;	/* all 0x2f subpages */
3049			arr[3] = n - 4;
3050			break;
3051		case 0xd:	/* Temperature subpages */
3052			n = 4;
3053			arr[n++] = 0xd;
3054			arr[n++] = 0x0;		/* Temperature */
3055			arr[n++] = 0xd;
3056			arr[n++] = 0x1;		/* Environment reporting */
3057			arr[n++] = 0xd;
3058			arr[n++] = 0xff;	/* these subpages */
3059			arr[3] = n - 4;
3060			break;
3061		case 0x2f:	/* Informational exceptions subpages */
3062			n = 4;
3063			arr[n++] = 0x2f;
3064			arr[n++] = 0x0;		/* Informational exceptions */
3065			arr[n++] = 0x2f;
3066			arr[n++] = 0xff;	/* these subpages */
3067			arr[3] = n - 4;
3068			break;
3069		default:
3070			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3071			return check_condition_result;
3072		}
3073	} else if (subpcode > 0) {
3074		arr[0] |= 0x40;
3075		arr[1] = subpcode;
3076		if (pcode == 0xd && subpcode == 1)
3077			arr[3] = resp_env_rep_l_spg(arr + 4);
3078		else {
3079			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3080			return check_condition_result;
3081		}
3082	} else {
3083		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3084		return check_condition_result;
3085	}
3086	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3087	return fill_from_dev_buffer(scp, arr,
3088		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3089}
3090
3091static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3092{
3093	return devip->nr_zones != 0;
3094}
3095
3096static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3097					unsigned long long lba)
3098{
3099	u32 zno = lba >> devip->zsize_shift;
3100	struct sdeb_zone_state *zsp;
3101
3102	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3103		return &devip->zstate[zno];
3104
3105	/*
3106	 * If the zone capacity is less than the zone size, each sequential
3107	 * zone is followed by a gap zone in zstate[]; adjust for them.
3108	 */
3109	zno = 2 * zno - devip->nr_conv_zones;
3110	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
3111	zsp = &devip->zstate[zno];
3112	if (lba >= zsp->z_start + zsp->z_size)
3113		zsp++;
3114	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3115	return zsp;
3116}
3117
3118static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3119{
3120	return zsp->z_type == ZBC_ZTYPE_CNV;
3121}
3122
3123static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3124{
3125	return zsp->z_type == ZBC_ZTYPE_GAP;
3126}
3127
3128static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3129{
3130	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3131}
3132
3133static void zbc_close_zone(struct sdebug_dev_info *devip,
3134			   struct sdeb_zone_state *zsp)
3135{
3136	enum sdebug_z_cond zc;
3137
3138	if (!zbc_zone_is_seq(zsp))
3139		return;
3140
3141	zc = zsp->z_cond;
3142	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3143		return;
3144
3145	if (zc == ZC2_IMPLICIT_OPEN)
3146		devip->nr_imp_open--;
3147	else
3148		devip->nr_exp_open--;
3149
3150	if (zsp->z_wp == zsp->z_start) {
3151		zsp->z_cond = ZC1_EMPTY;
3152	} else {
3153		zsp->z_cond = ZC4_CLOSED;
3154		devip->nr_closed++;
3155	}
3156}
3157
3158static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3159{
3160	struct sdeb_zone_state *zsp = &devip->zstate[0];
3161	unsigned int i;
3162
3163	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3164		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3165			zbc_close_zone(devip, zsp);
3166			return;
3167		}
3168	}
3169}
3170
3171static void zbc_open_zone(struct sdebug_dev_info *devip,
3172			  struct sdeb_zone_state *zsp, bool explicit)
3173{
3174	enum sdebug_z_cond zc;
3175
3176	if (!zbc_zone_is_seq(zsp))
3177		return;
3178
3179	zc = zsp->z_cond;
3180	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3181	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
3182		return;
3183
3184	/* Close an implicit open zone if necessary */
3185	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3186		zbc_close_zone(devip, zsp);
3187	else if (devip->max_open &&
3188		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3189		zbc_close_imp_open_zone(devip);
3190
3191	if (zsp->z_cond == ZC4_CLOSED)
3192		devip->nr_closed--;
3193	if (explicit) {
3194		zsp->z_cond = ZC3_EXPLICIT_OPEN;
3195		devip->nr_exp_open++;
3196	} else {
3197		zsp->z_cond = ZC2_IMPLICIT_OPEN;
3198		devip->nr_imp_open++;
3199	}
3200}
3201
3202static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3203				     struct sdeb_zone_state *zsp)
3204{
3205	switch (zsp->z_cond) {
3206	case ZC2_IMPLICIT_OPEN:
3207		devip->nr_imp_open--;
3208		break;
3209	case ZC3_EXPLICIT_OPEN:
3210		devip->nr_exp_open--;
3211		break;
3212	default:
3213		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3214			  zsp->z_start, zsp->z_cond);
3215		break;
3216	}
3217	zsp->z_cond = ZC5_FULL;
3218}
3219
3220static void zbc_inc_wp(struct sdebug_dev_info *devip,
3221		       unsigned long long lba, unsigned int num)
3222{
3223	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3224	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3225
3226	if (!zbc_zone_is_seq(zsp))
3227		return;
3228
3229	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3230		zsp->z_wp += num;
3231		if (zsp->z_wp >= zend)
3232			zbc_set_zone_full(devip, zsp);
3233		return;
3234	}
3235
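	/*
	 * Sequential write preferred (SWP) zones: writes may land anywhere
	 * in the zone, so the write pointer only advances when a write ends
	 * beyond it, and a write may spill over into following zone(s).
	 */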
3236	while (num) {
3237		if (lba != zsp->z_wp)
3238			zsp->z_non_seq_resource = true;
3239
3240		end = lba + num;
3241		if (end >= zend) {
3242			n = zend - lba;
3243			zsp->z_wp = zend;
3244		} else if (end > zsp->z_wp) {
3245			n = num;
3246			zsp->z_wp = end;
3247		} else {
3248			n = num;
3249		}
3250		if (zsp->z_wp >= zend)
3251			zbc_set_zone_full(devip, zsp);
3252
3253		num -= n;
3254		lba += n;
3255		if (num) {
3256			zsp++;
3257			zend = zsp->z_start + zsp->z_size;
3258		}
3259	}
3260}
3261
3262static int check_zbc_access_params(struct scsi_cmnd *scp,
3263			unsigned long long lba, unsigned int num, bool write)
3264{
3265	struct scsi_device *sdp = scp->device;
3266	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3267	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3268	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3269
3270	if (!write) {
3271		/* For host-managed, reads cannot cross zone type boundaries */
3272		if (zsp->z_type != zsp_end->z_type) {
3273			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3274					LBA_OUT_OF_RANGE,
3275					READ_INVDATA_ASCQ);
3276			return check_condition_result;
3277		}
3278		return 0;
3279	}
3280
3281	/* Writing into a gap zone is not allowed */
3282	if (zbc_zone_is_gap(zsp)) {
3283		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3284				ATTEMPT_ACCESS_GAP);
3285		return check_condition_result;
3286	}
3287
3288	/* No restrictions for writes within conventional zones */
3289	if (zbc_zone_is_conv(zsp)) {
3290		if (!zbc_zone_is_conv(zsp_end)) {
3291			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3292					LBA_OUT_OF_RANGE,
3293					WRITE_BOUNDARY_ASCQ);
3294			return check_condition_result;
3295		}
3296		return 0;
3297	}
3298
3299	if (zsp->z_type == ZBC_ZTYPE_SWR) {
3300		/* Writes cannot cross sequential zone boundaries */
3301		if (zsp_end != zsp) {
3302			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3303					LBA_OUT_OF_RANGE,
3304					WRITE_BOUNDARY_ASCQ);
3305			return check_condition_result;
3306		}
3307		/* Cannot write full zones */
3308		if (zsp->z_cond == ZC5_FULL) {
3309			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3310					INVALID_FIELD_IN_CDB, 0);
3311			return check_condition_result;
3312		}
3313		/* Writes must be aligned to the zone WP */
3314		if (lba != zsp->z_wp) {
3315			mk_sense_buffer(scp, ILLEGAL_REQUEST,
3316					LBA_OUT_OF_RANGE,
3317					UNALIGNED_WRITE_ASCQ);
3318			return check_condition_result;
3319		}
3320	}
3321
3322	/* Handle implicit open of closed and empty zones */
3323	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3324		if (devip->max_open &&
3325		    devip->nr_exp_open >= devip->max_open) {
3326			mk_sense_buffer(scp, DATA_PROTECT,
3327					INSUFF_RES_ASC,
3328					INSUFF_ZONE_ASCQ);
3329			return check_condition_result;
3330		}
3331		zbc_open_zone(devip, zsp, false);
3332	}
3333
3334	return 0;
3335}
3336
3337static inline int check_device_access_params(struct scsi_cmnd *scp,
3338						     unsigned long long lba,
3339						     unsigned int num, bool write)
3340{
3341	struct scsi_device *sdp = scp->device;
3342	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3343
3344	if (lba + num > sdebug_capacity) {
3345		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3346		return check_condition_result;
3347	}
3348	/* transfer length excessive (tie in to block limits VPD page) */
3349	if (num > sdebug_store_sectors) {
3350		/* needs work to find which cdb byte 'num' comes from */
3351		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3352		return check_condition_result;
3353	}
3354	if (write && unlikely(sdebug_wp)) {
3355		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3356		return check_condition_result;
3357	}
3358	if (sdebug_dev_is_zoned(devip))
3359		return check_zbc_access_params(scp, lba, num, write);
3360
3361	return 0;
3362}
3363
3364/*
3365 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3366 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3367 * that access any of the "stores" in struct sdeb_store_info should call this
3368 * function with bug_if_fake_rw set to true.
3369 */
3370static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3371						bool bug_if_fake_rw)
3372{
3373	if (sdebug_fake_rw) {
3374		BUG_ON(bug_if_fake_rw);	/* See note above */
3375		return NULL;
3376	}
3377	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3378}
3379
3380/* Returns number of bytes copied or -1 if error. */
3381static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3382			    u32 sg_skip, u64 lba, u32 num, bool do_write,
3383			    u8 group_number)
3384{
3385	int ret;
3386	u64 block, rest = 0;
3387	enum dma_data_direction dir;
3388	struct scsi_data_buffer *sdb = &scp->sdb;
3389	u8 *fsp;
3390
3391	if (do_write) {
3392		dir = DMA_TO_DEVICE;
3393		write_since_sync = true;
3394	} else {
3395		dir = DMA_FROM_DEVICE;
3396	}
3397
3398	if (!sdb->length || !sip)
3399		return 0;
3400	if (scp->sc_data_direction != dir)
3401		return -1;
3402
3403	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
3404		atomic_long_inc(&writes_by_group_number[group_number]);
3405
3406	fsp = sip->storep;
3407
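	/*
	 * The backing store may be smaller than the advertised capacity:
	 * do_div() divides lba by sdebug_store_sectors in place and returns
	 * the remainder, the block offset within the store. An access that
	 * runs off the end of the store wraps to its start ("rest" below).
	 */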
3408	block = do_div(lba, sdebug_store_sectors);
3409	if (block + num > sdebug_store_sectors)
3410		rest = block + num - sdebug_store_sectors;
3411
3412	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3413		   fsp + (block * sdebug_sector_size),
3414		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3415	if (ret != (num - rest) * sdebug_sector_size)
3416		return ret;
3417
3418	if (rest) {
3419		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3420			    fsp, rest * sdebug_sector_size,
3421			    sg_skip + ((num - rest) * sdebug_sector_size),
3422			    do_write);
3423	}
3424
3425	return ret;
3426}
3427
3428/* Returns number of bytes copied or -1 if error. */
3429static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3430{
3431	struct scsi_data_buffer *sdb = &scp->sdb;
3432
3433	if (!sdb->length)
3434		return 0;
3435	if (scp->sc_data_direction != DMA_TO_DEVICE)
3436		return -1;
3437	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3438			      num * sdebug_sector_size, 0, true);
3439}
3440
3441/* If the store at lba compares equal to the first num blocks of arr, then
3442 * copy the second num blocks of arr into the store at lba and return true.
3443 * If the comparison fails then return false. */
3444static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3445			      const u8 *arr, bool compare_only)
3446{
3447	bool res;
3448	u64 block, rest = 0;
3449	u32 store_blks = sdebug_store_sectors;
3450	u32 lb_size = sdebug_sector_size;
3451	u8 *fsp = sip->storep;
3452
3453	block = do_div(lba, store_blks);
3454	if (block + num > store_blks)
3455		rest = block + num - store_blks;
3456
3457	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3458	if (!res)
3459		return res;
3460	if (rest)
3461		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3462			      rest * lb_size);
3463	if (!res)
3464		return res;
3465	if (compare_only)
3466		return true;
3467	arr += num * lb_size;
3468	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3469	if (rest)
3470		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3471	return res;
3472}
3473
3474static __be16 dif_compute_csum(const void *buf, int len)
3475{
3476	__be16 csum;
3477
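	/* The guard format is selected by the "guard" module parameter:
	 * 0 yields the T10-DIF CRC16, non-zero the IP checksum (DIX). */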
3478	if (sdebug_guard)
3479		csum = (__force __be16)ip_compute_csum(buf, len);
3480	else
3481		csum = cpu_to_be16(crc_t10dif(buf, len));
3482
3483	return csum;
3484}
3485
3486static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3487		      sector_t sector, u32 ei_lba)
3488{
3489	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3490
3491	if (sdt->guard_tag != csum) {
3492		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3493			(unsigned long)sector,
3494			be16_to_cpu(sdt->guard_tag),
3495			be16_to_cpu(csum));
3496		return 0x01;
3497	}
3498	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3499	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3500		pr_err("REF check failed on sector %lu\n",
3501			(unsigned long)sector);
3502		return 0x03;
3503	}
3504	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3505	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3506		pr_err("REF check failed on sector %lu\n",
3507			(unsigned long)sector);
3508		return 0x03;
3509	}
3510	return 0;
3511}
3512
3513static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3514			  unsigned int sectors, bool read)
3515{
3516	size_t resid;
3517	void *paddr;
3518	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3519						scp->device->hostdata, true);
3520	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3521	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3522	struct sg_mapping_iter miter;
3523
3524	/* Bytes of protection data to copy into sgl */
3525	resid = sectors * sizeof(*dif_storep);
3526
3527	sg_miter_start(&miter, scsi_prot_sglist(scp),
3528		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3529		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3530
3531	while (sg_miter_next(&miter) && resid > 0) {
3532		size_t len = min_t(size_t, miter.length, resid);
3533		void *start = dif_store(sip, sector);
3534		size_t rest = 0;
3535
3536		if (dif_store_end < start + len)
3537			rest = start + len - dif_store_end;
3538
3539		paddr = miter.addr;
3540
3541		if (read)
3542			memcpy(paddr, start, len - rest);
3543		else
3544			memcpy(start, paddr, len - rest);
3545
3546		if (rest) {
3547			if (read)
3548				memcpy(paddr + len - rest, dif_storep, rest);
3549			else
3550				memcpy(dif_storep, paddr + len - rest, rest);
3551		}
3552
3553		sector += len / sizeof(*dif_storep);
3554		resid -= len;
3555	}
3556	sg_miter_stop(&miter);
3557}
3558
3559static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3560			    unsigned int sectors, u32 ei_lba)
3561{
3562	int ret = 0;
3563	unsigned int i;
3564	sector_t sector;
3565	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3566						scp->device->hostdata, true);
3567	struct t10_pi_tuple *sdt;
3568
3569	for (i = 0; i < sectors; i++, ei_lba++) {
3570		sector = start_sec + i;
3571		sdt = dif_store(sip, sector);
3572
3573		if (sdt->app_tag == cpu_to_be16(0xffff))
3574			continue;
3575
3576		/*
3577		 * Because scsi_debug acts as both initiator and
3578		 * target we proceed to verify the PI even if
3579		 * RDPROTECT=3. This is done so the "initiator" knows
3580		 * which type of error to return. Otherwise we would
3581		 * have to iterate over the PI twice.
3582		 */
3583		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3584			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3585					 sector, ei_lba);
3586			if (ret) {
3587				dif_errors++;
3588				break;
3589			}
3590		}
3591	}
3592
3593	dif_copy_prot(scp, start_sec, sectors, true);
3594	dix_reads++;
3595
3596	return ret;
3597}
3598
3599static inline void
3600sdeb_read_lock(struct sdeb_store_info *sip)
3601{
3602	if (sdebug_no_rwlock) {
3603		if (sip)
3604			__acquire(&sip->macc_lck);
3605		else
3606			__acquire(&sdeb_fake_rw_lck);
3607	} else {
3608		if (sip)
3609			read_lock(&sip->macc_lck);
3610		else
3611			read_lock(&sdeb_fake_rw_lck);
3612	}
3613}
3614
3615static inline void
3616sdeb_read_unlock(struct sdeb_store_info *sip)
3617{
3618	if (sdebug_no_rwlock) {
3619		if (sip)
3620			__release(&sip->macc_lck);
3621		else
3622			__release(&sdeb_fake_rw_lck);
3623	} else {
3624		if (sip)
3625			read_unlock(&sip->macc_lck);
3626		else
3627			read_unlock(&sdeb_fake_rw_lck);
3628	}
3629}
3630
3631static inline void
3632sdeb_write_lock(struct sdeb_store_info *sip)
3633{
3634	if (sdebug_no_rwlock) {
3635		if (sip)
3636			__acquire(&sip->macc_lck);
3637		else
3638			__acquire(&sdeb_fake_rw_lck);
3639	} else {
3640		if (sip)
3641			write_lock(&sip->macc_lck);
3642		else
3643			write_lock(&sdeb_fake_rw_lck);
3644	}
3645}
3646
3647static inline void
3648sdeb_write_unlock(struct sdeb_store_info *sip)
3649{
3650	if (sdebug_no_rwlock) {
3651		if (sip)
3652			__release(&sip->macc_lck);
3653		else
3654			__release(&sdeb_fake_rw_lck);
3655	} else {
3656		if (sip)
3657			write_unlock(&sip->macc_lck);
3658		else
3659			write_unlock(&sdeb_fake_rw_lck);
3660	}
3661}
3662
3663static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3664{
3665	bool check_prot;
3666	u32 num;
3667	u32 ei_lba;
3668	int ret;
3669	u64 lba;
3670	struct sdeb_store_info *sip = devip2sip(devip, true);
3671	u8 *cmd = scp->cmnd;
3672
3673	switch (cmd[0]) {
3674	case READ_16:
3675		ei_lba = 0;
3676		lba = get_unaligned_be64(cmd + 2);
3677		num = get_unaligned_be32(cmd + 10);
3678		check_prot = true;
3679		break;
3680	case READ_10:
3681		ei_lba = 0;
3682		lba = get_unaligned_be32(cmd + 2);
3683		num = get_unaligned_be16(cmd + 7);
3684		check_prot = true;
3685		break;
3686	case READ_6:
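		/* READ(6): 21-bit LBA packed into bytes 1..3; a transfer
		 * length (cmd[4]) of zero means 256 blocks */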
3687		ei_lba = 0;
3688		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3689		      (u32)(cmd[1] & 0x1f) << 16;
3690		num = (0 == cmd[4]) ? 256 : cmd[4];
3691		check_prot = true;
3692		break;
3693	case READ_12:
3694		ei_lba = 0;
3695		lba = get_unaligned_be32(cmd + 2);
3696		num = get_unaligned_be32(cmd + 6);
3697		check_prot = true;
3698		break;
3699	case XDWRITEREAD_10:
3700		ei_lba = 0;
3701		lba = get_unaligned_be32(cmd + 2);
3702		num = get_unaligned_be16(cmd + 7);
3703		check_prot = false;
3704		break;
3705	default:	/* assume READ(32) */
3706		lba = get_unaligned_be64(cmd + 12);
3707		ei_lba = get_unaligned_be32(cmd + 20);
3708		num = get_unaligned_be32(cmd + 28);
3709		check_prot = false;
3710		break;
3711	}
3712	if (unlikely(have_dif_prot && check_prot)) {
3713		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3714		    (cmd[1] & 0xe0)) {
3715			mk_sense_invalid_opcode(scp);
3716			return check_condition_result;
3717		}
3718		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3719		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3720		    (cmd[1] & 0xe0) == 0)
3721			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3722				    "to DIF device\n");
3723	}
3724	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3725		     atomic_read(&sdeb_inject_pending))) {
3726		num /= 2;
3727		atomic_set(&sdeb_inject_pending, 0);
3728	}
3729
3730	ret = check_device_access_params(scp, lba, num, false);
3731	if (ret)
3732		return ret;
3733	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3734		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3735		     ((lba + num) > sdebug_medium_error_start))) {
3736		/* claim unrecoverable read error */
3737		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3738		/* set info field and valid bit for fixed descriptor */
3739		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3740			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3741			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3742			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3743			put_unaligned_be32(ret, scp->sense_buffer + 3);
3744		}
3745		scsi_set_resid(scp, scsi_bufflen(scp));
3746		return check_condition_result;
3747	}
3748
3749	sdeb_read_lock(sip);
3750
3751	/* DIX + T10 DIF */
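	/*
	 * dif_verify() returns 0x01 on a guard tag mismatch and 0x03 on a
	 * reference tag mismatch, matching the ASCQs of the LOGICAL BLOCK
	 * GUARD CHECK FAILED and LOGICAL BLOCK REFERENCE TAG CHECK FAILED
	 * additional sense codes (asc 0x10) built below.
	 */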
3752	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3753		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3754		case 1: /* Guard tag error */
3755			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3756				sdeb_read_unlock(sip);
3757				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3758				return check_condition_result;
3759			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3760				sdeb_read_unlock(sip);
3761				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3762				return illegal_condition_result;
3763			}
3764			break;
3765		case 3: /* Reference tag error */
3766			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3767				sdeb_read_unlock(sip);
3768				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3769				return check_condition_result;
3770			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3771				sdeb_read_unlock(sip);
3772				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3773				return illegal_condition_result;
3774			}
3775			break;
3776		}
3777	}
3778
3779	ret = do_device_access(sip, scp, 0, lba, num, false, 0);
3780	sdeb_read_unlock(sip);
3781	if (unlikely(ret == -1))
3782		return DID_ERROR << 16;
3783
3784	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3785
3786	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3787		     atomic_read(&sdeb_inject_pending))) {
3788		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3789			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3790			atomic_set(&sdeb_inject_pending, 0);
3791			return check_condition_result;
3792		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3793			/* Logical block guard check failed */
3794			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3795			atomic_set(&sdeb_inject_pending, 0);
3796			return illegal_condition_result;
3797		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3798			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3799			atomic_set(&sdeb_inject_pending, 0);
3800			return illegal_condition_result;
3801		}
3802	}
3803	return 0;
3804}
3805
3806static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3807			     unsigned int sectors, u32 ei_lba)
3808{
3809	int ret;
3810	struct t10_pi_tuple *sdt;
3811	void *daddr;
3812	sector_t sector = start_sec;
3813	int ppage_offset;
3814	int dpage_offset;
3815	struct sg_mapping_iter diter;
3816	struct sg_mapping_iter piter;
3817
3818	BUG_ON(scsi_sg_count(SCpnt) == 0);
3819	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3820
3821	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3822			scsi_prot_sg_count(SCpnt),
3823			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3824	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3825			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3826
3827	/* For each protection page */
3828	while (sg_miter_next(&piter)) {
3829		dpage_offset = 0;
3830		if (WARN_ON(!sg_miter_next(&diter))) {
3831			ret = 0x01;
3832			goto out;
3833		}
3834
3835		for (ppage_offset = 0; ppage_offset < piter.length;
3836		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3837			/* If we're at the end of the current
3838			 * data page advance to the next one
3839			 */
3840			if (dpage_offset >= diter.length) {
3841				if (WARN_ON(!sg_miter_next(&diter))) {
3842					ret = 0x01;
3843					goto out;
3844				}
3845				dpage_offset = 0;
3846			}
3847
3848			sdt = piter.addr + ppage_offset;
3849			daddr = diter.addr + dpage_offset;
3850
3851			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3852				ret = dif_verify(sdt, daddr, sector, ei_lba);
3853				if (ret)
3854					goto out;
3855			}
3856
3857			sector++;
3858			ei_lba++;
3859			dpage_offset += sdebug_sector_size;
3860		}
3861		diter.consumed = dpage_offset;
3862		sg_miter_stop(&diter);
3863	}
3864	sg_miter_stop(&piter);
3865
3866	dif_copy_prot(SCpnt, start_sec, sectors, false);
3867	dix_writes++;
3868
3869	return 0;
3870
3871out:
3872	dif_errors++;
3873	sg_miter_stop(&diter);
3874	sg_miter_stop(&piter);
3875	return ret;
3876}
3877
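/*
 * Logical block provisioning: each bit in sip->map_storep covers one
 * unmap granularity's worth of LBAs. A non-zero unmap alignment shifts
 * that grid, which the adjustments below account for.
 */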
3878static unsigned long lba_to_map_index(sector_t lba)
3879{
3880	if (sdebug_unmap_alignment)
3881		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3882	sector_div(lba, sdebug_unmap_granularity);
3883	return lba;
3884}
3885
3886static sector_t map_index_to_lba(unsigned long index)
3887{
3888	sector_t lba = index * sdebug_unmap_granularity;
3889
3890	if (sdebug_unmap_alignment)
3891		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3892	return lba;
3893}
3894
3895static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3896			      unsigned int *num)
3897{
3898	sector_t end;
3899	unsigned int mapped;
3900	unsigned long index;
3901	unsigned long next;
3902
3903	index = lba_to_map_index(lba);
3904	mapped = test_bit(index, sip->map_storep);
3905
3906	if (mapped)
3907		next = find_next_zero_bit(sip->map_storep, map_size, index);
3908	else
3909		next = find_next_bit(sip->map_storep, map_size, index);
3910
3911	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3912	*num = end - lba;
3913	return mapped;
3914}
3915
3916static void map_region(struct sdeb_store_info *sip, sector_t lba,
3917		       unsigned int len)
3918{
3919	sector_t end = lba + len;
3920
3921	while (lba < end) {
3922		unsigned long index = lba_to_map_index(lba);
3923
3924		if (index < map_size)
3925			set_bit(index, sip->map_storep);
3926
3927		lba = map_index_to_lba(index + 1);
3928	}
3929}
3930
3931static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3932			 unsigned int len)
3933{
3934	sector_t end = lba + len;
3935	u8 *fsp = sip->storep;
3936
3937	while (lba < end) {
3938		unsigned long index = lba_to_map_index(lba);
3939
3940		if (lba == map_index_to_lba(index) &&
3941		    lba + sdebug_unmap_granularity <= end &&
3942		    index < map_size) {
3943			clear_bit(index, sip->map_storep);
3944			if (sdebug_lbprz) {  /* LBPRZ=1: zeroes, LBPRZ=2: 0xff_s */
3945				memset(fsp + lba * sdebug_sector_size,
3946				       (sdebug_lbprz & 1) ? 0 : 0xff,
3947				       sdebug_sector_size *
3948				       sdebug_unmap_granularity);
3949			}
3950			if (sip->dif_storep) {
3951				memset(sip->dif_storep + lba, 0xff,
3952				       sizeof(*sip->dif_storep) *
3953				       sdebug_unmap_granularity);
3954			}
3955		}
3956		lba = map_index_to_lba(index + 1);
3957	}
3958}
3959
3960static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3961{
3962	bool check_prot;
3963	u32 num;
3964	u8 group = 0;
3965	u32 ei_lba;
3966	int ret;
3967	u64 lba;
3968	struct sdeb_store_info *sip = devip2sip(devip, true);
3969	u8 *cmd = scp->cmnd;
3970
3971	switch (cmd[0]) {
3972	case WRITE_16:
3973		ei_lba = 0;
3974		lba = get_unaligned_be64(cmd + 2);
3975		num = get_unaligned_be32(cmd + 10);
3976		group = cmd[14] & 0x3f;
3977		check_prot = true;
3978		break;
3979	case WRITE_10:
3980		ei_lba = 0;
3981		lba = get_unaligned_be32(cmd + 2);
3982		group = cmd[6] & 0x3f;
3983		num = get_unaligned_be16(cmd + 7);
3984		check_prot = true;
3985		break;
3986	case WRITE_6:
3987		ei_lba = 0;
3988		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3989		      (u32)(cmd[1] & 0x1f) << 16;
3990		num = (0 == cmd[4]) ? 256 : cmd[4];
3991		check_prot = true;
3992		break;
3993	case WRITE_12:
3994		ei_lba = 0;
3995		lba = get_unaligned_be32(cmd + 2);
3996		num = get_unaligned_be32(cmd + 6);
3997		group = cmd[6] & 0x3f;
3998		check_prot = true;
3999		break;
4000	case 0x53:	/* XDWRITEREAD(10) */
4001		ei_lba = 0;
4002		lba = get_unaligned_be32(cmd + 2);
4003		group = cmd[6] & 0x1f;
4004		num = get_unaligned_be16(cmd + 7);
4005		check_prot = false;
4006		break;
4007	default:	/* assume WRITE(32) */
4008		group = cmd[6] & 0x3f;
4009		lba = get_unaligned_be64(cmd + 12);
4010		ei_lba = get_unaligned_be32(cmd + 20);
4011		num = get_unaligned_be32(cmd + 28);
4012		check_prot = false;
4013		break;
4014	}
4015	if (unlikely(have_dif_prot && check_prot)) {
4016		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4017		    (cmd[1] & 0xe0)) {
4018			mk_sense_invalid_opcode(scp);
4019			return check_condition_result;
4020		}
4021		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4022		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4023		    (cmd[1] & 0xe0) == 0)
4024			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4025				    "to DIF device\n");
4026	}
4027
4028	sdeb_write_lock(sip);
4029	ret = check_device_access_params(scp, lba, num, true);
4030	if (ret) {
4031		sdeb_write_unlock(sip);
4032		return ret;
4033	}
4034
4035	/* DIX + T10 DIF */
4036	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4037		switch (prot_verify_write(scp, lba, num, ei_lba)) {
4038		case 1: /* Guard tag error */
4039			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4040				sdeb_write_unlock(sip);
4041				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4042				return illegal_condition_result;
4043			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4044				sdeb_write_unlock(sip);
4045				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4046				return check_condition_result;
4047			}
4048			break;
4049		case 3: /* Reference tag error */
4050			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4051				sdeb_write_unlock(sip);
4052				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4053				return illegal_condition_result;
4054			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4055				sdeb_write_unlock(sip);
4056				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4057				return check_condition_result;
4058			}
4059			break;
4060		}
4061	}
4062
4063	ret = do_device_access(sip, scp, 0, lba, num, true, group);
4064	if (unlikely(scsi_debug_lbp()))
4065		map_region(sip, lba, num);
4066	/* If ZBC zone then bump its write pointer */
4067	if (sdebug_dev_is_zoned(devip))
4068		zbc_inc_wp(devip, lba, num);
4069	sdeb_write_unlock(sip);
4070	if (unlikely(-1 == ret))
4071		return DID_ERROR << 16;
4072	else if (unlikely(sdebug_verbose &&
4073			  (ret < (num * sdebug_sector_size))))
4074		sdev_printk(KERN_INFO, scp->device,
4075			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4076			    my_name, num * sdebug_sector_size, ret);
4077
4078	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4079		     atomic_read(&sdeb_inject_pending))) {
4080		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4081			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4082			atomic_set(&sdeb_inject_pending, 0);
4083			return check_condition_result;
4084		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4085			/* Logical block guard check failed */
4086			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4087			atomic_set(&sdeb_inject_pending, 0);
4088			return illegal_condition_result;
4089		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4090			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4091			atomic_set(&sdeb_inject_pending, 0);
4092			return illegal_condition_result;
4093		}
4094	}
4095	return 0;
4096}
4097
4098/*
4099 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4100 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4101 */
4102static int resp_write_scat(struct scsi_cmnd *scp,
4103			   struct sdebug_dev_info *devip)
4104{
4105	u8 *cmd = scp->cmnd;
4106	u8 *lrdp = NULL;
4107	u8 *up;
4108	struct sdeb_store_info *sip = devip2sip(devip, true);
4109	u8 wrprotect;
4110	u16 lbdof, num_lrd, k;
4111	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4112	u32 lb_size = sdebug_sector_size;
4113	u32 ei_lba;
4114	u64 lba;
4115	u8 group;
4116	int ret, res;
4117	bool is_16;
4118	static const u32 lrd_size = 32; /* + parameter list header size */
4119
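	/*
	 * Data-out buffer layout: the first lbdof logical blocks carry a
	 * 32 byte parameter list header followed by num_lrd LBA range
	 * descriptors of lrd_size (32) bytes each; the data to be written
	 * follows, starting at byte offset sg_off = lbdof_blen.
	 */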
4120	if (cmd[0] == VARIABLE_LENGTH_CMD) {
4121		is_16 = false;
4122		group = cmd[6] & 0x3f;
4123		wrprotect = (cmd[10] >> 5) & 0x7;
4124		lbdof = get_unaligned_be16(cmd + 12);
4125		num_lrd = get_unaligned_be16(cmd + 16);
4126		bt_len = get_unaligned_be32(cmd + 28);
4127	} else {        /* that leaves WRITE SCATTERED(16) */
4128		is_16 = true;
4129		wrprotect = (cmd[2] >> 5) & 0x7;
4130		lbdof = get_unaligned_be16(cmd + 4);
4131		num_lrd = get_unaligned_be16(cmd + 8);
4132		bt_len = get_unaligned_be32(cmd + 10);
4133		group = cmd[14] & 0x3f;
4134		if (unlikely(have_dif_prot)) {
4135			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4136			    wrprotect) {
4137				mk_sense_invalid_opcode(scp);
4138				return illegal_condition_result;
4139			}
4140			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4141			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4142			     wrprotect == 0)
4143				sdev_printk(KERN_ERR, scp->device,
4144					    "Unprotected WR to DIF device\n");
4145		}
4146	}
4147	if ((num_lrd == 0) || (bt_len == 0))
4148		return 0;       /* T10 says these do-nothings are not errors */
4149	if (lbdof == 0) {
4150		if (sdebug_verbose)
4151			sdev_printk(KERN_INFO, scp->device,
4152				"%s: %s: LB Data Offset field bad\n",
4153				my_name, __func__);
4154		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4155		return illegal_condition_result;
4156	}
4157	lbdof_blen = lbdof * lb_size;
4158	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4159		if (sdebug_verbose)
4160			sdev_printk(KERN_INFO, scp->device,
4161				"%s: %s: LBA range descriptors don't fit\n",
4162				my_name, __func__);
4163		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4164		return illegal_condition_result;
4165	}
4166	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4167	if (lrdp == NULL)
4168		return SCSI_MLQUEUE_HOST_BUSY;
4169	if (sdebug_verbose)
4170		sdev_printk(KERN_INFO, scp->device,
4171			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4172			my_name, __func__, lbdof_blen);
4173	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4174	if (res == -1) {
4175		ret = DID_ERROR << 16;
4176		goto err_out;
4177	}
4178
4179	sdeb_write_lock(sip);
4180	sg_off = lbdof_blen;
4181	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
4182	cum_lb = 0;
4183	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4184		lba = get_unaligned_be64(up + 0);
4185		num = get_unaligned_be32(up + 8);
4186		if (sdebug_verbose)
4187			sdev_printk(KERN_INFO, scp->device,
4188				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
4189				my_name, __func__, k, lba, num, sg_off);
4190		if (num == 0)
4191			continue;
4192		ret = check_device_access_params(scp, lba, num, true);
4193		if (ret)
4194			goto err_out_unlock;
4195		num_by = num * lb_size;
4196		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4197
4198		if ((cum_lb + num) > bt_len) {
4199			if (sdebug_verbose)
4200				sdev_printk(KERN_INFO, scp->device,
4201				    "%s: %s: sum of blocks > data provided\n",
4202				    my_name, __func__);
4203			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4204					0);
4205			ret = illegal_condition_result;
4206			goto err_out_unlock;
4207		}
4208
4209		/* DIX + T10 DIF */
4210		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4211			int prot_ret = prot_verify_write(scp, lba, num,
4212							 ei_lba);
4213
4214			if (prot_ret) {
4215				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4216						prot_ret);
4217				ret = illegal_condition_result;
4218				goto err_out_unlock;
4219			}
4220		}
4221
4222		ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
4223		/* If ZBC zone then bump its write pointer */
4224		if (sdebug_dev_is_zoned(devip))
4225			zbc_inc_wp(devip, lba, num);
4226		if (unlikely(scsi_debug_lbp()))
4227			map_region(sip, lba, num);
4228		if (unlikely(-1 == ret)) {
4229			ret = DID_ERROR << 16;
4230			goto err_out_unlock;
4231		} else if (unlikely(sdebug_verbose && (ret < num_by)))
4232			sdev_printk(KERN_INFO, scp->device,
4233			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4234			    my_name, num_by, ret);
4235
4236		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4237			     atomic_read(&sdeb_inject_pending))) {
4238			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4239				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4240				atomic_set(&sdeb_inject_pending, 0);
4241				ret = check_condition_result;
4242				goto err_out_unlock;
4243			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4244				/* Logical block guard check failed */
4245				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4246				atomic_set(&sdeb_inject_pending, 0);
4247				ret = illegal_condition_result;
4248				goto err_out_unlock;
4249			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4250				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4251				atomic_set(&sdeb_inject_pending, 0);
4252				ret = illegal_condition_result;
4253				goto err_out_unlock;
4254			}
4255		}
4256		sg_off += num_by;
4257		cum_lb += num;
4258	}
4259	ret = 0;
4260err_out_unlock:
4261	sdeb_write_unlock(sip);
4262err_out:
4263	kfree(lrdp);
4264	return ret;
4265}
4266
4267static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4268			   u32 ei_lba, bool unmap, bool ndob)
4269{
4270	struct scsi_device *sdp = scp->device;
4271	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4272	unsigned long long i;
4273	u64 block, lbaa;
4274	u32 lb_size = sdebug_sector_size;
4275	int ret;
4276	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4277						scp->device->hostdata, true);
4278	u8 *fs1p;
4279	u8 *fsp;
4280
4281	sdeb_write_lock(sip);
4282
4283	ret = check_device_access_params(scp, lba, num, true);
4284	if (ret) {
4285		sdeb_write_unlock(sip);
4286		return ret;
4287	}
4288
4289	if (unmap && scsi_debug_lbp()) {
4290		unmap_region(sip, lba, num);
4291		goto out;
4292	}
4293	lbaa = lba;
4294	block = do_div(lbaa, sdebug_store_sectors);
4295	/* if ndob then zero 1 logical block, else fetch 1 logical block */
4296	fsp = sip->storep;
4297	fs1p = fsp + (block * lb_size);
4298	if (ndob) {
4299		memset(fs1p, 0, lb_size);
4300		ret = 0;
4301	} else
4302		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4303
4304	if (-1 == ret) {
4305		sdeb_write_unlock(sip);
4306		return DID_ERROR << 16;
4307	} else if (sdebug_verbose && !ndob && (ret < lb_size))
4308		sdev_printk(KERN_INFO, scp->device,
4309			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
4310			    my_name, "write same", lb_size, ret);
4311
4312	/* Copy first sector to remaining blocks */
4313	for (i = 1 ; i < num ; i++) {
4314		lbaa = lba + i;
4315		block = do_div(lbaa, sdebug_store_sectors);
4316		memmove(fsp + (block * lb_size), fs1p, lb_size);
4317	}
4318	if (scsi_debug_lbp())
4319		map_region(sip, lba, num);
4320	/* If ZBC zone then bump its write pointer */
4321	if (sdebug_dev_is_zoned(devip))
4322		zbc_inc_wp(devip, lba, num);
4323out:
4324	sdeb_write_unlock(sip);
4325
4326	return 0;
4327}
4328
4329static int resp_write_same_10(struct scsi_cmnd *scp,
4330			      struct sdebug_dev_info *devip)
4331{
4332	u8 *cmd = scp->cmnd;
4333	u32 lba;
4334	u16 num;
4335	u32 ei_lba = 0;
4336	bool unmap = false;
4337
4338	if (cmd[1] & 0x8) {
4339		if (sdebug_lbpws10 == 0) {
4340			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4341			return check_condition_result;
4342		} else
4343			unmap = true;
4344	}
4345	lba = get_unaligned_be32(cmd + 2);
4346	num = get_unaligned_be16(cmd + 7);
4347	if (num > sdebug_write_same_length) {
4348		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4349		return check_condition_result;
4350	}
4351	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4352}
4353
4354static int resp_write_same_16(struct scsi_cmnd *scp,
4355			      struct sdebug_dev_info *devip)
4356{
4357	u8 *cmd = scp->cmnd;
4358	u64 lba;
4359	u32 num;
4360	u32 ei_lba = 0;
4361	bool unmap = false;
4362	bool ndob = false;
4363
4364	if (cmd[1] & 0x8) {	/* UNMAP */
4365		if (sdebug_lbpws == 0) {
4366			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4367			return check_condition_result;
4368		} else
4369			unmap = true;
4370	}
4371	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4372		ndob = true;
4373	lba = get_unaligned_be64(cmd + 2);
4374	num = get_unaligned_be32(cmd + 10);
4375	if (num > sdebug_write_same_length) {
4376		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4377		return check_condition_result;
4378	}
4379	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4380}
4381
4382/* Note the mode field is in the same position as the (lower) service action
4383 * field. For the Report supported operation codes command, SPC-4 suggests
4384 * each mode of this command should be reported separately; left for later. */
4385static int resp_write_buffer(struct scsi_cmnd *scp,
4386			     struct sdebug_dev_info *devip)
4387{
4388	u8 *cmd = scp->cmnd;
4389	struct scsi_device *sdp = scp->device;
4390	struct sdebug_dev_info *dp;
4391	u8 mode;
4392
4393	mode = cmd[1] & 0x1f;
4394	switch (mode) {
4395	case 0x4:	/* download microcode (MC) and activate (ACT) */
4396		/* set UAs on this device only */
4397		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4398		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4399		break;
4400	case 0x5:	/* download MC, save and ACT */
4401		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4402		break;
4403	case 0x6:	/* download MC with offsets and ACT */
4404		/* set UAs on most devices (LUs) in this target */
4405		list_for_each_entry(dp,
4406				    &devip->sdbg_host->dev_info_list,
4407				    dev_list)
4408			if (dp->target == sdp->id) {
4409				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4410				if (devip != dp)
4411					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4412						dp->uas_bm);
4413			}
4414		break;
4415	case 0x7:	/* download MC with offsets, save, and ACT */
4416		/* set UA on all devices (LUs) in this target */
4417		list_for_each_entry(dp,
4418				    &devip->sdbg_host->dev_info_list,
4419				    dev_list)
4420			if (dp->target == sdp->id)
4421				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4422					dp->uas_bm);
4423		break;
4424	default:
4425		/* do nothing for this command for other mode values */
4426		break;
4427	}
4428	return 0;
4429}
4430
4431static int resp_comp_write(struct scsi_cmnd *scp,
4432			   struct sdebug_dev_info *devip)
4433{
4434	u8 *cmd = scp->cmnd;
4435	u8 *arr;
4436	struct sdeb_store_info *sip = devip2sip(devip, true);
4437	u64 lba;
4438	u32 dnum;
4439	u32 lb_size = sdebug_sector_size;
4440	u8 num;
4441	int ret;
4442	int retval = 0;
4443
4444	lba = get_unaligned_be64(cmd + 2);
4445	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4446	if (0 == num)
4447		return 0;	/* degenerate case, not an error */
4448	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4449	    (cmd[1] & 0xe0)) {
4450		mk_sense_invalid_opcode(scp);
4451		return check_condition_result;
4452	}
4453	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4454	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4455	    (cmd[1] & 0xe0) == 0)
4456		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4457			    "to DIF device\n");
4458	ret = check_device_access_params(scp, lba, num, false);
4459	if (ret)
4460		return ret;
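	/* The data-out buffer holds 2 * num blocks: num blocks to compare
	 * against the store followed by num blocks to write on a match. */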
4461	dnum = 2 * num;
4462	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4463	if (NULL == arr) {
4464		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4465				INSUFF_RES_ASCQ);
4466		return check_condition_result;
4467	}
4468
4469	sdeb_write_lock(sip);
4470
4471	ret = do_dout_fetch(scp, dnum, arr);
4472	if (ret == -1) {
4473		retval = DID_ERROR << 16;
4474		goto cleanup;
4475	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4476		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4477			    "indicated=%u, IO sent=%d bytes\n", my_name,
4478			    dnum * lb_size, ret);
4479	if (!comp_write_worker(sip, lba, num, arr, false)) {
4480		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4481		retval = check_condition_result;
4482		goto cleanup;
4483	}
4484	if (scsi_debug_lbp())
4485		map_region(sip, lba, num);
4486cleanup:
4487	sdeb_write_unlock(sip);
4488	kfree(arr);
4489	return retval;
4490}
4491
4492struct unmap_block_desc {
4493	__be64	lba;
4494	__be32	blocks;
4495	__be32	__reserved;
4496};
4497
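/*
 * UNMAP parameter list: an 8 byte header (unmap data length in bytes
 * 0..1, block descriptor data length in bytes 2..3) followed by 16 byte
 * block descriptors laid out as struct unmap_block_desc above.
 */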
4498static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4499{
4500	unsigned char *buf;
4501	struct unmap_block_desc *desc;
4502	struct sdeb_store_info *sip = devip2sip(devip, true);
4503	unsigned int i, payload_len, descriptors;
4504	int ret;
4505
4506	if (!scsi_debug_lbp())
4507		return 0;	/* fib and say it's done */
4508	payload_len = get_unaligned_be16(scp->cmnd + 7);
4509	BUG_ON(scsi_bufflen(scp) != payload_len);
4510
4511	descriptors = (payload_len - 8) / 16;
4512	if (descriptors > sdebug_unmap_max_desc) {
4513		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4514		return check_condition_result;
4515	}
4516
4517	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4518	if (!buf) {
4519		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4520				INSUFF_RES_ASCQ);
4521		return check_condition_result;
4522	}
4523
4524	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4525
4526	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4527	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4528
4529	desc = (void *)&buf[8];
4530
4531	sdeb_write_lock(sip);
4532
4533	for (i = 0 ; i < descriptors ; i++) {
4534		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4535		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4536
4537		ret = check_device_access_params(scp, lba, num, true);
4538		if (ret)
4539			goto out;
4540
4541		unmap_region(sip, lba, num);
4542	}
4543
4544	ret = 0;
4545
4546out:
4547	sdeb_write_unlock(sip);
4548	kfree(buf);
4549
4550	return ret;
4551}
4552
4553#define SDEBUG_GET_LBA_STATUS_LEN 32
4554
4555static int resp_get_lba_status(struct scsi_cmnd *scp,
4556			       struct sdebug_dev_info *devip)
4557{
4558	u8 *cmd = scp->cmnd;
4559	u64 lba;
4560	u32 alloc_len, mapped, num;
4561	int ret;
4562	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4563
4564	lba = get_unaligned_be64(cmd + 2);
4565	alloc_len = get_unaligned_be32(cmd + 10);
4566
4567	if (alloc_len < 24)
4568		return 0;
4569
4570	ret = check_device_access_params(scp, lba, 1, false);
4571	if (ret)
4572		return ret;
4573
4574	if (scsi_debug_lbp()) {
4575		struct sdeb_store_info *sip = devip2sip(devip, true);
4576
4577		mapped = map_state(sip, lba, &num);
4578	} else {
4579		mapped = 1;
4580		/* following just in case virtual_gb changed */
4581		sdebug_capacity = get_sdebug_capacity();
4582		if (sdebug_capacity - lba <= 0xffffffff)
4583			num = sdebug_capacity - lba;
4584		else
4585			num = 0xffffffff;
4586	}
4587
4588	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4589	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4590	put_unaligned_be64(lba, arr + 8);	/* LBA */
4591	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4592	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4593
4594	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4595}
4596
4597static int resp_get_stream_status(struct scsi_cmnd *scp,
4598				  struct sdebug_dev_info *devip)
4599{
4600	u16 starting_stream_id, stream_id;
4601	const u8 *cmd = scp->cmnd;
4602	u32 alloc_len, offset;
4603	u8 arr[256] = {};
4604	struct scsi_stream_status_header *h = (void *)arr;
4605
4606	starting_stream_id = get_unaligned_be16(cmd + 4);
4607	alloc_len = get_unaligned_be32(cmd + 10);
4608
4609	if (alloc_len < 8) {
4610		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4611		return check_condition_result;
4612	}
4613
4614	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4615		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4616		return check_condition_result;
4617	}
4618
4619	/*
4620	 * The GET STREAM STATUS command only reports status information
4621	 * about open streams. Treat the non-permanent stream as open.
4622	 */
4623	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4624			   &h->number_of_open_streams);
4625
4626	for (offset = 8, stream_id = starting_stream_id;
4627	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4628		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4629	     offset += 8, stream_id++) {
4630		struct scsi_stream_status *stream_status = (void *)arr + offset;
4631
4632		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4633		put_unaligned_be16(stream_id,
4634				   &stream_status->stream_identifier);
4635		stream_status->rel_lifetime = stream_id + 1;
4636	}
4637	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4638
4639	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4640}
4641
4642static int resp_sync_cache(struct scsi_cmnd *scp,
4643			   struct sdebug_dev_info *devip)
4644{
4645	int res = 0;
4646	u64 lba;
4647	u32 num_blocks;
4648	u8 *cmd = scp->cmnd;
4649
4650	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4651		lba = get_unaligned_be32(cmd + 2);
4652		num_blocks = get_unaligned_be16(cmd + 7);
4653	} else {				/* SYNCHRONIZE_CACHE(16) */
4654		lba = get_unaligned_be64(cmd + 2);
4655		num_blocks = get_unaligned_be32(cmd + 10);
4656	}
4657	if (lba + num_blocks > sdebug_capacity) {
4658		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4659		return check_condition_result;
4660	}
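	/*
	 * Respond without the simulated command delay when the IMMED bit
	 * (cmd[1] & 0x2) is set or nothing has been written since the last
	 * sync; SDEG_RES_IMMED_MASK signals that to the dispatch code.
	 */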
4661	if (!write_since_sync || (cmd[1] & 0x2))
4662		res = SDEG_RES_IMMED_MASK;
4663	else		/* delay if write_since_sync and IMMED clear */
4664		write_since_sync = false;
4665	return res;
4666}
4667
4668/*
4669 * Assuming LBA+num_blocks is not out-of-range, this function returns
4670 * CONDITION MET if the specified blocks will fit (or already sit) in the
4671 * cache, and a GOOD status otherwise. Model a disk with a big cache and
4672 * yield CONDITION MET. It actually tries to bring the backing-store range
4673 * into the cache associated with the CPU(s).
4674 */
4675static int resp_pre_fetch(struct scsi_cmnd *scp,
4676			  struct sdebug_dev_info *devip)
4677{
4678	int res = 0;
4679	u64 lba;
4680	u64 block, rest = 0;
4681	u32 nblks;
4682	u8 *cmd = scp->cmnd;
4683	struct sdeb_store_info *sip = devip2sip(devip, true);
4684	u8 *fsp = sip->storep;
4685
4686	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4687		lba = get_unaligned_be32(cmd + 2);
4688		nblks = get_unaligned_be16(cmd + 7);
4689	} else {			/* PRE-FETCH(16) */
4690		lba = get_unaligned_be64(cmd + 2);
4691		nblks = get_unaligned_be32(cmd + 10);
4692	}
4693	if (lba + nblks > sdebug_capacity) {
4694		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4695		return check_condition_result;
4696	}
4697	if (!fsp)
4698		goto fini;
4699	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4700	block = do_div(lba, sdebug_store_sectors);
4701	if (block + nblks > sdebug_store_sectors)
4702		rest = block + nblks - sdebug_store_sectors;
4703
4704	/* Try to bring the PRE-FETCH range into CPU's cache */
4705	sdeb_read_lock(sip);
4706	prefetch_range(fsp + (sdebug_sector_size * block),
4707		       (nblks - rest) * sdebug_sector_size);
4708	if (rest)
4709		prefetch_range(fsp, rest * sdebug_sector_size);
4710	sdeb_read_unlock(sip);
4711fini:
4712	if (cmd[1] & 0x2)
4713		res = SDEG_RES_IMMED_MASK;
4714	return res | condition_met_result;
4715}
4716
4717#define RL_BUCKET_ELEMS 8
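/* Build the REPORT LUNS response in buckets of 8 entries (64 bytes);
 * the 8 byte response header occupies the first slot of bucket 0. */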
4718
4719/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4720 * (W-LUN), the normal Linux scanning logic does not associate it with a
4721 * device (e.g. /dev/sg7). The following magic will make that association:
4722 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4723 * where <n> is a host number. If there are multiple targets in a host then
4724 * the above will associate a W-LUN to each target. To only get a W-LUN
4725 * for target 2, then use "echo '- 2 49409' > scan" .
4726 */
4727static int resp_report_luns(struct scsi_cmnd *scp,
4728			    struct sdebug_dev_info *devip)
4729{
4730	unsigned char *cmd = scp->cmnd;
4731	unsigned int alloc_len;
4732	unsigned char select_report;
4733	u64 lun;
4734	struct scsi_lun *lun_p;
4735	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4736	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4737	unsigned int wlun_cnt;	/* report luns W-LUN count */
4738	unsigned int tlun_cnt;	/* total LUN count */
4739	unsigned int rlen;	/* response length (in bytes) */
4740	int k, j, n, res;
4741	unsigned int off_rsp = 0;
4742	const int sz_lun = sizeof(struct scsi_lun);
4743
4744	clear_luns_changed_on_target(devip);
4745
4746	select_report = cmd[2];
4747	alloc_len = get_unaligned_be32(cmd + 6);
4748
4749	if (alloc_len < 4) {
4750		pr_err("alloc len too small %d\n", alloc_len);
4751		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4752		return check_condition_result;
4753	}
4754
4755	switch (select_report) {
4756	case 0:		/* all LUNs apart from W-LUNs */
4757		lun_cnt = sdebug_max_luns;
4758		wlun_cnt = 0;
4759		break;
4760	case 1:		/* only W-LUNs */
4761		lun_cnt = 0;
4762		wlun_cnt = 1;
4763		break;
4764	case 2:		/* all LUNs */
4765		lun_cnt = sdebug_max_luns;
4766		wlun_cnt = 1;
4767		break;
4768	case 0x10:	/* only administrative LUs */
4769	case 0x11:	/* see SPC-5 */
4770	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4771	default:
4772		pr_debug("select report invalid %d\n", select_report);
4773		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4774		return check_condition_result;
4775	}
4776
4777	if (sdebug_no_lun_0 && (lun_cnt > 0))
4778		--lun_cnt;
4779
4780	tlun_cnt = lun_cnt + wlun_cnt;
4781	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4782	scsi_set_resid(scp, scsi_bufflen(scp));
4783	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4784		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4785
4786	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4787	lun = sdebug_no_lun_0 ? 1 : 0;
4788	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4789		memset(arr, 0, sizeof(arr));
4790		lun_p = (struct scsi_lun *)&arr[0];
4791		if (k == 0) {
4792			put_unaligned_be32(rlen, &arr[0]);
4793			++lun_p;
4794			j = 1;
4795		}
4796		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4797			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4798				break;
4799			int_to_scsilun(lun++, lun_p);
4800			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4801				lun_p->scsi_lun[0] |= 0x40;
4802		}
4803		if (j < RL_BUCKET_ELEMS)
4804			break;
4805		n = j * sz_lun;
4806		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4807		if (res)
4808			return res;
4809		off_rsp += n;
4810	}
4811	if (wlun_cnt) {
4812		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4813		++j;
4814	}
4815	if (j > 0)
4816		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4817	return res;
4818}
4819
4820static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4821{
4822	bool is_bytchk3 = false;
4823	u8 bytchk;
4824	int ret, j;
4825	u32 vnum, a_num, off;
4826	const u32 lb_size = sdebug_sector_size;
4827	u64 lba;
4828	u8 *arr;
4829	u8 *cmd = scp->cmnd;
4830	struct sdeb_store_info *sip = devip2sip(devip, true);
4831
4832	bytchk = (cmd[1] >> 1) & 0x3;
4833	if (bytchk == 0) {
4834		return 0;	/* always claim internal verify okay */
4835	} else if (bytchk == 2) {
4836		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4837		return check_condition_result;
4838	} else if (bytchk == 3) {
4839		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4840	}
4841	switch (cmd[0]) {
4842	case VERIFY_16:
4843		lba = get_unaligned_be64(cmd + 2);
4844		vnum = get_unaligned_be32(cmd + 10);
4845		break;
4846	case VERIFY:		/* is VERIFY(10) */
4847		lba = get_unaligned_be32(cmd + 2);
4848		vnum = get_unaligned_be16(cmd + 7);
4849		break;
4850	default:
4851		mk_sense_invalid_opcode(scp);
4852		return check_condition_result;
4853	}
4854	if (vnum == 0)
4855		return 0;	/* not an error */
4856	a_num = is_bytchk3 ? 1 : vnum;
4857	/* Treat following check like one for read (i.e. no write) access */
4858	ret = check_device_access_params(scp, lba, a_num, false);
4859	if (ret)
4860		return ret;
4861
4862	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4863	if (!arr) {
4864		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4865				INSUFF_RES_ASCQ);
4866		return check_condition_result;
4867	}
4868	/* Not changing store, so only need read access */
4869	sdeb_read_lock(sip);
4870
4871	ret = do_dout_fetch(scp, a_num, arr);
4872	if (ret == -1) {
4873		ret = DID_ERROR << 16;
4874		goto cleanup;
4875	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4876		sdev_printk(KERN_INFO, scp->device,
4877			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4878			    my_name, __func__, a_num * lb_size, ret);
4879	}
4880	if (is_bytchk3) {
4881		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4882			memcpy(arr + off, arr, lb_size);
4883	}
4884	ret = 0;
4885	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4886		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4887		ret = check_condition_result;
4888		goto cleanup;
4889	}
4890cleanup:
4891	sdeb_read_unlock(sip);
4892	kfree(arr);
4893	return ret;
4894}
4895
4896#define RZONES_DESC_HD 64
4897
4898/* Report zones depending on start LBA and reporting options */
4899static int resp_report_zones(struct scsi_cmnd *scp,
4900			     struct sdebug_dev_info *devip)
4901{
4902	unsigned int rep_max_zones, nrz = 0;
4903	int ret = 0;
4904	u32 alloc_len, rep_opts, rep_len;
4905	bool partial;
4906	u64 lba, zs_lba;
4907	u8 *arr = NULL, *desc;
4908	u8 *cmd = scp->cmnd;
4909	struct sdeb_zone_state *zsp = NULL;
4910	struct sdeb_store_info *sip = devip2sip(devip, false);
4911
4912	if (!sdebug_dev_is_zoned(devip)) {
4913		mk_sense_invalid_opcode(scp);
4914		return check_condition_result;
4915	}
4916	zs_lba = get_unaligned_be64(cmd + 2);
4917	alloc_len = get_unaligned_be32(cmd + 10);
4918	if (alloc_len == 0)
4919		return 0;	/* not an error */
4920	rep_opts = cmd[14] & 0x3f;
4921	partial = cmd[14] & 0x80;
4922
4923	if (zs_lba >= sdebug_capacity) {
4924		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4925		return check_condition_result;
4926	}
4927
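	/* Both the response header and each zone descriptor occupy
	 * RZONES_DESC_HD (64) bytes. */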
4928	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4929
4930	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4931	if (!arr) {
4932		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4933				INSUFF_RES_ASCQ);
4934		return check_condition_result;
4935	}
4936
4937	sdeb_read_lock(sip);
4938
4939	desc = arr + 64;
4940	for (lba = zs_lba; lba < sdebug_capacity;
4941	     lba = zsp->z_start + zsp->z_size) {
4942		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4943			break;
4944		zsp = zbc_zone(devip, lba);
4945		switch (rep_opts) {
4946		case 0x00:
4947			/* All zones */
4948			break;
4949		case 0x01:
4950			/* Empty zones */
4951			if (zsp->z_cond != ZC1_EMPTY)
4952				continue;
4953			break;
4954		case 0x02:
4955			/* Implicit open zones */
4956			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4957				continue;
4958			break;
4959		case 0x03:
4960			/* Explicit open zones */
4961			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4962				continue;
4963			break;
4964		case 0x04:
4965			/* Closed zones */
4966			if (zsp->z_cond != ZC4_CLOSED)
4967				continue;
4968			break;
4969		case 0x05:
4970			/* Full zones */
4971			if (zsp->z_cond != ZC5_FULL)
4972				continue;
4973			break;
4974		case 0x06:
4975		case 0x07:
4976		case 0x10:
4977			/*
4978			 * Read-only, offline, reset WP recommended are
4979			 * not emulated: no zones to report.
4980			 */
4981			continue;
4982		case 0x11:
4983			/* non-seq-resource set */
4984			if (!zsp->z_non_seq_resource)
4985				continue;
4986			break;
4987		case 0x3e:
4988			/* All zones except gap zones. */
4989			if (zbc_zone_is_gap(zsp))
4990				continue;
4991			break;
4992		case 0x3f:
4993			/* Not write pointer (conventional) zones */
4994			if (zbc_zone_is_seq(zsp))
4995				continue;
4996			break;
4997		default:
4998			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4999					INVALID_FIELD_IN_CDB, 0);
5000			ret = check_condition_result;
5001			goto fini;
5002		}
5003
5004		if (nrz < rep_max_zones) {
5005			/* Fill zone descriptor */
5006			desc[0] = zsp->z_type;
5007			desc[1] = zsp->z_cond << 4;
5008			if (zsp->z_non_seq_resource)
5009				desc[1] |= 1 << 1;
5010			put_unaligned_be64((u64)zsp->z_size, desc + 8);
5011			put_unaligned_be64((u64)zsp->z_start, desc + 16);
5012			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5013			desc += 64;
5014		}
5015
5016		if (partial && nrz >= rep_max_zones)
5017			break;
5018
5019		nrz++;
5020	}
5021
5022	/* Report header */
5023	/* Zone list length. */
5024	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5025	/* Maximum LBA */
5026	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5027	/* Zone starting LBA granularity. */
5028	if (devip->zcap < devip->zsize)
5029		put_unaligned_be64(devip->zsize, arr + 16);
5030
5031	rep_len = (unsigned long)desc - (unsigned long)arr;
5032	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5033
5034fini:
5035	sdeb_read_unlock(sip);
5036	kfree(arr);
5037	return ret;
5038}
5039
5040/* Logic transplanted from tcmu-runner, file_zbc.c */
5041static void zbc_open_all(struct sdebug_dev_info *devip)
5042{
5043	struct sdeb_zone_state *zsp = &devip->zstate[0];
5044	unsigned int i;
5045
5046	for (i = 0; i < devip->nr_zones; i++, zsp++) {
5047		if (zsp->z_cond == ZC4_CLOSED)
5048			zbc_open_zone(devip, &devip->zstate[i], true);
5049	}
5050}
5051
5052static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5053{
5054	int res = 0;
5055	u64 z_id;
5056	enum sdebug_z_cond zc;
5057	u8 *cmd = scp->cmnd;
5058	struct sdeb_zone_state *zsp;
5059	bool all = cmd[14] & 0x01;
5060	struct sdeb_store_info *sip = devip2sip(devip, false);
5061
5062	if (!sdebug_dev_is_zoned(devip)) {
5063		mk_sense_invalid_opcode(scp);
5064		return check_condition_result;
5065	}
5066
5067	sdeb_write_lock(sip);
5068
5069	if (all) {
5070		/* Check if all closed zones can be opened */
5071		if (devip->max_open &&
5072		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
5073			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5074					INSUFF_ZONE_ASCQ);
5075			res = check_condition_result;
5076			goto fini;
5077		}
5078		/* Open all closed zones */
5079		zbc_open_all(devip);
5080		goto fini;
5081	}
5082
5083	/* Open the specified zone */
5084	z_id = get_unaligned_be64(cmd + 2);
5085	if (z_id >= sdebug_capacity) {
5086		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5087		res = check_condition_result;
5088		goto fini;
5089	}
5090
5091	zsp = zbc_zone(devip, z_id);
5092	if (z_id != zsp->z_start) {
5093		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5094		res = check_condition_result;
5095		goto fini;
5096	}
5097	if (zbc_zone_is_conv(zsp)) {
5098		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5099		res = check_condition_result;
5100		goto fini;
5101	}
5102
5103	zc = zsp->z_cond;
5104	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
5105		goto fini;
5106
5107	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
5108		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5109				INSUFF_ZONE_ASCQ);
5110		res = check_condition_result;
5111		goto fini;
5112	}
5113
5114	zbc_open_zone(devip, zsp, true);
5115fini:
5116	sdeb_write_unlock(sip);
5117	return res;
5118}
5119
5120static void zbc_close_all(struct sdebug_dev_info *devip)
5121{
5122	unsigned int i;
5123
5124	for (i = 0; i < devip->nr_zones; i++)
5125		zbc_close_zone(devip, &devip->zstate[i]);
5126}
5127
5128static int resp_close_zone(struct scsi_cmnd *scp,
5129			   struct sdebug_dev_info *devip)
5130{
5131	int res = 0;
5132	u64 z_id;
5133	u8 *cmd = scp->cmnd;
5134	struct sdeb_zone_state *zsp;
5135	bool all = cmd[14] & 0x01;
5136	struct sdeb_store_info *sip = devip2sip(devip, false);
5137
5138	if (!sdebug_dev_is_zoned(devip)) {
5139		mk_sense_invalid_opcode(scp);
5140		return check_condition_result;
5141	}
5142
5143	sdeb_write_lock(sip);
5144
5145	if (all) {
5146		zbc_close_all(devip);
5147		goto fini;
5148	}
5149
5150	/* Close the specified zone */
5151	z_id = get_unaligned_be64(cmd + 2);
5152	if (z_id >= sdebug_capacity) {
5153		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5154		res = check_condition_result;
5155		goto fini;
5156	}
5157
5158	zsp = zbc_zone(devip, z_id);
5159	if (z_id != zsp->z_start) {
5160		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5161		res = check_condition_result;
5162		goto fini;
5163	}
5164	if (zbc_zone_is_conv(zsp)) {
5165		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5166		res = check_condition_result;
5167		goto fini;
5168	}
5169
5170	zbc_close_zone(devip, zsp);
5171fini:
5172	sdeb_write_unlock(sip);
5173	return res;
5174}
5175
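/*
 * Make a zone FULL: close it first if open, adjust the closed-zone count,
 * then advance the write pointer to the end of the zone. With @empty true,
 * EMPTY zones are finished as well (the FINISH ZONE command case), while
 * zbc_finish_all() passes false so untouched zones are left alone.
 */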
5176static void zbc_finish_zone(struct sdebug_dev_info *devip,
5177			    struct sdeb_zone_state *zsp, bool empty)
5178{
5179	enum sdebug_z_cond zc = zsp->z_cond;
5180
5181	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5182	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5183		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5184			zbc_close_zone(devip, zsp);
5185		if (zsp->z_cond == ZC4_CLOSED)
5186			devip->nr_closed--;
5187		zsp->z_wp = zsp->z_start + zsp->z_size;
5188		zsp->z_cond = ZC5_FULL;
5189	}
5190}
5191
5192static void zbc_finish_all(struct sdebug_dev_info *devip)
5193{
5194	unsigned int i;
5195
5196	for (i = 0; i < devip->nr_zones; i++)
5197		zbc_finish_zone(devip, &devip->zstate[i], false);
5198}
5199
5200static int resp_finish_zone(struct scsi_cmnd *scp,
5201			    struct sdebug_dev_info *devip)
5202{
5203	struct sdeb_zone_state *zsp;
5204	int res = 0;
5205	u64 z_id;
5206	u8 *cmd = scp->cmnd;
5207	bool all = cmd[14] & 0x01;
5208	struct sdeb_store_info *sip = devip2sip(devip, false);
5209
5210	if (!sdebug_dev_is_zoned(devip)) {
5211		mk_sense_invalid_opcode(scp);
5212		return check_condition_result;
5213	}
5214
5215	sdeb_write_lock(sip);
5216
5217	if (all) {
5218		zbc_finish_all(devip);
5219		goto fini;
5220	}
5221
5222	/* Finish the specified zone */
5223	z_id = get_unaligned_be64(cmd + 2);
5224	if (z_id >= sdebug_capacity) {
5225		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5226		res = check_condition_result;
5227		goto fini;
5228	}
5229
5230	zsp = zbc_zone(devip, z_id);
5231	if (z_id != zsp->z_start) {
5232		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5233		res = check_condition_result;
5234		goto fini;
5235	}
5236	if (zbc_zone_is_conv(zsp)) {
5237		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5238		res = check_condition_result;
5239		goto fini;
5240	}
5241
5242	zbc_finish_zone(devip, zsp, true);
5243fini:
5244	sdeb_write_unlock(sip);
5245	return res;
5246}
5247
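/*
 * Reset a zone's write pointer: close the zone if it is open, zero the
 * data already written to the backing store, and return it to EMPTY.
 */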
5248static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5249			 struct sdeb_zone_state *zsp)
5250{
5251	enum sdebug_z_cond zc;
5252	struct sdeb_store_info *sip = devip2sip(devip, false);
5253
5254	if (!zbc_zone_is_seq(zsp))
5255		return;
5256
5257	zc = zsp->z_cond;
5258	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5259		zbc_close_zone(devip, zsp);
5260
5261	if (zsp->z_cond == ZC4_CLOSED)
5262		devip->nr_closed--;
5263
5264	if (zsp->z_wp > zsp->z_start)
5265		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5266		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5267
5268	zsp->z_non_seq_resource = false;
5269	zsp->z_wp = zsp->z_start;
5270	zsp->z_cond = ZC1_EMPTY;
5271}
5272
5273static void zbc_rwp_all(struct sdebug_dev_info *devip)
5274{
5275	unsigned int i;
5276
5277	for (i = 0; i < devip->nr_zones; i++)
5278		zbc_rwp_zone(devip, &devip->zstate[i]);
5279}
5280
5281static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5282{
5283	struct sdeb_zone_state *zsp;
5284	int res = 0;
5285	u64 z_id;
5286	u8 *cmd = scp->cmnd;
5287	bool all = cmd[14] & 0x01;
5288	struct sdeb_store_info *sip = devip2sip(devip, false);
5289
5290	if (!sdebug_dev_is_zoned(devip)) {
5291		mk_sense_invalid_opcode(scp);
5292		return check_condition_result;
5293	}
5294
5295	sdeb_write_lock(sip);
5296
5297	if (all) {
5298		zbc_rwp_all(devip);
5299		goto fini;
5300	}
5301
5302	z_id = get_unaligned_be64(cmd + 2);
5303	if (z_id >= sdebug_capacity) {
5304		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5305		res = check_condition_result;
5306		goto fini;
5307	}
5308
5309	zsp = zbc_zone(devip, z_id);
5310	if (z_id != zsp->z_start) {
5311		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5312		res = check_condition_result;
5313		goto fini;
5314	}
5315	if (zbc_zone_is_conv(zsp)) {
5316		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5317		res = check_condition_result;
5318		goto fini;
5319	}
5320
5321	zbc_rwp_zone(devip, zsp);
5322fini:
5323	sdeb_write_unlock(sip);
5324	return res;
5325}
5326
5327static u32 get_tag(struct scsi_cmnd *cmnd)
5328{
5329	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5330}
5331
5332/* Queued (deferred) command completions converge here. */
5333static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5334{
5335	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5336	unsigned long flags;
5337	struct scsi_cmnd *scp = sqcp->scmd;
5338	struct sdebug_scsi_cmd *sdsc;
5339	bool aborted;
5340
5341	if (sdebug_statistics) {
5342		atomic_inc(&sdebug_completions);
5343		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5344			atomic_inc(&sdebug_miss_cpus);
5345	}
5346
5347	if (!scp) {
5348		pr_err("scmd=NULL\n");
5349		goto out;
5350	}
5351
5352	sdsc = scsi_cmd_priv(scp);
5353	spin_lock_irqsave(&sdsc->lock, flags);
5354	aborted = sd_dp->aborted;
5355	if (unlikely(aborted))
5356		sd_dp->aborted = false;
5357	ASSIGN_QUEUED_CMD(scp, NULL);
5358
5359	spin_unlock_irqrestore(&sdsc->lock, flags);
5360
5361	if (aborted) {
5362		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5363		blk_abort_request(scsi_cmd_to_rq(scp));
5364		goto out;
5365	}
5366
5367	scsi_done(scp); /* callback to mid level */
5368out:
5369	sdebug_free_queued_cmd(sqcp);
5370}
5371
5372/* When the high resolution timer goes off, this function is called. */
5373static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5374{
5375	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5376						  hrt);
5377	sdebug_q_cmd_complete(sd_dp);
5378	return HRTIMER_NORESTART;
5379}
5380
5381/* When the work queue runs the deferred work, it calls this function. */
5382static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5383{
5384	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5385						  ew.work);
5386	sdebug_q_cmd_complete(sd_dp);
5387}
5388
5389static bool got_shared_uuid;
5390static uuid_t shared_uuid;
5391
5392static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5393{
5394	struct sdeb_zone_state *zsp;
5395	sector_t capacity = get_sdebug_capacity();
5396	sector_t conv_capacity;
5397	sector_t zstart = 0;
5398	unsigned int i;
5399
5400	/*
5401	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5402	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5403	 * use the specified zone size checking that at least 2 zones can be
5404	 * created for the device.
5405	 */
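	/*
	 * Worked example (assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512 byte
	 * logical blocks): a 256 MiB ramdisk holds only two default sized
	 * zones, so the loop below halves zsize once to 64 MiB, giving the
	 * required minimum of four zones.
	 */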
5406	if (!sdeb_zbc_zone_size_mb) {
5407		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5408			>> ilog2(sdebug_sector_size);
5409		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5410			devip->zsize >>= 1;
5411		if (devip->zsize < 2) {
5412			pr_err("Device capacity too small\n");
5413			return -EINVAL;
5414		}
5415	} else {
5416		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5417			pr_err("Zone size is not a power of 2\n");
5418			return -EINVAL;
5419		}
5420		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5421			>> ilog2(sdebug_sector_size);
5422		if (devip->zsize >= capacity) {
5423			pr_err("Zone size too large for device capacity\n");
5424			return -EINVAL;
5425		}
5426	}
5427
5428	devip->zsize_shift = ilog2(devip->zsize);
5429	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5430
5431	if (sdeb_zbc_zone_cap_mb == 0) {
5432		devip->zcap = devip->zsize;
5433	} else {
5434		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5435			      ilog2(sdebug_sector_size);
5436		if (devip->zcap > devip->zsize) {
5437			pr_err("Zone capacity too large\n");
5438			return -EINVAL;
5439		}
5440	}
5441
5442	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5443	if (conv_capacity >= capacity) {
5444		pr_err("Number of conventional zones too large\n");
5445		return -EINVAL;
5446	}
5447	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5448	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5449			      devip->zsize_shift;
5450	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5451
5452	/* Add gap zones if zone capacity is smaller than the zone size */
5453	if (devip->zcap < devip->zsize)
5454		devip->nr_zones += devip->nr_seq_zones;
5455
5456	if (devip->zoned) {
5457		/* sdeb_zbc_max_open can be 0, meaning "not reported" */
5458		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5459			devip->max_open = (devip->nr_zones - 1) / 2;
5460		else
5461			devip->max_open = sdeb_zbc_max_open;
5462	}
5463
5464	devip->zstate = kcalloc(devip->nr_zones,
5465				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5466	if (!devip->zstate)
5467		return -ENOMEM;
5468
5469	for (i = 0; i < devip->nr_zones; i++) {
5470		zsp = &devip->zstate[i];
5471
5472		zsp->z_start = zstart;
5473
5474		if (i < devip->nr_conv_zones) {
5475			zsp->z_type = ZBC_ZTYPE_CNV;
5476			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5477			zsp->z_wp = (sector_t)-1;
5478			zsp->z_size =
5479				min_t(u64, devip->zsize, capacity - zstart);
5480		} else if ((zstart & (devip->zsize - 1)) == 0) {
5481			if (devip->zoned)
5482				zsp->z_type = ZBC_ZTYPE_SWR;
5483			else
5484				zsp->z_type = ZBC_ZTYPE_SWP;
5485			zsp->z_cond = ZC1_EMPTY;
5486			zsp->z_wp = zsp->z_start;
5487			zsp->z_size =
5488				min_t(u64, devip->zcap, capacity - zstart);
5489		} else {
5490			zsp->z_type = ZBC_ZTYPE_GAP;
5491			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5492			zsp->z_wp = (sector_t)-1;
5493			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5494					    capacity - zstart);
5495		}
5496
5497		WARN_ON_ONCE((int)zsp->z_size <= 0);
5498		zstart += zsp->z_size;
5499	}
5500
5501	return 0;
5502}
5503
5504static struct sdebug_dev_info *sdebug_device_create(
5505			struct sdebug_host_info *sdbg_host, gfp_t flags)
5506{
5507	struct sdebug_dev_info *devip;
5508
5509	devip = kzalloc(sizeof(*devip), flags);
5510	if (devip) {
5511		if (sdebug_uuid_ctl == 1)
5512			uuid_gen(&devip->lu_name);
5513		else if (sdebug_uuid_ctl == 2) {
5514			if (got_shared_uuid)
5515				devip->lu_name = shared_uuid;
5516			else {
5517				uuid_gen(&shared_uuid);
5518				got_shared_uuid = true;
5519				devip->lu_name = shared_uuid;
5520			}
5521		}
5522		devip->sdbg_host = sdbg_host;
5523		if (sdeb_zbc_in_use) {
5524			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5525			if (sdebug_device_create_zones(devip)) {
5526				kfree(devip);
5527				return NULL;
5528			}
5529		} else {
5530			devip->zoned = false;
5531		}
5532		devip->create_ts = ktime_get_boottime();
5533		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5534		spin_lock_init(&devip->list_lock);
5535		INIT_LIST_HEAD(&devip->inject_err_list);
5536		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5537	}
5538	return devip;
5539}
5540
5541static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5542{
5543	struct sdebug_host_info *sdbg_host;
5544	struct sdebug_dev_info *open_devip = NULL;
5545	struct sdebug_dev_info *devip;
5546
5547	sdbg_host = shost_to_sdebug_host(sdev->host);
5548
5549	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5550		if ((devip->used) && (devip->channel == sdev->channel) &&
5551		    (devip->target == sdev->id) &&
5552		    (devip->lun == sdev->lun))
5553			return devip;
5554		else {
5555			if ((!devip->used) && (!open_devip))
5556				open_devip = devip;
5557		}
5558	}
5559	if (!open_devip) { /* try to make a new one */
5560		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5561		if (!open_devip) {
5562			pr_err("out of memory at line %d\n", __LINE__);
5563			return NULL;
5564		}
5565	}
5566
5567	open_devip->channel = sdev->channel;
5568	open_devip->target = sdev->id;
5569	open_devip->lun = sdev->lun;
5570	open_devip->sdbg_host = sdbg_host;
5571	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5572	open_devip->used = true;
5573	return open_devip;
5574}
5575
5576static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5577{
5578	if (sdebug_verbose)
5579		pr_info("slave_alloc <%u %u %u %llu>\n",
5580		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5581
5582	return 0;
5583}
5584
5585static int scsi_debug_slave_configure(struct scsi_device *sdp)
5586{
5587	struct sdebug_dev_info *devip =
5588			(struct sdebug_dev_info *)sdp->hostdata;
5589	struct dentry *dentry;
5590
5591	if (sdebug_verbose)
5592		pr_info("slave_configure <%u %u %u %llu>\n",
5593		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5594	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5595		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5596	if (devip == NULL) {
5597		devip = find_build_dev_info(sdp);
5598		if (devip == NULL)
5599			return 1;  /* no resources, will be marked offline */
5600	}
5601	sdp->hostdata = devip;
5602	if (sdebug_no_uld)
5603		sdp->no_uld_attach = 1;
5604	config_cdb_len(sdp);
5605
5606	if (sdebug_allow_restart)
5607		sdp->allow_restart = 1;
5608
5609	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5610				sdebug_debugfs_root);
5611	if (IS_ERR_OR_NULL(devip->debugfs_entry))
5612		pr_info("%s: failed to create debugfs directory for device %s\n",
5613			__func__, dev_name(&sdp->sdev_gendev));
5614
5615	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5616				&sdebug_error_fops);
5617	if (IS_ERR_OR_NULL(dentry))
5618		pr_info("%s: failed to create error file for device %s\n",
5619			__func__, dev_name(&sdp->sdev_gendev));
5620
5621	return 0;
5622}
5623
5624static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5625{
5626	struct sdebug_dev_info *devip =
5627		(struct sdebug_dev_info *)sdp->hostdata;
5628	struct sdebug_err_inject *err;
5629
5630	if (sdebug_verbose)
5631		pr_info("slave_destroy <%u %u %u %llu>\n",
5632		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5633
5634	if (!devip)
5635		return;
5636
5637	spin_lock(&devip->list_lock);
5638	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5639		list_del_rcu(&err->list);
5640		call_rcu(&err->rcu, sdebug_err_free);
5641	}
5642	spin_unlock(&devip->list_lock);
5643
5644	debugfs_remove(devip->debugfs_entry);
5645
5646	/* make this slot available for re-use */
5647	devip->used = false;
5648	sdp->hostdata = NULL;
5649}
5650
5651/* Returns true if we require the queued memory to be freed by the caller. */
5652static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5653			   enum sdeb_defer_type defer_t)
5654{
5655	if (defer_t == SDEB_DEFER_HRT) {
5656		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5657
5658		switch (res) {
5659		case 0: /* Not active, it must have already run */
5660		case -1: /* It's executing its callback */
5661			return false;
5662		case 1: /* Was active, we've now cancelled */
5663		default:
5664			return true;
5665		}
5666	} else if (defer_t == SDEB_DEFER_WQ) {
5667		/* Cancel if pending */
5668		if (cancel_work_sync(&sd_dp->ew.work))
5669			return true;
5670		/* Was not pending, so it must have run */
5671		return false;
5672	} else if (defer_t == SDEB_DEFER_POLL) {
5673		return true;
5674	}
5675
5676	return false;
5677}
5678
5679
5680static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5681{
5682	enum sdeb_defer_type l_defer_t;
5683	struct sdebug_defer *sd_dp;
5684	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5685	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5686
5687	lockdep_assert_held(&sdsc->lock);
5688
5689	if (!sqcp)
5690		return false;
5691	sd_dp = &sqcp->sd_dp;
5692	l_defer_t = READ_ONCE(sd_dp->defer_t);
5693	ASSIGN_QUEUED_CMD(cmnd, NULL);
5694
5695	if (stop_qc_helper(sd_dp, l_defer_t))
5696		sdebug_free_queued_cmd(sqcp);
5697
5698	return true;
5699}
5700
5701/*
5702 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5703 */
5704static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5705{
5706	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5707	unsigned long flags;
5708	bool res;
5709
5710	spin_lock_irqsave(&sdsc->lock, flags);
5711	res = scsi_debug_stop_cmnd(cmnd);
5712	spin_unlock_irqrestore(&sdsc->lock, flags);
5713
5714	return res;
5715}
5716
5717/*
5718 * All we can do is set the cmnd as internally aborted and wait for it to
5719 * finish. We cannot call scsi_done() as normal completion path may do that.
5720 */
5721static bool sdebug_stop_cmnd(struct request *rq, void *data)
5722{
5723	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5724
5725	return true;
5726}
5727
5728/* Deletes (stops) timers or work queues of all queued commands */
5729static void stop_all_queued(void)
5730{
5731	struct sdebug_host_info *sdhp;
5732
5733	mutex_lock(&sdebug_host_list_mutex);
5734	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5735		struct Scsi_Host *shost = sdhp->shost;
5736
5737		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5738	}
5739	mutex_unlock(&sdebug_host_list_mutex);
5740}
5741
5742static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5743{
5744	struct scsi_device *sdp = cmnd->device;
5745	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5746	struct sdebug_err_inject *err;
5747	unsigned char *cmd = cmnd->cmnd;
5748	int ret = 0;
5749
5750	if (devip == NULL)
5751		return 0;
5752
5753	rcu_read_lock();
5754	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5755		if (err->type == ERR_ABORT_CMD_FAILED &&
5756		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5757			ret = !!err->cnt;
5758			if (err->cnt < 0)
5759				err->cnt++;
5760
5761			rcu_read_unlock();
5762			return ret;
5763		}
5764	}
5765	rcu_read_unlock();
5766
5767	return 0;
5768}
5769
5770static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5771{
5772	bool ok = scsi_debug_abort_cmnd(SCpnt);
5773	u8 *cmd = SCpnt->cmnd;
5774	u8 opcode = cmd[0];
5775
5776	++num_aborts;
5777
5778	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5779		sdev_printk(KERN_INFO, SCpnt->device,
5780			    "%s: command%s found\n", __func__,
5781			    ok ? "" : " not");
5782
5783	if (sdebug_fail_abort(SCpnt)) {
5784		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5785			    opcode);
5786		return FAILED;
5787	}
5788
5789	return SUCCESS;
5790}
5791
5792static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5793{
5794	struct scsi_device *sdp = data;
5795	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5796
5797	if (scmd->device == sdp)
5798		scsi_debug_abort_cmnd(scmd);
5799
5800	return true;
5801}
5802
5803/* Deletes (stops) timers or work queues of all queued commands per sdev */
5804static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5805{
5806	struct Scsi_Host *shost = sdp->host;
5807
5808	blk_mq_tagset_busy_iter(&shost->tag_set,
5809				scsi_debug_stop_all_queued_iter, sdp);
5810}
5811
5812static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5813{
5814	struct scsi_device *sdp = cmnd->device;
5815	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5816	struct sdebug_err_inject *err;
5817	unsigned char *cmd = cmnd->cmnd;
5818	int ret = 0;
5819
5820	if (devip == NULL)
5821		return 0;
5822
5823	rcu_read_lock();
5824	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5825		if (err->type == ERR_LUN_RESET_FAILED &&
5826		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
5827			ret = !!err->cnt;
5828			if (err->cnt < 0)
5829				err->cnt++;
5830
5831			rcu_read_unlock();
5832			return ret;
5833		}
5834	}
5835	rcu_read_unlock();
5836
5837	return 0;
5838}
5839
5840static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5841{
5842	struct scsi_device *sdp = SCpnt->device;
5843	struct sdebug_dev_info *devip = sdp->hostdata;
5844	u8 *cmd = SCpnt->cmnd;
5845	u8 opcode = cmd[0];
5846
5847	++num_dev_resets;
5848
5849	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5850		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5851
5852	scsi_debug_stop_all_queued(sdp);
5853	if (devip)
5854		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5855
5856	if (sdebug_fail_lun_reset(SCpnt)) {
5857		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5858		return FAILED;
5859	}
5860
5861	return SUCCESS;
5862}
5863
5864static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5865{
5866	struct scsi_target *starget = scsi_target(cmnd->device);
5867	struct sdebug_target_info *targetip =
5868		(struct sdebug_target_info *)starget->hostdata;
5869
5870	if (targetip)
5871		return targetip->reset_fail;
5872
5873	return 0;
5874}
5875
5876static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5877{
5878	struct scsi_device *sdp = SCpnt->device;
5879	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5880	struct sdebug_dev_info *devip;
5881	u8 *cmd = SCpnt->cmnd;
5882	u8 opcode = cmd[0];
5883	int k = 0;
5884
5885	++num_target_resets;
5886	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5887		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5888
5889	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5890		if (devip->target == sdp->id) {
5891			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5892			++k;
5893		}
5894	}
5895
5896	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5897		sdev_printk(KERN_INFO, sdp,
5898			    "%s: %d device(s) found in target\n", __func__, k);
5899
5900	if (sdebug_fail_target_reset(SCpnt)) {
5901		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5902			    opcode);
5903		return FAILED;
5904	}
5905
5906	return SUCCESS;
5907}
5908
5909static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5910{
5911	struct scsi_device *sdp = SCpnt->device;
5912	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5913	struct sdebug_dev_info *devip;
5914	int k = 0;
5915
5916	++num_bus_resets;
5917
5918	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5919		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5920
5921	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5922		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5923		++k;
5924	}
5925
5926	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5927		sdev_printk(KERN_INFO, sdp,
5928			    "%s: %d device(s) found in host\n", __func__, k);
5929	return SUCCESS;
5930}
5931
5932static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5933{
5934	struct sdebug_host_info *sdbg_host;
5935	struct sdebug_dev_info *devip;
5936	int k = 0;
5937
5938	++num_host_resets;
5939	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5940		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5941	mutex_lock(&sdebug_host_list_mutex);
5942	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5943		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5944				    dev_list) {
5945			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5946			++k;
5947		}
5948	}
5949	mutex_unlock(&sdebug_host_list_mutex);
5950	stop_all_queued();
5951	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5952		sdev_printk(KERN_INFO, SCpnt->device,
5953			    "%s: %d device(s) found\n", __func__, k);
5954	return SUCCESS;
5955}
5956
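/*
 * Write an MBR style partition table into the ramdisk image: the 0x55AA
 * signature at offsets 510/511 and up to SDEBUG_MAX_PARTS primary entries
 * at offset 0x1be, with CHS fields derived from the simulated geometry.
 */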
5957static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5958{
5959	struct msdos_partition *pp;
5960	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5961	int sectors_per_part, num_sectors, k;
5962	int heads_by_sects, start_sec, end_sec;
5963
5964	/* assume partition table already zeroed */
5965	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5966		return;
5967	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5968		sdebug_num_parts = SDEBUG_MAX_PARTS;
5969		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5970	}
5971	num_sectors = (int)get_sdebug_capacity();
5972	sectors_per_part = (num_sectors - sdebug_sectors_per)
5973			   / sdebug_num_parts;
5974	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5975	starts[0] = sdebug_sectors_per;
5976	max_part_secs = sectors_per_part;
5977	for (k = 1; k < sdebug_num_parts; ++k) {
5978		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5979			    * heads_by_sects;
5980		if (starts[k] - starts[k - 1] < max_part_secs)
5981			max_part_secs = starts[k] - starts[k - 1];
5982	}
5983	starts[sdebug_num_parts] = num_sectors;
5984	starts[sdebug_num_parts + 1] = 0;
5985
5986	ramp[510] = 0x55;	/* magic partition markings */
5987	ramp[511] = 0xAA;
5988	pp = (struct msdos_partition *)(ramp + 0x1be);
5989	for (k = 0; starts[k + 1]; ++k, ++pp) {
5990		start_sec = starts[k];
5991		end_sec = starts[k] + max_part_secs - 1;
5992		pp->boot_ind = 0;
5993
5994		pp->cyl = start_sec / heads_by_sects;
5995		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5996			   / sdebug_sectors_per;
5997		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5998
5999		pp->end_cyl = end_sec / heads_by_sects;
6000		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
6001			       / sdebug_sectors_per;
6002		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
6003
6004		pp->start_sect = cpu_to_le32(start_sec);
6005		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
6006		pp->sys_ind = 0x83;	/* plain Linux partition */
6007	}
6008}
6009
6010static void block_unblock_all_queues(bool block)
6011{
6012	struct sdebug_host_info *sdhp;
6013
6014	lockdep_assert_held(&sdebug_host_list_mutex);
6015
6016	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6017		struct Scsi_Host *shost = sdhp->shost;
6018
6019		if (block)
6020			scsi_block_requests(shost);
6021		else
6022			scsi_unblock_requests(shost);
6023	}
6024}
6025
6026/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6027 * commands will be processed normally before triggers occur.
6028 */
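/*
 * E.g. with every_nth=100 and cmnd_count=250, the count is rounded down
 * to 200, so 99 further commands complete normally before the next
 * trigger fires.
 */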
6029static void tweak_cmnd_count(void)
6030{
6031	int count, modulo;
6032
6033	modulo = abs(sdebug_every_nth);
6034	if (modulo < 2)
6035		return;
6036
6037	mutex_lock(&sdebug_host_list_mutex);
6038	block_unblock_all_queues(true);
6039	count = atomic_read(&sdebug_cmnd_count);
6040	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6041	block_unblock_all_queues(false);
6042	mutex_unlock(&sdebug_host_list_mutex);
6043}
6044
6045static void clear_queue_stats(void)
6046{
6047	atomic_set(&sdebug_cmnd_count, 0);
6048	atomic_set(&sdebug_completions, 0);
6049	atomic_set(&sdebug_miss_cpus, 0);
6050	atomic_set(&sdebug_a_tsf, 0);
6051}
6052
6053static bool inject_on_this_cmd(void)
6054{
6055	if (sdebug_every_nth == 0)
6056		return false;
6057	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6058}
6059
6060#define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
6061
6062
6063void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6064{
6065	if (sqcp)
6066		kmem_cache_free(queued_cmd_cache, sqcp);
6067}
6068
6069static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6070{
6071	struct sdebug_queued_cmd *sqcp;
6072	struct sdebug_defer *sd_dp;
6073
6074	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6075	if (!sqcp)
6076		return NULL;
6077
6078	sd_dp = &sqcp->sd_dp;
6079
6080	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6081	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6082	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6083
6084	sqcp->scmd = scmd;
6085
6086	return sqcp;
6087}
6088
6089/* Complete the processing of the thread that queued a SCSI command to this
6090 * driver. It either completes the command by calling scsi_done() or
6091 * schedules an hrtimer or work queue and then returns 0. Returns
6092 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
6093 */
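/*
 * Deferral modes used below: SDEB_DEFER_HRT (hrtimer driven delay),
 * SDEB_DEFER_WQ (work queue, used for the negative "tiny" delays) and
 * SDEB_DEFER_POLL (completion picked up by blk-mq polling when the
 * request was submitted with REQ_POLLED).
 */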
6094static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
6095			 int scsi_result,
6096			 int (*pfp)(struct scsi_cmnd *,
6097				    struct sdebug_dev_info *),
6098			 int delta_jiff, int ndelay)
6099{
6100	struct request *rq = scsi_cmd_to_rq(cmnd);
6101	bool polled = rq->cmd_flags & REQ_POLLED;
6102	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6103	unsigned long flags;
6104	u64 ns_from_boot = 0;
6105	struct sdebug_queued_cmd *sqcp;
6106	struct scsi_device *sdp;
6107	struct sdebug_defer *sd_dp;
6108
6109	if (unlikely(devip == NULL)) {
6110		if (scsi_result == 0)
6111			scsi_result = DID_NO_CONNECT << 16;
6112		goto respond_in_thread;
6113	}
6114	sdp = cmnd->device;
6115
6116	if (delta_jiff == 0)
6117		goto respond_in_thread;
6118
6119
6120	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6121		     (scsi_result == 0))) {
6122		int num_in_q = scsi_device_busy(sdp);
6123		int qdepth = cmnd->device->queue_depth;
6124
6125		if ((num_in_q == qdepth) &&
6126		    (atomic_inc_return(&sdebug_a_tsf) >=
6127		     abs(sdebug_every_nth))) {
6128			atomic_set(&sdebug_a_tsf, 0);
6129			scsi_result = device_qfull_result;
6130
6131			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6132				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6133					    __func__, num_in_q);
6134		}
6135	}
6136
6137	sqcp = sdebug_alloc_queued_cmd(cmnd);
6138	if (!sqcp) {
6139		pr_err("%s no alloc\n", __func__);
6140		return SCSI_MLQUEUE_HOST_BUSY;
6141	}
6142	sd_dp = &sqcp->sd_dp;
6143
6144	if (polled)
6145		ns_from_boot = ktime_get_boottime_ns();
6146
6147	/* one of the resp_*() response functions is called here */
6148	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6149	if (cmnd->result & SDEG_RES_IMMED_MASK) {
6150		cmnd->result &= ~SDEG_RES_IMMED_MASK;
6151		delta_jiff = ndelay = 0;
6152	}
6153	if (cmnd->result == 0 && scsi_result != 0)
6154		cmnd->result = scsi_result;
6155	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6156		if (atomic_read(&sdeb_inject_pending)) {
6157			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6158			atomic_set(&sdeb_inject_pending, 0);
6159			cmnd->result = check_condition_result;
6160		}
6161	}
6162
6163	if (unlikely(sdebug_verbose && cmnd->result))
6164		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6165			    __func__, cmnd->result);
6166
6167	if (delta_jiff > 0 || ndelay > 0) {
6168		ktime_t kt;
6169
6170		if (delta_jiff > 0) {
6171			u64 ns = jiffies_to_nsecs(delta_jiff);
6172
6173			if (sdebug_random && ns < U32_MAX) {
6174				ns = get_random_u32_below((u32)ns);
6175			} else if (sdebug_random) {
6176				ns >>= 12;	/* scale to 4 usec precision */
6177				if (ns < U32_MAX)	/* over 4 hours max */
6178					ns = get_random_u32_below((u32)ns);
6179				ns <<= 12;
6180			}
6181			kt = ns_to_ktime(ns);
6182		} else {	/* ndelay has a 4.2 second max */
6183			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6184					     (u32)ndelay;
6185			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6186				u64 d = ktime_get_boottime_ns() - ns_from_boot;
6187
6188				if (kt <= d) {	/* elapsed duration >= kt */
6189					/* call scsi_done() from this thread */
6190					sdebug_free_queued_cmd(sqcp);
6191					scsi_done(cmnd);
6192					return 0;
6193				}
6194				/* otherwise reduce kt by elapsed time */
6195				kt -= d;
6196			}
6197		}
6198		if (sdebug_statistics)
6199			sd_dp->issuing_cpu = raw_smp_processor_id();
6200		if (polled) {
6201			spin_lock_irqsave(&sdsc->lock, flags);
6202			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6203			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6204			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6205			spin_unlock_irqrestore(&sdsc->lock, flags);
6206		} else {
6207			/* schedule the invocation of scsi_done() for a later time */
6208			spin_lock_irqsave(&sdsc->lock, flags);
6209			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6210			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6211			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6212			/*
6213			 * The completion handler will try to grab sdsc->lock,
6214			 * so there is no chance that the completion handler
6215			 * will call scsi_done() until we release the lock
6216			 * here (so ok to keep referencing sdsc).
6217			 */
6218			spin_unlock_irqrestore(&sdsc->lock, flags);
6219		}
6220	} else {	/* delta_jiff < 0, use work queue */
6221		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6222			     atomic_read(&sdeb_inject_pending))) {
6223			sd_dp->aborted = true;
6224			atomic_set(&sdeb_inject_pending, 0);
6225			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6226				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6227		}
6228
6229		if (sdebug_statistics)
6230			sd_dp->issuing_cpu = raw_smp_processor_id();
6231		if (polled) {
6232			spin_lock_irqsave(&sdsc->lock, flags);
6233			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6234			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6235			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6236			spin_unlock_irqrestore(&sdsc->lock, flags);
6237		} else {
6238			spin_lock_irqsave(&sdsc->lock, flags);
6239			ASSIGN_QUEUED_CMD(cmnd, sqcp);
6240			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6241			schedule_work(&sd_dp->ew.work);
6242			spin_unlock_irqrestore(&sdsc->lock, flags);
6243		}
6244	}
6245
6246	return 0;
6247
6248respond_in_thread:	/* call back to mid-layer using invocation thread */
6249	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6250	cmnd->result &= ~SDEG_RES_IMMED_MASK;
6251	if (cmnd->result == 0 && scsi_result != 0)
6252		cmnd->result = scsi_result;
6253	scsi_done(cmnd);
6254	return 0;
6255}
6256
6257/* Note: The following macros create attribute files in the
6258   /sys/module/scsi_debug/parameters directory. Unfortunately this
6259   driver is unaware of changes made via those files and cannot trigger
6260   auxiliary actions, as it can when the corresponding attribute in the
6261   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6262 */
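/*
 * For example, writing
 *	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 * invokes opts_store() below (which also updates sdebug_verbose), while
 *	echo 1 > /sys/module/scsi_debug/parameters/opts
 * only changes the raw value of sdebug_opts.
 */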
6263module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6264module_param_named(ato, sdebug_ato, int, S_IRUGO);
6265module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6266module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6267module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6268module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6269module_param_named(dif, sdebug_dif, int, S_IRUGO);
6270module_param_named(dix, sdebug_dix, int, S_IRUGO);
6271module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6272module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6273module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6274module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6275module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6276module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6277module_param_string(inq_product, sdebug_inq_product_id,
6278		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6279module_param_string(inq_rev, sdebug_inq_product_rev,
6280		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6281module_param_string(inq_vendor, sdebug_inq_vendor_id,
6282		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6283module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6284module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6285module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6286module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6287module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6288module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6289module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6290module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6291module_param_named(medium_error_count, sdebug_medium_error_count, int,
6292		   S_IRUGO | S_IWUSR);
6293module_param_named(medium_error_start, sdebug_medium_error_start, int,
6294		   S_IRUGO | S_IWUSR);
6295module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6296module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6297module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6298module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6299module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6300module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6301module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6302module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6303module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6304module_param_named(per_host_store, sdebug_per_host_store, bool,
6305		   S_IRUGO | S_IWUSR);
6306module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6307module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6308module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6309module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6310module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6311module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6312module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6313module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6314module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6315module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6316module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6317module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6318module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6319module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6320module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6321module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6322module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6323module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6324		   S_IRUGO | S_IWUSR);
6325module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6326module_param_named(write_same_length, sdebug_write_same_length, int,
6327		   S_IRUGO | S_IWUSR);
6328module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6329module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6330module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6331module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6332module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6333module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6334
6335MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6336MODULE_DESCRIPTION("SCSI debug adapter driver");
6337MODULE_LICENSE("GPL");
6338MODULE_VERSION(SDEBUG_VERSION);
6339
6340MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6341MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6342MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6343MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6344MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6345MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6346MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6347MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6348MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6349MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6350MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6351MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6352MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6353MODULE_PARM_DESC(host_max_queue,
6354		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6355MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6356MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6357		 SDEBUG_VERSION "\")");
6358MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6359MODULE_PARM_DESC(lbprz,
6360		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6361MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6362MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6363MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6364MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6365MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
6366MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6367MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6368MODULE_PARM_DESC(medium_error_count, "count of sectors for which to return a MEDIUM error");
6369MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6370MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6371MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6372MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6373MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6374MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6375MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6376MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6377MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6378MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6379MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6380MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6381MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6382MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6383MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6384MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6385MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6386MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6387MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6388MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6389MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6390MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6391MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6392MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6393MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6394MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6395MODULE_PARM_DESC(uuid_ctl,
6396		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6397MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6398MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6399MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6400MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6401MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6402MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6403MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6404MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6405MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6406MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6407
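/*
 * Example invocation (illustrative only) combining several of the above
 * parameters: a 256 MiB shared ramdisk exposing 2 targets with 4 LUNs
 * each, completing commands without delay:
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 delay=0
 */
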
6408#define SDEBUG_INFO_LEN 256
6409static char sdebug_info[SDEBUG_INFO_LEN];
6410
6411static const char *scsi_debug_info(struct Scsi_Host *shp)
6412{
6413	int k;
6414
6415	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6416		      my_name, SDEBUG_VERSION, sdebug_version_date);
6417	if (k >= (SDEBUG_INFO_LEN - 1))
6418		return sdebug_info;
6419	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6420		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6421		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
6422		  "statistics", (int)sdebug_statistics);
6423	return sdebug_info;
6424}
6425
6426/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6427static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6428				 int length)
6429{
6430	char arr[16];
6431	int opts;
6432	int minLen = length > 15 ? 15 : length;
6433
6434	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6435		return -EACCES;
6436	memcpy(arr, buffer, minLen);
6437	arr[minLen] = '\0';
6438	if (1 != sscanf(arr, "%d", &opts))
6439		return -EINVAL;
6440	sdebug_opts = opts;
6441	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6442	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6443	if (sdebug_every_nth != 0)
6444		tweak_cmnd_count();
6445	return length;
6446}
6447
6448struct sdebug_submit_queue_data {
6449	int *first;
6450	int *last;
6451	int queue_num;
6452};
6453
6454static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6455{
6456	struct sdebug_submit_queue_data *data = opaque;
6457	u32 unique_tag = blk_mq_unique_tag(rq);
6458	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6459	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6460	int queue_num = data->queue_num;
6461
6462	if (hwq != queue_num)
6463		return true;
6464
6465	/* Rely on iter'ing in ascending tag order */
6466	if (*data->first == -1)
6467		*data->first = *data->last = tag;
6468	else
6469		*data->last = tag;
6470
6471	return true;
6472}
6473
6474/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6475 * same for each scsi_debug host (if more than one). Some of the counters
6476 * output are not atomic so they might be inaccurate on a busy system. */
6477static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6478{
6479	struct sdebug_host_info *sdhp;
6480	int j;
6481
6482	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6483		   SDEBUG_VERSION, sdebug_version_date);
6484	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6485		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6486		   sdebug_opts, sdebug_every_nth);
6487	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6488		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6489		   sdebug_sector_size, "bytes");
6490	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6491		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6492		   num_aborts);
6493	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6494		   num_dev_resets, num_target_resets, num_bus_resets,
6495		   num_host_resets);
6496	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6497		   dix_reads, dix_writes, dif_errors);
6498	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6499		   sdebug_statistics);
6500	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6501		   atomic_read(&sdebug_cmnd_count),
6502		   atomic_read(&sdebug_completions),
6503		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6504		   atomic_read(&sdebug_a_tsf),
6505		   atomic_read(&sdeb_mq_poll_count));
6506
6507	seq_printf(m, "submit_queues=%d\n", submit_queues);
6508	for (j = 0; j < submit_queues; ++j) {
6509		int f = -1, l = -1;
6510		struct sdebug_submit_queue_data data = {
6511			.queue_num = j,
6512			.first = &f,
6513			.last = &l,
6514		};
6515		seq_printf(m, "  queue %d:\n", j);
6516		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6517					&data);
6518		if (f >= 0) {
6519			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6520				   "first,last bits", f, l);
6521		}
6522	}
6523
6524	seq_printf(m, "this host_no=%d\n", host->host_no);
6525	if (!xa_empty(per_store_ap)) {
6526		bool niu;
6527		int idx;
6528		unsigned long l_idx;
6529		struct sdeb_store_info *sip;
6530
6531		seq_puts(m, "\nhost list:\n");
6532		j = 0;
6533		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6534			idx = sdhp->si_idx;
6535			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6536				   sdhp->shost->host_no, idx);
6537			++j;
6538		}
6539		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6540			   sdeb_most_recent_idx);
6541		j = 0;
6542		xa_for_each(per_store_ap, l_idx, sip) {
6543			niu = xa_get_mark(per_store_ap, l_idx,
6544					  SDEB_XA_NOT_IN_USE);
6545			idx = (int)l_idx;
6546			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6547				   (niu ? "  not_in_use" : ""));
6548			++j;
6549		}
6550	}
6551	return 0;
6552}
6553
6554static ssize_t delay_show(struct device_driver *ddp, char *buf)
6555{
6556	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6557}
6558/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6559 * of delay is jiffies.
6560 */
6561static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6562			   size_t count)
6563{
6564	int jdelay, res;
6565
6566	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6567		res = count;
6568		if (sdebug_jdelay != jdelay) {
6569			struct sdebug_host_info *sdhp;
6570
6571			mutex_lock(&sdebug_host_list_mutex);
6572			block_unblock_all_queues(true);
6573
6574			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6575				struct Scsi_Host *shost = sdhp->shost;
6576
6577				if (scsi_host_busy(shost)) {
6578					res = -EBUSY;   /* queued commands */
6579					break;
6580				}
6581			}
6582			if (res > 0) {
6583				sdebug_jdelay = jdelay;
6584				sdebug_ndelay = 0;
6585			}
6586			block_unblock_all_queues(false);
6587			mutex_unlock(&sdebug_host_list_mutex);
6588		}
6589		return res;
6590	}
6591	return -EINVAL;
6592}
6593static DRIVER_ATTR_RW(delay);
6594
6595static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6596{
6597	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6598}
6599/* Returns -EBUSY if ndelay is being changed and commands are queued */
6600/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6601static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6602			    size_t count)
6603{
6604	int ndelay, res;
6605
6606	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6607	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6608		res = count;
6609		if (sdebug_ndelay != ndelay) {
6610			struct sdebug_host_info *sdhp;
6611
6612			mutex_lock(&sdebug_host_list_mutex);
6613			block_unblock_all_queues(true);
6614
6615			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6616				struct Scsi_Host *shost = sdhp->shost;
6617
6618				if (scsi_host_busy(shost)) {
6619					res = -EBUSY;   /* queued commands */
6620					break;
6621				}
6622			}
6623
6624			if (res > 0) {
6625				sdebug_ndelay = ndelay;
6626				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6627							: DEF_JDELAY;
6628			}
6629			block_unblock_all_queues(false);
6630			mutex_unlock(&sdebug_host_list_mutex);
6631		}
6632		return res;
6633	}
6634	return -EINVAL;
6635}
6636static DRIVER_ATTR_RW(ndelay);
6637
6638static ssize_t opts_show(struct device_driver *ddp, char *buf)
6639{
6640	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6641}
6642
6643static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6644			  size_t count)
6645{
6646	int opts;
6647	char work[20];
6648
6649	if (sscanf(buf, "%10s", work) == 1) {
6650		if (strncasecmp(work, "0x", 2) == 0) {
6651			if (kstrtoint(work + 2, 16, &opts) == 0)
6652				goto opts_done;
6653		} else {
6654			if (kstrtoint(work, 10, &opts) == 0)
6655				goto opts_done;
6656		}
6657	}
6658	return -EINVAL;
6659opts_done:
6660	sdebug_opts = opts;
6661	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6662	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6663	tweak_cmnd_count();
6664	return count;
6665}
6666static DRIVER_ATTR_RW(opts);
6667
6668static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6669{
6670	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6671}
6672static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6673			   size_t count)
6674{
6675	int n;
6676
6677	/* Cannot change from or to TYPE_ZBC with sysfs */
6678	if (sdebug_ptype == TYPE_ZBC)
6679		return -EINVAL;
6680
6681	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6682		if (n == TYPE_ZBC)
6683			return -EINVAL;
6684		sdebug_ptype = n;
6685		return count;
6686	}
6687	return -EINVAL;
6688}
6689static DRIVER_ATTR_RW(ptype);
6690
6691static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6692{
6693	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6694}
6695static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6696			    size_t count)
6697{
6698	int n;
6699
6700	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6701		sdebug_dsense = n;
6702		return count;
6703	}
6704	return -EINVAL;
6705}
6706static DRIVER_ATTR_RW(dsense);
6707
6708static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6709{
6710	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6711}
6712static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6713			     size_t count)
6714{
6715	int n, idx;
6716
6717	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6718		bool want_store = (n == 0);
6719		struct sdebug_host_info *sdhp;
6720
6721		n = (n > 0);
6722		sdebug_fake_rw = (sdebug_fake_rw > 0);
6723		if (sdebug_fake_rw == n)
6724			return count;	/* not transitioning so do nothing */
6725
6726		if (want_store) {	/* 1 --> 0 transition, set up store */
6727			if (sdeb_first_idx < 0) {
6728				idx = sdebug_add_store();
6729				if (idx < 0)
6730					return idx;
6731			} else {
6732				idx = sdeb_first_idx;
6733				xa_clear_mark(per_store_ap, idx,
6734					      SDEB_XA_NOT_IN_USE);
6735			}
6736			/* make all hosts use same store */
6737			list_for_each_entry(sdhp, &sdebug_host_list,
6738					    host_list) {
6739				if (sdhp->si_idx != idx) {
6740					xa_set_mark(per_store_ap, sdhp->si_idx,
6741						    SDEB_XA_NOT_IN_USE);
6742					sdhp->si_idx = idx;
6743				}
6744			}
6745			sdeb_most_recent_idx = idx;
6746		} else {	/* 0 --> 1 transition is trigger for shrink */
6747			sdebug_erase_all_stores(true /* apart from first */);
6748		}
6749		sdebug_fake_rw = n;
6750		return count;
6751	}
6752	return -EINVAL;
6753}
6754static DRIVER_ATTR_RW(fake_rw);
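/*
 * Summary of the transitions handled in fake_rw_store() above: a
 * 1 --> 0 write (re)attaches a backing store, re-using sdeb_first_idx
 * when one already exists, and points every host at it; a 0 --> 1
 * write erases all stores apart from the first; writing the current
 * value again is a no-op.
 */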
6755
6756static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6757{
6758	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6759}
6760static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6761			      size_t count)
6762{
6763	int n;
6764
6765	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6766		sdebug_no_lun_0 = n;
6767		return count;
6768	}
6769	return -EINVAL;
6770}
6771static DRIVER_ATTR_RW(no_lun_0);
6772
6773static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6774{
6775	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6776}
6777static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6778			      size_t count)
6779{
6780	int n;
6781
6782	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6783		sdebug_num_tgts = n;
6784		sdebug_max_tgts_luns();
6785		return count;
6786	}
6787	return -EINVAL;
6788}
6789static DRIVER_ATTR_RW(num_tgts);
6790
6791static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6792{
6793	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6794}
6795static DRIVER_ATTR_RO(dev_size_mb);
6796
6797static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6798{
6799	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6800}
6801
6802static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6803				    size_t count)
6804{
6805	bool v;
6806
6807	if (kstrtobool(buf, &v))
6808		return -EINVAL;
6809
6810	sdebug_per_host_store = v;
6811	return count;
6812}
6813static DRIVER_ATTR_RW(per_host_store);
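/*
 * kstrtobool() takes the usual boolean spellings, so (illustratively)
 * both of these are accepted:
 *   echo y > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
 */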
6814
6815static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6816{
6817	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6818}
6819static DRIVER_ATTR_RO(num_parts);
6820
6821static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6822{
6823	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6824}
6825static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6826			       size_t count)
6827{
6828	int nth;
6829	char work[20];
6830
6831	if (sscanf(buf, "%10s", work) == 1) {
6832		if (strncasecmp(work, "0x", 2) == 0) {
6833			if (kstrtoint(work + 2, 16, &nth) == 0)
6834				goto every_nth_done;
6835		} else {
6836			if (kstrtoint(work, 10, &nth) == 0)
6837				goto every_nth_done;
6838		}
6839	}
6840	return -EINVAL;
6841
6842every_nth_done:
6843	sdebug_every_nth = nth;
6844	if (nth && !sdebug_statistics) {
6845		pr_info("every_nth needs statistics=1, set it\n");
6846		sdebug_statistics = true;
6847	}
6848	tweak_cmnd_count();
6849	return count;
6850}
6851static DRIVER_ATTR_RW(every_nth);
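/*
 * Illustrative: act on every 100th command (decimal or 0x-prefixed
 * hex both parse, as in opts_store()):
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 * As every_nth_store() warns, this turns sdebug_statistics on if it
 * was off, since command counting is required.
 */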
6852
6853static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6854{
6855	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6856}
6857static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6858				size_t count)
6859{
6860	int n;
6861	bool changed;
6862
6863	if (kstrtoint(buf, 0, &n))
6864		return -EINVAL;
6865	if (n >= 0) {
6866		if (n > (int)SAM_LUN_AM_FLAT) {
6867			pr_warn("only LUN address methods 0 and 1 are supported\n");
6868			return -EINVAL;
6869		}
6870		changed = ((int)sdebug_lun_am != n);
6871		sdebug_lun_am = n;
6872		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6873			struct sdebug_host_info *sdhp;
6874			struct sdebug_dev_info *dp;
6875
6876			mutex_lock(&sdebug_host_list_mutex);
6877			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6878				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6879					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6880				}
6881			}
6882			mutex_unlock(&sdebug_host_list_mutex);
6883		}
6884		return count;
6885	}
6886	return -EINVAL;
6887}
6888static DRIVER_ATTR_RW(lun_format);
6889
6890static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6891{
6892	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6893}
6894static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6895			      size_t count)
6896{
6897	int n;
6898	bool changed;
6899
6900	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6901		if (n > 256) {
6902			pr_warn("max_luns can be no more than 256\n");
6903			return -EINVAL;
6904		}
6905		changed = (sdebug_max_luns != n);
6906		sdebug_max_luns = n;
6907		sdebug_max_tgts_luns();
6908		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6909			struct sdebug_host_info *sdhp;
6910			struct sdebug_dev_info *dp;
6911
6912			mutex_lock(&sdebug_host_list_mutex);
6913			list_for_each_entry(sdhp, &sdebug_host_list,
6914					    host_list) {
6915				list_for_each_entry(dp, &sdhp->dev_info_list,
6916						    dev_list) {
6917					set_bit(SDEBUG_UA_LUNS_CHANGED,
6918						dp->uas_bm);
6919				}
6920			}
6921			mutex_unlock(&sdebug_host_list_mutex);
6922		}
6923		return count;
6924	}
6925	return -EINVAL;
6926}
6927static DRIVER_ATTR_RW(max_luns);
6928
6929static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6930{
6931	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6932}
6933/* N.B. max_queue can only be changed while no scsi_debug hosts exist
6934 * (see the list_empty() check below), hence while no commands are queued. */
6935static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6936			       size_t count)
6937{
6938	int n;
6939
6940	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6941	    (n <= SDEBUG_CANQUEUE) &&
6942	    (sdebug_host_max_queue == 0)) {
6943		mutex_lock(&sdebug_host_list_mutex);
6944
6945		/* We may only change sdebug_max_queue when we have no shosts */
6946		if (list_empty(&sdebug_host_list))
6947			sdebug_max_queue = n;
6948		else
6949			count = -EBUSY;
6950		mutex_unlock(&sdebug_host_list_mutex);
6951		return count;
6952	}
6953	return -EINVAL;
6954}
6955static DRIVER_ATTR_RW(max_queue);
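/*
 * Since max_queue may only change while no hosts exist, an
 * illustrative sequence drops the host(s) first via add_host:
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo 64 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */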
6956
6957static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6958{
6959	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6960}
6961
6962static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6963{
6964	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6965}
6966
6967static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6968{
6969	bool v;
6970
6971	if (kstrtobool(buf, &v))
6972		return -EINVAL;
6973
6974	sdebug_no_rwlock = v;
6975	return count;
6976}
6977static DRIVER_ATTR_RW(no_rwlock);
6978
6979/*
6980 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6981 * in range [0, sdebug_host_max_queue), we can't change it.
6982 */
6983static DRIVER_ATTR_RO(host_max_queue);
6984
6985static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6986{
6987	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6988}
6989static DRIVER_ATTR_RO(no_uld);
6990
6991static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6992{
6993	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6994}
6995static DRIVER_ATTR_RO(scsi_level);
6996
6997static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6998{
6999	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
7000}
7001static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
7002				size_t count)
7003{
7004	int n;
7005	bool changed;
7006
7007	/* Ignore capacity change for ZBC drives for now */
7008	if (sdeb_zbc_in_use)
7009		return -ENOTSUPP;
7010
7011	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7012		changed = (sdebug_virtual_gb != n);
7013		sdebug_virtual_gb = n;
7014		sdebug_capacity = get_sdebug_capacity();
7015		if (changed) {
7016			struct sdebug_host_info *sdhp;
7017			struct sdebug_dev_info *dp;
7018
7019			mutex_lock(&sdebug_host_list_mutex);
7020			list_for_each_entry(sdhp, &sdebug_host_list,
7021					    host_list) {
7022				list_for_each_entry(dp, &sdhp->dev_info_list,
7023						    dev_list) {
7024					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
7025						dp->uas_bm);
7026				}
7027			}
7028			mutex_unlock(&sdebug_host_list_mutex);
7029		}
7030		return count;
7031	}
7032	return -EINVAL;
7033}
7034static DRIVER_ATTR_RW(virtual_gb);
7035
7036static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7037{
7038	/* absolute number of hosts currently active is what is shown */
7039	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7040}
7041
7042static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
7043			      size_t count)
7044{
7045	bool found;
7046	unsigned long idx;
7047	struct sdeb_store_info *sip;
7048	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
7049	int delta_hosts;
7050
7051	if (sscanf(buf, "%d", &delta_hosts) != 1)
7052		return -EINVAL;
7053	if (delta_hosts > 0) {
7054		do {
7055			found = false;
7056			if (want_phs) {
7057				xa_for_each_marked(per_store_ap, idx, sip,
7058						   SDEB_XA_NOT_IN_USE) {
7059					sdeb_most_recent_idx = (int)idx;
7060					found = true;
7061					break;
7062				}
7063				if (found)	/* re-use case */
7064					sdebug_add_host_helper((int)idx);
7065				else
7066					sdebug_do_add_host(true);
7067			} else {
7068				sdebug_do_add_host(false);
7069			}
7070		} while (--delta_hosts);
7071	} else if (delta_hosts < 0) {
7072		do {
7073			sdebug_do_remove_host(false);
7074		} while (++delta_hosts);
7075	}
7076	return count;
7077}
7078static DRIVER_ATTR_RW(add_host);
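/*
 * Illustrative: add two hosts, then remove one. Reading the attribute
 * back gives the absolute number of active hosts, not the sum of the
 * written deltas:
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */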
7079
7080static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7081{
7082	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7083}
7084static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7085				    size_t count)
7086{
7087	int n;
7088
7089	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7090		sdebug_vpd_use_hostno = n;
7091		return count;
7092	}
7093	return -EINVAL;
7094}
7095static DRIVER_ATTR_RW(vpd_use_hostno);
7096
7097static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7098{
7099	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7100}
7101static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7102				size_t count)
7103{
7104	int n;
7105
7106	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7107		if (n > 0)
7108			sdebug_statistics = true;
7109		else {
7110			clear_queue_stats();
7111			sdebug_statistics = false;
7112		}
7113		return count;
7114	}
7115	return -EINVAL;
7116}
7117static DRIVER_ATTR_RW(statistics);
7118
7119static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7120{
7121	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7122}
7123static DRIVER_ATTR_RO(sector_size);
7124
7125static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7126{
7127	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7128}
7129static DRIVER_ATTR_RO(submit_queues);
7130
7131static ssize_t dix_show(struct device_driver *ddp, char *buf)
7132{
7133	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7134}
7135static DRIVER_ATTR_RO(dix);
7136
7137static ssize_t dif_show(struct device_driver *ddp, char *buf)
7138{
7139	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7140}
7141static DRIVER_ATTR_RO(dif);
7142
7143static ssize_t guard_show(struct device_driver *ddp, char *buf)
7144{
7145	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7146}
7147static DRIVER_ATTR_RO(guard);
7148
7149static ssize_t ato_show(struct device_driver *ddp, char *buf)
7150{
7151	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7152}
7153static DRIVER_ATTR_RO(ato);
7154
7155static ssize_t map_show(struct device_driver *ddp, char *buf)
7156{
7157	ssize_t count = 0;
7158
7159	if (!scsi_debug_lbp())
7160		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7161				 sdebug_store_sectors);
7162
7163	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7164		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7165
7166		if (sip)
7167			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7168					  (int)map_size, sip->map_storep);
7169	}
7170	buf[count++] = '\n';
7171	buf[count] = '\0';
7172
7173	return count;
7174}
7175static DRIVER_ATTR_RO(map);
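/*
 * With LBP active, map reads back as a %*pbl bitmap-list of mapped
 * blocks, e.g. "0-1,96-127" (illustrative); otherwise the full range
 * "0-<store_sectors>" is reported.
 */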
7176
7177static ssize_t random_show(struct device_driver *ddp, char *buf)
7178{
7179	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7180}
7181
7182static ssize_t random_store(struct device_driver *ddp, const char *buf,
7183			    size_t count)
7184{
7185	bool v;
7186
7187	if (kstrtobool(buf, &v))
7188		return -EINVAL;
7189
7190	sdebug_random = v;
7191	return count;
7192}
7193static DRIVER_ATTR_RW(random);
7194
7195static ssize_t removable_show(struct device_driver *ddp, char *buf)
7196{
7197	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7198}
7199static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7200			       size_t count)
7201{
7202	int n;
7203
7204	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7205		sdebug_removable = (n > 0);
7206		return count;
7207	}
7208	return -EINVAL;
7209}
7210static DRIVER_ATTR_RW(removable);
7211
7212static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7213{
7214	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7215}
7216/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7217static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7218			       size_t count)
7219{
7220	int n;
7221
7222	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7223		sdebug_host_lock = (n > 0);
7224		return count;
7225	}
7226	return -EINVAL;
7227}
7228static DRIVER_ATTR_RW(host_lock);
7229
7230static ssize_t strict_show(struct device_driver *ddp, char *buf)
7231{
7232	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7233}
7234static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7235			    size_t count)
7236{
7237	int n;
7238
7239	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7240		sdebug_strict = (n > 0);
7241		return count;
7242	}
7243	return -EINVAL;
7244}
7245static DRIVER_ATTR_RW(strict);
7246
7247static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7248{
7249	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7250}
7251static DRIVER_ATTR_RO(uuid_ctl);
7252
7253static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7254{
7255	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7256}
7257static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7258			     size_t count)
7259{
7260	int ret, n;
7261
7262	ret = kstrtoint(buf, 0, &n);
7263	if (ret)
7264		return ret;
7265	sdebug_cdb_len = n;
7266	all_config_cdb_len();
7267	return count;
7268}
7269static DRIVER_ATTR_RW(cdb_len);
7270
7271static const char * const zbc_model_strs_a[] = {
7272	[BLK_ZONED_NONE] = "none",
7273	[BLK_ZONED_HA]   = "host-aware",
7274	[BLK_ZONED_HM]   = "host-managed",
7275};
7276
7277static const char * const zbc_model_strs_b[] = {
7278	[BLK_ZONED_NONE] = "no",
7279	[BLK_ZONED_HA]   = "aware",
7280	[BLK_ZONED_HM]   = "managed",
7281};
7282
7283static const char * const zbc_model_strs_c[] = {
7284	[BLK_ZONED_NONE] = "0",
7285	[BLK_ZONED_HA]   = "1",
7286	[BLK_ZONED_HM]   = "2",
7287};
7288
7289static int sdeb_zbc_model_str(const char *cp)
7290{
7291	int res = sysfs_match_string(zbc_model_strs_a, cp);
7292
7293	if (res < 0) {
7294		res = sysfs_match_string(zbc_model_strs_b, cp);
7295		if (res < 0) {
7296			res = sysfs_match_string(zbc_model_strs_c, cp);
7297			if (res < 0)
7298				return -EINVAL;
7299		}
7300	}
7301	return res;
7302}
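/*
 * So "host-managed", "managed" and "2" (one spelling from each table
 * above) all resolve to BLK_ZONED_HM, and likewise for the other two
 * models.
 */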
7303
7304static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7305{
7306	return scnprintf(buf, PAGE_SIZE, "%s\n",
7307			 zbc_model_strs_a[sdeb_zbc_model]);
7308}
7309static DRIVER_ATTR_RO(zbc);
7310
7311static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7312{
7313	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7314}
7315static DRIVER_ATTR_RO(tur_ms_to_ready);
7316
7317static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
7318{
7319	char *p = buf, *end = buf + PAGE_SIZE;
7320	int i;
7321
7322	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7323		p += scnprintf(p, end - p, "%d %ld\n", i,
7324			       atomic_long_read(&writes_by_group_number[i]));
7325
7326	return p - buf;
7327}
7328
7329static ssize_t group_number_stats_store(struct device_driver *ddp,
7330					const char *buf, size_t count)
7331{
7332	int i;
7333
7334	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
7335		atomic_long_set(&writes_by_group_number[i], 0);
7336
7337	return count;
7338}
7339static DRIVER_ATTR_RW(group_number_stats);
7340
7341/* Note: The following array creates attribute files in the
7342   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7343   files (over those found in the /sys/module/scsi_debug/parameters
7344   directory) is that auxiliary actions can be triggered when an attribute
7345   is changed. For example see: add_host_store() above.
7346 */
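/* For example (illustrative): writing the delay attribute above goes
   through delay_store(), which can refuse with EBUSY, whereas a raw
   write to /sys/module/scsi_debug/parameters/delay (where writable)
   would update the variable without any of those checks.
 */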
7347
7348static struct attribute *sdebug_drv_attrs[] = {
7349	&driver_attr_delay.attr,
7350	&driver_attr_opts.attr,
7351	&driver_attr_ptype.attr,
7352	&driver_attr_dsense.attr,
7353	&driver_attr_fake_rw.attr,
7354	&driver_attr_host_max_queue.attr,
7355	&driver_attr_no_lun_0.attr,
7356	&driver_attr_num_tgts.attr,
7357	&driver_attr_dev_size_mb.attr,
7358	&driver_attr_num_parts.attr,
7359	&driver_attr_every_nth.attr,
7360	&driver_attr_lun_format.attr,
7361	&driver_attr_max_luns.attr,
7362	&driver_attr_max_queue.attr,
7363	&driver_attr_no_rwlock.attr,
7364	&driver_attr_no_uld.attr,
7365	&driver_attr_scsi_level.attr,
7366	&driver_attr_virtual_gb.attr,
7367	&driver_attr_add_host.attr,
7368	&driver_attr_per_host_store.attr,
7369	&driver_attr_vpd_use_hostno.attr,
7370	&driver_attr_sector_size.attr,
7371	&driver_attr_statistics.attr,
7372	&driver_attr_submit_queues.attr,
7373	&driver_attr_dix.attr,
7374	&driver_attr_dif.attr,
7375	&driver_attr_guard.attr,
7376	&driver_attr_ato.attr,
7377	&driver_attr_map.attr,
7378	&driver_attr_random.attr,
7379	&driver_attr_removable.attr,
7380	&driver_attr_host_lock.attr,
7381	&driver_attr_ndelay.attr,
7382	&driver_attr_strict.attr,
7383	&driver_attr_uuid_ctl.attr,
7384	&driver_attr_cdb_len.attr,
7385	&driver_attr_tur_ms_to_ready.attr,
7386	&driver_attr_zbc.attr,
7387	&driver_attr_group_number_stats.attr,
7388	NULL,
7389};
7390ATTRIBUTE_GROUPS(sdebug_drv);
7391
7392static struct device *pseudo_primary;
7393
7394static int __init scsi_debug_init(void)
7395{
7396	bool want_store = (sdebug_fake_rw == 0);
7397	unsigned long sz;
7398	int k, ret, hosts_to_add;
7399	int idx = -1;
7400
7401	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7402		pr_warn("ndelay must be less than 1 second, ignored\n");
7403		sdebug_ndelay = 0;
7404	} else if (sdebug_ndelay > 0)
7405		sdebug_jdelay = JDELAY_OVERRIDDEN;
7406
7407	switch (sdebug_sector_size) {
7408	case  512:
7409	case 1024:
7410	case 2048:
7411	case 4096:
7412		break;
7413	default:
7414		pr_err("invalid sector_size %d\n", sdebug_sector_size);
7415		return -EINVAL;
7416	}
7417
7418	switch (sdebug_dif) {
7419	case T10_PI_TYPE0_PROTECTION:
7420		break;
7421	case T10_PI_TYPE1_PROTECTION:
7422	case T10_PI_TYPE2_PROTECTION:
7423	case T10_PI_TYPE3_PROTECTION:
7424		have_dif_prot = true;
7425		break;
7426
7427	default:
7428		pr_err("dif must be 0, 1, 2 or 3\n");
7429		return -EINVAL;
7430	}
7431
7432	if (sdebug_num_tgts < 0) {
7433		pr_err("num_tgts must be >= 0\n");
7434		return -EINVAL;
7435	}
7436
7437	if (sdebug_guard > 1) {
7438		pr_err("guard must be 0 or 1\n");
7439		return -EINVAL;
7440	}
7441
7442	if (sdebug_ato > 1) {
7443		pr_err("ato must be 0 or 1\n");
7444		return -EINVAL;
7445	}
7446
7447	if (sdebug_physblk_exp > 15) {
7448		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7449		return -EINVAL;
7450	}
7451
7452	sdebug_lun_am = sdebug_lun_am_i;
7453	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7454		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7455		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7456	}
7457
7458	if (sdebug_max_luns > 256) {
7459		if (sdebug_max_luns > 16384) {
7460			pr_warn("max_luns can be no more than 16384, using default\n");
7461			sdebug_max_luns = DEF_MAX_LUNS;
7462		}
7463		sdebug_lun_am = SAM_LUN_AM_FLAT;
7464	}
7465
7466	if (sdebug_lowest_aligned > 0x3fff) {
7467		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7468		return -EINVAL;
7469	}
7470
7471	if (submit_queues < 1) {
7472		pr_err("submit_queues must be 1 or more\n");
7473		return -EINVAL;
7474	}
7475
7476	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7477		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7478		return -EINVAL;
7479	}
7480
7481	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7482	    (sdebug_host_max_queue < 0)) {
7483		pr_err("host_max_queue must be in range [0, %d]\n",
7484		       SDEBUG_CANQUEUE);
7485		return -EINVAL;
7486	}
7487
7488	if (sdebug_host_max_queue &&
7489	    (sdebug_max_queue != sdebug_host_max_queue)) {
7490		sdebug_max_queue = sdebug_host_max_queue;
7491		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7492			sdebug_max_queue);
7493	}
7494
7495	/*
7496	 * check for host managed zoned block device specified with
7497	 * ptype=0x14 or zbc=XXX.
7498	 */
7499	if (sdebug_ptype == TYPE_ZBC) {
7500		sdeb_zbc_model = BLK_ZONED_HM;
7501	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7502		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7503		if (k < 0)
7504			return k;
7505		sdeb_zbc_model = k;
7506		switch (sdeb_zbc_model) {
7507		case BLK_ZONED_NONE:
7508		case BLK_ZONED_HA:
7509			sdebug_ptype = TYPE_DISK;
7510			break;
7511		case BLK_ZONED_HM:
7512			sdebug_ptype = TYPE_ZBC;
7513			break;
7514		default:
7515			pr_err("Invalid ZBC model\n");
7516			return -EINVAL;
7517		}
7518	}
7519	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7520		sdeb_zbc_in_use = true;
7521		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7522			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7523	}
7524
7525	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7526		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7527	if (sdebug_dev_size_mb < 1)
7528		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7529	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7530	sdebug_store_sectors = sz / sdebug_sector_size;
7531	sdebug_capacity = get_sdebug_capacity();
7532
7533	/* play around with geometry, don't waste too much on track 0 */
7534	sdebug_heads = 8;
7535	sdebug_sectors_per = 32;
7536	if (sdebug_dev_size_mb >= 256)
7537		sdebug_heads = 64;
7538	else if (sdebug_dev_size_mb >= 16)
7539		sdebug_heads = 32;
7540	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7541			       (sdebug_sectors_per * sdebug_heads);
7542	if (sdebug_cylinders_per >= 1024) {
7543		/* other LLDs do this; implies >= 1GB ram disk ... */
7544		sdebug_heads = 255;
7545		sdebug_sectors_per = 63;
7546		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7547			       (sdebug_sectors_per * sdebug_heads);
7548	}
7549	if (scsi_debug_lbp()) {
7550		sdebug_unmap_max_blocks =
7551			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7552
7553		sdebug_unmap_max_desc =
7554			clamp(sdebug_unmap_max_desc, 0U, 256U);
7555
7556		sdebug_unmap_granularity =
7557			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7558
7559		if (sdebug_unmap_alignment &&
7560		    sdebug_unmap_granularity <=
7561		    sdebug_unmap_alignment) {
7562			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7563			return -EINVAL;
7564		}
7565	}
7566	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7567	if (want_store) {
7568		idx = sdebug_add_store();
7569		if (idx < 0)
7570			return idx;
7571	}
7572
7573	pseudo_primary = root_device_register("pseudo_0");
7574	if (IS_ERR(pseudo_primary)) {
7575		pr_warn("root_device_register() error\n");
7576		ret = PTR_ERR(pseudo_primary);
7577		goto free_vm;
7578	}
7579	ret = bus_register(&pseudo_lld_bus);
7580	if (ret < 0) {
7581		pr_warn("bus_register error: %d\n", ret);
7582		goto dev_unreg;
7583	}
7584	ret = driver_register(&sdebug_driverfs_driver);
7585	if (ret < 0) {
7586		pr_warn("driver_register error: %d\n", ret);
7587		goto bus_unreg;
7588	}
7589
7590	hosts_to_add = sdebug_add_host;
7591	sdebug_add_host = 0;
7592
7593	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7594	if (!queued_cmd_cache) {
7595		ret = -ENOMEM;
7596		goto driver_unreg;
7597	}
7598
7599	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7600	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7601		pr_info("%s: failed to create initial debugfs directory\n", __func__);
7602
7603	for (k = 0; k < hosts_to_add; k++) {
7604		if (want_store && k == 0) {
7605			ret = sdebug_add_host_helper(idx);
7606			if (ret < 0) {
7607				pr_err("add_host_helper k=%d, error=%d\n",
7608				       k, -ret);
7609				break;
7610			}
7611		} else {
7612			ret = sdebug_do_add_host(want_store &&
7613						 sdebug_per_host_store);
7614			if (ret < 0) {
7615				pr_err("add_host k=%d error=%d\n", k, -ret);
7616				break;
7617			}
7618		}
7619	}
7620	if (sdebug_verbose)
7621		pr_info("built %d host(s)\n", sdebug_num_hosts);
7622
7623	return 0;
7624
7625driver_unreg:
7626	driver_unregister(&sdebug_driverfs_driver);
7627bus_unreg:
7628	bus_unregister(&pseudo_lld_bus);
7629dev_unreg:
7630	root_device_unregister(pseudo_primary);
7631free_vm:
7632	sdebug_erase_store(idx, NULL);
7633	return ret;
7634}
7635
7636static void __exit scsi_debug_exit(void)
7637{
7638	int k = sdebug_num_hosts;
7639
7640	for (; k; k--)
7641		sdebug_do_remove_host(true);
7642	kmem_cache_destroy(queued_cmd_cache);
7643	driver_unregister(&sdebug_driverfs_driver);
7644	bus_unregister(&pseudo_lld_bus);
7645	root_device_unregister(pseudo_primary);
7646
7647	sdebug_erase_all_stores(false);
7648	xa_destroy(per_store_ap);
7649	debugfs_remove(sdebug_debugfs_root);
7650}
7651
7652device_initcall(scsi_debug_init);
7653module_exit(scsi_debug_exit);
7654
7655static void sdebug_release_adapter(struct device *dev)
7656{
7657	struct sdebug_host_info *sdbg_host;
7658
7659	sdbg_host = dev_to_sdebug_host(dev);
7660	kfree(sdbg_host);
7661}
7662
7663/* idx must be valid; if sip is NULL then it will be obtained using idx */
7664static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7665{
7666	if (idx < 0)
7667		return;
7668	if (!sip) {
7669		if (xa_empty(per_store_ap))
7670			return;
7671		sip = xa_load(per_store_ap, idx);
7672		if (!sip)
7673			return;
7674	}
7675	vfree(sip->map_storep);
7676	vfree(sip->dif_storep);
7677	vfree(sip->storep);
7678	xa_erase(per_store_ap, idx);
7679	kfree(sip);
7680}
7681
7682/* Assume apart_from_first==false only in shutdown case. */
7683static void sdebug_erase_all_stores(bool apart_from_first)
7684{
7685	unsigned long idx;
7686	struct sdeb_store_info *sip = NULL;
7687
7688	xa_for_each(per_store_ap, idx, sip) {
7689		if (apart_from_first)
7690			apart_from_first = false;
7691		else
7692			sdebug_erase_store(idx, sip);
7693	}
7694	if (apart_from_first)
7695		sdeb_most_recent_idx = sdeb_first_idx;
7696}
7697
7698/*
7699 * Returns the new store xarray element index (idx) if >= 0, else a negated errno.
7700 * Limit the number of stores to 65536.
7701 */
7702static int sdebug_add_store(void)
7703{
7704	int res;
7705	u32 n_idx;
7706	unsigned long iflags;
7707	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7708	struct sdeb_store_info *sip = NULL;
7709	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7710
7711	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7712	if (!sip)
7713		return -ENOMEM;
7714
7715	xa_lock_irqsave(per_store_ap, iflags);
7716	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7717	if (unlikely(res < 0)) {
7718		xa_unlock_irqrestore(per_store_ap, iflags);
7719		kfree(sip);
7720		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7721		return res;
7722	}
7723	sdeb_most_recent_idx = n_idx;
7724	if (sdeb_first_idx < 0)
7725		sdeb_first_idx = n_idx;
7726	xa_unlock_irqrestore(per_store_ap, iflags);
7727
7728	res = -ENOMEM;
7729	sip->storep = vzalloc(sz);
7730	if (!sip->storep) {
7731		pr_err("user data oom\n");
7732		goto err;
7733	}
7734	if (sdebug_num_parts > 0)
7735		sdebug_build_parts(sip->storep, sz);
7736
7737	/* DIF/DIX: what T10 calls Protection Information (PI) */
7738	if (sdebug_dix) {
7739		int dif_size;
7740
7741		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7742		sip->dif_storep = vmalloc(dif_size);
7743
7744		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7745			sip->dif_storep);
7746
7747		if (!sip->dif_storep) {
7748			pr_err("DIX oom\n");
7749			goto err;
7750		}
7751		memset(sip->dif_storep, 0xff, dif_size);
7752	}
7753	/* Logical Block Provisioning */
7754	if (scsi_debug_lbp()) {
7755		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7756		sip->map_storep = vmalloc(array_size(sizeof(long),
7757						     BITS_TO_LONGS(map_size)));
7758
7759		pr_info("%lu provisioning blocks\n", map_size);
7760
7761		if (!sip->map_storep) {
7762			pr_err("LBP map oom\n");
7763			goto err;
7764		}
7765
7766		bitmap_zero(sip->map_storep, map_size);
7767
7768		/* Map first 1KB for partition table */
7769		if (sdebug_num_parts)
7770			map_region(sip, 0, 2);
7771	}
7772
7773	rwlock_init(&sip->macc_lck);
7774	return (int)n_idx;
7775err:
7776	sdebug_erase_store((int)n_idx, sip);
7777	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7778	return res;
7779}
7780
7781static int sdebug_add_host_helper(int per_host_idx)
7782{
7783	int k, devs_per_host, idx;
7784	int error = -ENOMEM;
7785	struct sdebug_host_info *sdbg_host;
7786	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7787
7788	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7789	if (!sdbg_host)
7790		return -ENOMEM;
7791	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7792	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7793		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7794	sdbg_host->si_idx = idx;
7795
7796	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7797
7798	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7799	for (k = 0; k < devs_per_host; k++) {
7800		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7801		if (!sdbg_devinfo)
7802			goto clean;
7803	}
7804
7805	mutex_lock(&sdebug_host_list_mutex);
7806	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7807	mutex_unlock(&sdebug_host_list_mutex);
7808
7809	sdbg_host->dev.bus = &pseudo_lld_bus;
7810	sdbg_host->dev.parent = pseudo_primary;
7811	sdbg_host->dev.release = &sdebug_release_adapter;
7812	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7813
7814	error = device_register(&sdbg_host->dev);
7815	if (error) {
7816		mutex_lock(&sdebug_host_list_mutex);
7817		list_del(&sdbg_host->host_list);
7818		mutex_unlock(&sdebug_host_list_mutex);
7819		goto clean;
7820	}
7821
7822	++sdebug_num_hosts;
7823	return 0;
7824
7825clean:
7826	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7827				 dev_list) {
7828		list_del(&sdbg_devinfo->dev_list);
7829		kfree(sdbg_devinfo->zstate);
7830		kfree(sdbg_devinfo);
7831	}
7832	if (sdbg_host->dev.release)
7833		put_device(&sdbg_host->dev);
7834	else
7835		kfree(sdbg_host);
7836	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7837	return error;
7838}
7839
7840static int sdebug_do_add_host(bool mk_new_store)
7841{
7842	int ph_idx = sdeb_most_recent_idx;
7843
7844	if (mk_new_store) {
7845		ph_idx = sdebug_add_store();
7846		if (ph_idx < 0)
7847			return ph_idx;
7848	}
7849	return sdebug_add_host_helper(ph_idx);
7850}
7851
7852static void sdebug_do_remove_host(bool the_end)
7853{
7854	int idx = -1;
7855	struct sdebug_host_info *sdbg_host = NULL;
7856	struct sdebug_host_info *sdbg_host2;
7857
7858	mutex_lock(&sdebug_host_list_mutex);
7859	if (!list_empty(&sdebug_host_list)) {
7860		sdbg_host = list_entry(sdebug_host_list.prev,
7861				       struct sdebug_host_info, host_list);
7862		idx = sdbg_host->si_idx;
7863	}
7864	if (!the_end && idx >= 0) {
7865		bool unique = true;
7866
7867		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7868			if (sdbg_host2 == sdbg_host)
7869				continue;
7870			if (idx == sdbg_host2->si_idx) {
7871				unique = false;
7872				break;
7873			}
7874		}
7875		if (unique) {
7876			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7877			if (idx == sdeb_most_recent_idx)
7878				--sdeb_most_recent_idx;
7879		}
7880	}
7881	if (sdbg_host)
7882		list_del(&sdbg_host->host_list);
7883	mutex_unlock(&sdebug_host_list_mutex);
7884
7885	if (!sdbg_host)
7886		return;
7887
7888	device_unregister(&sdbg_host->dev);
7889	--sdebug_num_hosts;
7890}
7891
7892static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7893{
7894	struct sdebug_dev_info *devip = sdev->hostdata;
7895
7896	if (!devip)
7897		return	-ENODEV;
7898
7899	mutex_lock(&sdebug_host_list_mutex);
7900	block_unblock_all_queues(true);
7901
7902	if (qdepth > SDEBUG_CANQUEUE) {
7903		qdepth = SDEBUG_CANQUEUE;
7904		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7905			qdepth, SDEBUG_CANQUEUE);
7906	}
7907	if (qdepth < 1)
7908		qdepth = 1;
7909	if (qdepth != sdev->queue_depth)
7910		scsi_change_queue_depth(sdev, qdepth);
7911
7912	block_unblock_all_queues(false);
7913	mutex_unlock(&sdebug_host_list_mutex);
7914
7915	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7916		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7917
7918	return sdev->queue_depth;
7919}
7920
7921static bool fake_timeout(struct scsi_cmnd *scp)
7922{
7923	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7924		if (sdebug_every_nth < -1)
7925			sdebug_every_nth = -1;
7926		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7927			return true; /* ignore command causing timeout */
7928		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7929			 scsi_medium_access_command(scp))
7930			return true; /* time out reads and writes */
7931	}
7932	return false;
7933}
7934
7935/* Response to TUR or media access command when device stopped */
7936static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7937{
7938	int stopped_state;
7939	u64 diff_ns = 0;
7940	ktime_t now_ts = ktime_get_boottime();
7941	struct scsi_device *sdp = scp->device;
7942
7943	stopped_state = atomic_read(&devip->stopped);
7944	if (stopped_state == 2) {
7945		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7946			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7947			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7948				/* tur_ms_to_ready timer extinguished */
7949				atomic_set(&devip->stopped, 0);
7950				return 0;
7951			}
7952		}
7953		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7954		if (sdebug_verbose)
7955			sdev_printk(KERN_INFO, sdp,
7956				    "%s: Not ready: in process of becoming ready\n", my_name);
7957		if (scp->cmnd[0] == TEST_UNIT_READY) {
7958			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7959
7960			if (diff_ns <= tur_nanosecs_to_ready)
7961				diff_ns = tur_nanosecs_to_ready - diff_ns;
7962			else
7963				diff_ns = tur_nanosecs_to_ready;
7964			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7965			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7966			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7967						   diff_ns);
7968			return check_condition_result;
7969		}
7970	}
7971	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7972	if (sdebug_verbose)
7973		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7974			    my_name);
7975	return check_condition_result;
7976}
7977
7978static void sdebug_map_queues(struct Scsi_Host *shost)
7979{
7980	int i, qoff;
7981
7982	if (shost->nr_hw_queues == 1)
7983		return;
7984
7985	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7986		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7987
7988		map->nr_queues  = 0;
7989
7990		if (i == HCTX_TYPE_DEFAULT)
7991			map->nr_queues = submit_queues - poll_queues;
7992		else if (i == HCTX_TYPE_POLL)
7993			map->nr_queues = poll_queues;
7994
7995		if (!map->nr_queues) {
7996			BUG_ON(i == HCTX_TYPE_DEFAULT);
7997			continue;
7998		}
7999
8000		map->queue_offset = qoff;
8001		blk_mq_map_queues(map);
8002
8003		qoff += map->nr_queues;
8004	}
8005}
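/*
 * Illustrative mapping: with submit_queues=4 and poll_queues=1, the
 * loop above gives HCTX_TYPE_DEFAULT 3 queues at offset 0 and
 * HCTX_TYPE_POLL 1 queue at offset 3; HCTX_TYPE_READ gets none and
 * is skipped.
 */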
8006
8007struct sdebug_blk_mq_poll_data {
8008	unsigned int queue_num;
8009	int *num_entries;
8010};
8011
8012/*
8013 * We don't handle aborted commands here, but it does not seem possible to have
8014 * aborted polled commands from schedule_resp()
8015 */
8016static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
8017{
8018	struct sdebug_blk_mq_poll_data *data = opaque;
8019	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
8020	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8021	struct sdebug_defer *sd_dp;
8022	u32 unique_tag = blk_mq_unique_tag(rq);
8023	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
8024	struct sdebug_queued_cmd *sqcp;
8025	unsigned long flags;
8026	int queue_num = data->queue_num;
8027	ktime_t time;
8028
8029	/* We're only interested in one queue for this iteration */
8030	if (hwq != queue_num)
8031		return true;
8032
8033	/* Subsequent checks would fail if this failed, but check anyway */
8034	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
8035		return true;
8036
8037	time = ktime_get_boottime();
8038
8039	spin_lock_irqsave(&sdsc->lock, flags);
8040	sqcp = TO_QUEUED_CMD(cmd);
8041	if (!sqcp) {
8042		spin_unlock_irqrestore(&sdsc->lock, flags);
8043		return true;
8044	}
8045
8046	sd_dp = &sqcp->sd_dp;
8047	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
8048		spin_unlock_irqrestore(&sdsc->lock, flags);
8049		return true;
8050	}
8051
8052	if (time < sd_dp->cmpl_ts) {
8053		spin_unlock_irqrestore(&sdsc->lock, flags);
8054		return true;
8055	}
8056
8057	ASSIGN_QUEUED_CMD(cmd, NULL);
8058	spin_unlock_irqrestore(&sdsc->lock, flags);
8059
8060	if (sdebug_statistics) {
8061		atomic_inc(&sdebug_completions);
8062		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
8063			atomic_inc(&sdebug_miss_cpus);
8064	}
8065
8066	sdebug_free_queued_cmd(sqcp);
8067
8068	scsi_done(cmd); /* callback to mid level */
8069	(*data->num_entries)++;
8070	return true;
8071}
8072
8073static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8074{
8075	int num_entries = 0;
8076	struct sdebug_blk_mq_poll_data data = {
8077		.queue_num = queue_num,
8078		.num_entries = &num_entries,
8079	};
8080
8081	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8082				&data);
8083
8084	if (num_entries > 0)
8085		atomic_add(num_entries, &sdeb_mq_poll_count);
8086	return num_entries;
8087}
8088
8089static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8090{
8091	struct scsi_device *sdp = cmnd->device;
8092	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8093	struct sdebug_err_inject *err;
8094	unsigned char *cmd = cmnd->cmnd;
8095	int ret = 0;
8096
8097	if (devip == NULL)
8098		return 0;
8099
8100	rcu_read_lock();
8101	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8102		if (err->type == ERR_TMOUT_CMD &&
8103		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8104			ret = !!err->cnt;
8105			if (err->cnt < 0)
8106				err->cnt++;
8107
8108			rcu_read_unlock();
8109			return ret;
8110		}
8111	}
8112	rcu_read_unlock();
8113
8114	return 0;
8115}
8116
8117static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8118{
8119	struct scsi_device *sdp = cmnd->device;
8120	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8121	struct sdebug_err_inject *err;
8122	unsigned char *cmd = cmnd->cmnd;
8123	int ret = 0;
8124
8125	if (devip == NULL)
8126		return 0;
8127
8128	rcu_read_lock();
8129	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8130		if (err->type == ERR_FAIL_QUEUE_CMD &&
8131		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8132			ret = err->cnt ? err->queuecmd_ret : 0;
8133			if (err->cnt < 0)
8134				err->cnt++;
8135
8136			rcu_read_unlock();
8137			return ret;
8138		}
8139	}
8140	rcu_read_unlock();
8141
8142	return 0;
8143}
8144
8145static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8146			   struct sdebug_err_inject *info)
8147{
8148	struct scsi_device *sdp = cmnd->device;
8149	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8150	struct sdebug_err_inject *err;
8151	unsigned char *cmd = cmnd->cmnd;
8152	int ret = 0;
8153	int result;
8154
8155	if (devip == NULL)
8156		return 0;
8157
8158	rcu_read_lock();
8159	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8160		if (err->type == ERR_FAIL_CMD &&
8161		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
8162			if (!err->cnt) {
8163				rcu_read_unlock();
8164				return 0;
8165			}
8166
8167			ret = !!err->cnt;
8168			rcu_read_unlock();
8169			goto out_handle;
8170		}
8171	}
8172	rcu_read_unlock();
8173
8174	return 0;
8175
8176out_handle:
8177	if (err->cnt < 0)
8178		err->cnt++;
8179	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8180	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8181	*info = *err;
8182	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8183
8184	return ret;
8185}
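/*
 * Note on err->cnt in the three injection helpers above: a negative
 * count steps upward toward zero, yielding that many injections
 * before the rule goes inert; zero means inactive; a positive count
 * injects every time without being decremented.
 */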
8186
8187static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8188				   struct scsi_cmnd *scp)
8189{
8190	u8 sdeb_i;
8191	struct scsi_device *sdp = scp->device;
8192	const struct opcode_info_t *oip;
8193	const struct opcode_info_t *r_oip;
8194	struct sdebug_dev_info *devip;
8195	u8 *cmd = scp->cmnd;
8196	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8197	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8198	int k, na;
8199	int errsts = 0;
8200	u64 lun_index = sdp->lun & 0x3FFF;
8201	u32 flags;
8202	u16 sa;
8203	u8 opcode = cmd[0];
8204	bool has_wlun_rl;
8205	bool inject_now;
8206	int ret = 0;
8207	struct sdebug_err_inject err;
8208
8209	scsi_set_resid(scp, 0);
8210	if (sdebug_statistics) {
8211		atomic_inc(&sdebug_cmnd_count);
8212		inject_now = inject_on_this_cmd();
8213	} else {
8214		inject_now = false;
8215	}
8216	if (unlikely(sdebug_verbose &&
8217		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8218		char b[120];
8219		int n, len, sb;
8220
8221		len = scp->cmd_len;
8222		sb = (int)sizeof(b);
8223		if (len > 32)
8224			strcpy(b, "too long, over 32 bytes");
8225		else {
8226			for (k = 0, n = 0; k < len && n < sb; ++k)
8227				n += scnprintf(b + n, sb - n, "%02x ",
8228					       (u32)cmd[k]);
8229		}
8230		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8231			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8232	}
8233	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8234		return SCSI_MLQUEUE_HOST_BUSY;
8235	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8236	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8237		goto err_out;
8238
8239	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
8240	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
8241	devip = (struct sdebug_dev_info *)sdp->hostdata;
8242	if (unlikely(!devip)) {
8243		devip = find_build_dev_info(sdp);
8244		if (NULL == devip)
8245			goto err_out;
8246	}
8247
8248	if (sdebug_timeout_cmd(scp)) {
8249		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8250		return 0;
8251	}
8252
8253	ret = sdebug_fail_queue_cmd(scp);
8254	if (ret) {
8255		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8256				opcode, ret);
8257		return ret;
8258	}
8259
8260	if (sdebug_fail_cmd(scp, &ret, &err)) {
8261		scmd_printk(KERN_INFO, scp,
8262			"fail command 0x%x with hostbyte=0x%x, "
8263			"driverbyte=0x%x, statusbyte=0x%x, "
8264			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8265			opcode, err.host_byte, err.driver_byte,
8266			err.status_byte, err.sense_key, err.asc, err.asq);
8267		return ret;
8268	}
8269
8270	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8271		atomic_set(&sdeb_inject_pending, 1);
8272
8273	na = oip->num_attached;
8274	r_pfp = oip->pfp;
8275	if (na) {	/* multiple commands with this opcode */
8276		r_oip = oip;
8277		if (FF_SA & r_oip->flags) {
8278			if (F_SA_LOW & oip->flags)
8279				sa = 0x1f & cmd[1];
8280			else
8281				sa = get_unaligned_be16(cmd + 8);
8282			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8283				if (opcode == oip->opcode && sa == oip->sa)
8284					break;
8285			}
8286		} else {   /* since no service action only check opcode */
8287			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8288				if (opcode == oip->opcode)
8289					break;
8290			}
8291		}
8292		if (k > na) {
8293			if (F_SA_LOW & r_oip->flags)
8294				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8295			else if (F_SA_HIGH & r_oip->flags)
8296				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8297			else
8298				mk_sense_invalid_opcode(scp);
8299			goto check_cond;
8300		}
8301	}	/* else (when na==0) we assume the oip is a match */
8302	flags = oip->flags;
8303	if (unlikely(F_INV_OP & flags)) {
8304		mk_sense_invalid_opcode(scp);
8305		goto check_cond;
8306	}
8307	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8308		if (sdebug_verbose)
8309			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8310				    my_name, opcode, " supported for wlun");
8311		mk_sense_invalid_opcode(scp);
8312		goto check_cond;
8313	}
8314	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
8315		u8 rem;
8316		int j;
8317
8318		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8319			rem = ~oip->len_mask[k] & cmd[k];
8320			if (rem) {
8321				for (j = 7; j >= 0; --j, rem <<= 1) {
8322					if (0x80 & rem)
8323						break;
8324				}
8325				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8326				goto check_cond;
8327			}
8328		}
8329	}
8330	if (unlikely(!(F_SKIP_UA & flags) &&
8331		     find_first_bit(devip->uas_bm,
8332				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8333		errsts = make_ua(scp, devip);
8334		if (errsts)
8335			goto check_cond;
8336	}
8337	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8338		     atomic_read(&devip->stopped))) {
8339		errsts = resp_not_ready(scp, devip);
8340		if (errsts)
8341			goto fini;
8342	}
8343	if (sdebug_fake_rw && (F_FAKE_RW & flags))
8344		goto fini;
8345	if (unlikely(sdebug_every_nth)) {
8346		if (fake_timeout(scp))
8347			return 0;	/* ignore command: make trouble */
8348	}
8349	if (likely(oip->pfp))
8350		pfp = oip->pfp;	/* calls a resp_* function */
8351	else
8352		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8353
8354fini:
8355	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
8356		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8357	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8358					    sdebug_ndelay > 10000)) {
8359		/*
8360		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8361		 * for Start Stop Unit (SSU) want at least 1 second delay and
8362		 * if sdebug_jdelay>1 want a long delay of that many seconds.
8363		 * For Synchronize Cache want 1/20 of SSU's delay.
8364		 */
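		/*
		 * Illustrative: with sdebug_jdelay=2, SSU is delayed
		 * about 2 seconds (2 * HZ jiffies) and SYNCHRONIZE
		 * CACHE about 0.1 seconds (2 * HZ / 20).
		 */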
8365		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8366		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8367
8368		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8369		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8370	} else
8371		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8372				     sdebug_ndelay);
8373check_cond:
8374	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8375err_out:
8376	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8377}
8378
8379static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8380{
8381	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8382
8383	spin_lock_init(&sdsc->lock);
8384
8385	return 0;
8386}
8387
8388static struct scsi_host_template sdebug_driver_template = {
8389	.show_info =		scsi_debug_show_info,
8390	.write_info =		scsi_debug_write_info,
8391	.proc_name =		sdebug_proc_name,
8392	.name =			"SCSI DEBUG",
8393	.info =			scsi_debug_info,
8394	.slave_alloc =		scsi_debug_slave_alloc,
8395	.slave_configure =	scsi_debug_slave_configure,
8396	.slave_destroy =	scsi_debug_slave_destroy,
8397	.ioctl =		scsi_debug_ioctl,
8398	.queuecommand =		scsi_debug_queuecommand,
8399	.change_queue_depth =	sdebug_change_qdepth,
8400	.map_queues =		sdebug_map_queues,
8401	.mq_poll =		sdebug_blk_mq_poll,
8402	.eh_abort_handler =	scsi_debug_abort,
8403	.eh_device_reset_handler = scsi_debug_device_reset,
8404	.eh_target_reset_handler = scsi_debug_target_reset,
8405	.eh_bus_reset_handler = scsi_debug_bus_reset,
8406	.eh_host_reset_handler = scsi_debug_host_reset,
8407	.can_queue =		SDEBUG_CANQUEUE,
8408	.this_id =		7,
8409	.sg_tablesize =		SG_MAX_SEGMENTS,
8410	.cmd_per_lun =		DEF_CMD_PER_LUN,
8411	.max_sectors =		-1U,
8412	.max_segment_size =	-1U,
8413	.module =		THIS_MODULE,
8414	.track_queue_depth =	1,
8415	.cmd_size = sizeof(struct sdebug_scsi_cmd),
8416	.init_cmd_priv = sdebug_init_cmd_priv,
8417	.target_alloc =		sdebug_target_alloc,
8418	.target_destroy =	sdebug_target_destroy,
8419};
8420
8421static int sdebug_driver_probe(struct device *dev)
8422{
8423	int error = 0;
8424	struct sdebug_host_info *sdbg_host;
8425	struct Scsi_Host *hpnt;
8426	int hprot;
8427
8428	sdbg_host = dev_to_sdebug_host(dev);
8429
8430	sdebug_driver_template.can_queue = sdebug_max_queue;
8431	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8432	if (!sdebug_clustering)
8433		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8434
8435	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8436	if (NULL == hpnt) {
8437		pr_err("scsi_host_alloc failed\n");
8438		error = -ENODEV;
8439		return error;
8440	}
8441	if (submit_queues > nr_cpu_ids) {
8442		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8443			my_name, submit_queues, nr_cpu_ids);
8444		submit_queues = nr_cpu_ids;
8445	}
8446	/*
8447	 * Decide whether to tell scsi subsystem that we want mq. The
8448	 * following should give the same answer for each host.
8449	 */
8450	hpnt->nr_hw_queues = submit_queues;
8451	if (sdebug_host_max_queue)
8452		hpnt->host_tagset = 1;
8453
8454	/* poll queues are possible for nr_hw_queues > 1 */
8455	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8456		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8457			 my_name, poll_queues, hpnt->nr_hw_queues);
8458		poll_queues = 0;
8459	}
8460
8461	/*
8462	 * Poll queues don't need interrupts, but we need at least one I/O queue
8463	 * left over for non-polled I/O.
8464	 * If condition not met, trim poll_queues to 1 (just for simplicity).
8465	 */
8466	if (poll_queues >= submit_queues) {
8467		if (submit_queues < 3)
8468			pr_warn("%s: trim poll_queues to 1\n", my_name);
8469		else
8470			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8471				my_name, submit_queues - 1);
8472		poll_queues = 1;
8473	}
8474	if (poll_queues)
8475		hpnt->nr_maps = 3;
8476
8477	sdbg_host->shost = hpnt;
8478	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8479		hpnt->max_id = sdebug_num_tgts + 1;
8480	else
8481		hpnt->max_id = sdebug_num_tgts;
8482	/* = sdebug_max_luns; */
8483	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8484
8485	hprot = 0;
8486
8487	switch (sdebug_dif) {
8488
8489	case T10_PI_TYPE1_PROTECTION:
8490		hprot = SHOST_DIF_TYPE1_PROTECTION;
8491		if (sdebug_dix)
8492			hprot |= SHOST_DIX_TYPE1_PROTECTION;
8493		break;
8494
8495	case T10_PI_TYPE2_PROTECTION:
8496		hprot = SHOST_DIF_TYPE2_PROTECTION;
8497		if (sdebug_dix)
8498			hprot |= SHOST_DIX_TYPE2_PROTECTION;
8499		break;
8500
8501	case T10_PI_TYPE3_PROTECTION:
8502		hprot = SHOST_DIF_TYPE3_PROTECTION;
8503		if (sdebug_dix)
8504			hprot |= SHOST_DIX_TYPE3_PROTECTION;
8505		break;
8506
8507	default:
8508		if (sdebug_dix)
8509			hprot |= SHOST_DIX_TYPE0_PROTECTION;
8510		break;
8511	}
8512
8513	scsi_host_set_prot(hpnt, hprot);
8514
8515	if (have_dif_prot || sdebug_dix)
8516		pr_info("host protection%s%s%s%s%s%s%s\n",
8517			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8518			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8519			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8520			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8521			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8522			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8523			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8524
8525	if (sdebug_guard == 1)
8526		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8527	else
8528		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8529
8530	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8531	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8532	if (sdebug_every_nth)	/* need stats counters for every_nth */
8533		sdebug_statistics = true;
8534	error = scsi_add_host(hpnt, &sdbg_host->dev);
8535	if (error) {
8536		pr_err("scsi_add_host failed\n");
8537		error = -ENODEV;
8538		scsi_host_put(hpnt);
8539	} else {
8540		scsi_scan_host(hpnt);
8541	}
8542
8543	return error;
8544}
8545
8546static void sdebug_driver_remove(struct device *dev)
8547{
8548	struct sdebug_host_info *sdbg_host;
8549	struct sdebug_dev_info *sdbg_devinfo, *tmp;
8550
8551	sdbg_host = dev_to_sdebug_host(dev);
8552
8553	scsi_remove_host(sdbg_host->shost);
8554
8555	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8556				 dev_list) {
8557		list_del(&sdbg_devinfo->dev_list);
8558		kfree(sdbg_devinfo->zstate);
8559		kfree(sdbg_devinfo);
8560	}
8561
8562	scsi_host_put(sdbg_host->shost);
8563}
8564
8565static const struct bus_type pseudo_lld_bus = {
8566	.name = "pseudo",
8567	.probe = sdebug_driver_probe,
8568	.remove = sdebug_driver_remove,
8569	.drv_groups = sdebug_drv_groups,
8570};
v4.17
 
   1/*
   2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   3 *  Copyright (C) 1992  Eric Youngdale
   4 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   5 *  to make sure that we are not getting blocks mixed up, and PANIC if
   6 *  anything out of the ordinary is seen.
   7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   8 *
   9 * Copyright (C) 2001 - 2018 Douglas Gilbert
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  17 *
  18 */
  19
  20
  21#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  22
  23#include <linux/module.h>
  24
  25#include <linux/kernel.h>
  26#include <linux/errno.h>
  27#include <linux/jiffies.h>
  28#include <linux/slab.h>
  29#include <linux/types.h>
  30#include <linux/string.h>
  31#include <linux/genhd.h>
  32#include <linux/fs.h>
  33#include <linux/init.h>
  34#include <linux/proc_fs.h>
  35#include <linux/vmalloc.h>
  36#include <linux/moduleparam.h>
  37#include <linux/scatterlist.h>
  38#include <linux/blkdev.h>
  39#include <linux/crc-t10dif.h>
  40#include <linux/spinlock.h>
  41#include <linux/interrupt.h>
  42#include <linux/atomic.h>
  43#include <linux/hrtimer.h>
  44#include <linux/uuid.h>
  45#include <linux/t10-pi.h>
  46
  47#include <net/checksum.h>
  48
  49#include <asm/unaligned.h>
  50
  51#include <scsi/scsi.h>
  52#include <scsi/scsi_cmnd.h>
  53#include <scsi/scsi_device.h>
  54#include <scsi/scsi_host.h>
  55#include <scsi/scsicam.h>
  56#include <scsi/scsi_eh.h>
  57#include <scsi/scsi_tcq.h>
  58#include <scsi/scsi_dbg.h>
  59
  60#include "sd.h"
  61#include "scsi_logging.h"
  62
  63/* make sure inq_product_rev string corresponds to this version */
  64#define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
  65static const char *sdebug_version_date = "20180128";
  66
  67#define MY_NAME "scsi_debug"
  68
  69/* Additional Sense Code (ASC) */
  70#define NO_ADDITIONAL_SENSE 0x0
  71#define LOGICAL_UNIT_NOT_READY 0x4
  72#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
  73#define UNRECOVERED_READ_ERR 0x11
  74#define PARAMETER_LIST_LENGTH_ERR 0x1a
  75#define INVALID_OPCODE 0x20
  76#define LBA_OUT_OF_RANGE 0x21
  77#define INVALID_FIELD_IN_CDB 0x24
  78#define INVALID_FIELD_IN_PARAM_LIST 0x26
  79#define UA_RESET_ASC 0x29
  80#define UA_CHANGED_ASC 0x2a
  81#define TARGET_CHANGED_ASC 0x3f
  82#define LUNS_CHANGED_ASCQ 0x0e
  83#define INSUFF_RES_ASC 0x55
  84#define INSUFF_RES_ASCQ 0x3
  85#define POWER_ON_RESET_ASCQ 0x0
  86#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
  87#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
  88#define CAPACITY_CHANGED_ASCQ 0x9
  89#define SAVING_PARAMS_UNSUP 0x39
  90#define TRANSPORT_PROBLEM 0x4b
  91#define THRESHOLD_EXCEEDED 0x5d
  92#define LOW_POWER_COND_ON 0x5e
  93#define MISCOMPARE_VERIFY_ASC 0x1d
  94#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
  95#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
  96#define WRITE_ERROR_ASC 0xc
  97
  98/* Additional Sense Code Qualifier (ASCQ) */
  99#define ACK_NAK_TO 0x3
 100
 101/* Default values for driver parameters */
 102#define DEF_NUM_HOST   1
 103#define DEF_NUM_TGTS   1
 104#define DEF_MAX_LUNS   1
 105/* With these defaults, this driver will make 1 host with 1 target
 106 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 107 */
 108#define DEF_ATO 1
 109#define DEF_CDB_LEN 10
 110#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
 111#define DEF_DEV_SIZE_MB   8
 112#define DEF_DIF 0
 113#define DEF_DIX 0
 114#define DEF_D_SENSE   0
 115#define DEF_EVERY_NTH   0
 116#define DEF_FAKE_RW	0
 117#define DEF_GUARD 0
 118#define DEF_HOST_LOCK 0
 119#define DEF_LBPU 0
 120#define DEF_LBPWS 0
 121#define DEF_LBPWS10 0
 122#define DEF_LBPRZ 1
 123#define DEF_LOWEST_ALIGNED 0
 124#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
 125#define DEF_NO_LUN_0   0
 126#define DEF_NUM_PARTS   0
 127#define DEF_OPTS   0
 128#define DEF_OPT_BLKS 1024
 129#define DEF_PHYSBLK_EXP 0
 130#define DEF_OPT_XFERLEN_EXP 0
 131#define DEF_PTYPE   TYPE_DISK
 132#define DEF_REMOVABLE false
 133#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
 134#define DEF_SECTOR_SIZE 512
 135#define DEF_UNMAP_ALIGNMENT 0
 136#define DEF_UNMAP_GRANULARITY 1
 137#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 138#define DEF_UNMAP_MAX_DESC 256
 139#define DEF_VIRTUAL_GB   0
 140#define DEF_VPD_USE_HOSTNO 1
 141#define DEF_WRITESAME_LENGTH 0xFFFF
 142#define DEF_STRICT 0
 143#define DEF_STATISTICS false
 144#define DEF_SUBMIT_QUEUES 1
 145#define DEF_UUID_CTL 0
 146#define JDELAY_OVERRIDDEN -9999
 147
 148#define SDEBUG_LUN_0_VAL 0
 149
 150/* bit mask values for sdebug_opts */
 151#define SDEBUG_OPT_NOISE		1
 152#define SDEBUG_OPT_MEDIUM_ERR		2
 153#define SDEBUG_OPT_TIMEOUT		4
 154#define SDEBUG_OPT_RECOVERED_ERR	8
 155#define SDEBUG_OPT_TRANSPORT_ERR	16
 156#define SDEBUG_OPT_DIF_ERR		32
 157#define SDEBUG_OPT_DIX_ERR		64
 158#define SDEBUG_OPT_MAC_TIMEOUT		128
 159#define SDEBUG_OPT_SHORT_TRANSFER	0x100
 160#define SDEBUG_OPT_Q_NOISE		0x200
 161#define SDEBUG_OPT_ALL_TSF		0x400
 162#define SDEBUG_OPT_RARE_TSF		0x800
 163#define SDEBUG_OPT_N_WCE		0x1000
 164#define SDEBUG_OPT_RESET_NOISE		0x2000
 165#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
 166#define SDEBUG_OPT_HOST_BUSY		0x8000
 167#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
 168			      SDEBUG_OPT_RESET_NOISE)
 169#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
 170				  SDEBUG_OPT_TRANSPORT_ERR | \
 171				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
 172				  SDEBUG_OPT_SHORT_TRANSFER | \
 173				  SDEBUG_OPT_HOST_BUSY)
 174/* When "every_nth" > 0 then modulo "every_nth" commands:
 175 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 176 *   - a RECOVERED_ERROR is simulated on successful read and write
 177 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 178 *   - a TRANSPORT_ERROR is simulated on successful read and write
 179 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 180 *
 181 * When "every_nth" < 0 then after "- every_nth" commands:
 182 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 183 *   - a RECOVERED_ERROR is simulated on successful read and write
 184 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 185 *   - a TRANSPORT_ERROR is simulated on successful read and write
 186 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 187 * This will continue on every subsequent command until some other action
 188 * occurs (e.g. the user writing a new value (other than -1 or 1) to
 189 * every_nth via sysfs).
 190 */
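/*
 * Editor's illustration (values are arbitrary): combining every_nth with
 * SDEBUG_OPT_TIMEOUT (opts=4, defined below) makes every 100th command
 * appear to time out; writing 0 to the sysfs attribute stops it:
 *
 *	modprobe scsi_debug every_nth=100 opts=4
 *	echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */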
 191
 192/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 193 * priority order. In the subset implemented here lower numbers have higher
 194 * priority. The UA numbers should be a sequence starting from 0 with
 195 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
 196#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
 197#define SDEBUG_UA_BUS_RESET 1
 198#define SDEBUG_UA_MODE_CHANGED 2
 199#define SDEBUG_UA_CAPACITY_CHANGED 3
 200#define SDEBUG_UA_LUNS_CHANGED 4
 201#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
 202#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
 203#define SDEBUG_NUM_UAS 7
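/*
 * Editor's sketch (not driver code): a reset handler queues one of these
 * unit attentions by setting its bit in the per-device bitmap, and the
 * next command through make_ua() (further below) reports it as sense data:
 *
 *	set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
 */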
 204
 205/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 206 * sector on read commands: */
 207#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 208#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
 209
 210/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 211 * or "peripheral device" addressing (value 0) */
 212#define SAM2_LUN_ADDRESS_METHOD 0
 213
 214/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 215 * (for response) per submit queue at one time. Can be reduced by max_queue
 216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 219 * but cannot exceed SDEBUG_CANQUEUE .
 220 */
 221#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
 222#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
 223#define DEF_CMD_PER_LUN  255
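/*
 * Editor's illustration: per the comment above, the queue depth of a
 * simulated device <h:c:t:l> can be lowered at run time, e.g.:
 *
 *	echo 64 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */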
 224
 225#define F_D_IN			1
 226#define F_D_OUT			2
 227#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
 228#define F_D_UNKN		8
 229#define F_RL_WLUN_OK		0x10
 230#define F_SKIP_UA		0x20
 231#define F_DELAY_OVERR		0x40
 232#define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
 233#define F_SA_HIGH		0x100	/* as used by variable length cdbs */
 234#define F_INV_OP		0x200
 235#define F_FAKE_RW		0x400
 236#define F_M_ACCESS		0x800	/* media access */
 237#define F_SSU_DELAY		0x1000
 238#define F_SYNC_DELAY		0x2000
 239
 240#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
 241#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
 242#define FF_SA (F_SA_HIGH | F_SA_LOW)
 243#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
 244
 245#define SDEBUG_MAX_PARTS 4
 246
 247#define SDEBUG_MAX_CMD_LEN 32
 248
 249
 250struct sdebug_dev_info {
 251	struct list_head dev_list;
 252	unsigned int channel;
 253	unsigned int target;
 254	u64 lun;
 255	uuid_t lu_name;
 256	struct sdebug_host_info *sdbg_host;
 257	unsigned long uas_bm[1];
 258	atomic_t num_in_q;
 259	atomic_t stopped;
 260	bool used;
 261};
 262
 263struct sdebug_host_info {
 264	struct list_head host_list;
 265	struct Scsi_Host *shost;
 266	struct device dev;
 267	struct list_head dev_info_list;
 268};
 269
 270#define to_sdebug_host(d)	\
 271	container_of(d, struct sdebug_host_info, dev)
 272
 273enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
 274		      SDEB_DEFER_WQ = 2};
 275
 276struct sdebug_defer {
 277	struct hrtimer hrt;
 278	struct execute_work ew;
 279	int sqa_idx;	/* index of sdebug_queue array */
 280	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
 281	int issuing_cpu;
 282	bool init_hrt;
 283	bool init_wq;
 284	enum sdeb_defer_type defer_t;
 285};
 286
 287struct sdebug_queued_cmd {
 288	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
 289	 * instance indicates this slot is in use.
 290	 */
 291	struct sdebug_defer *sd_dp;
 292	struct scsi_cmnd *a_cmnd;
 293	unsigned int inj_recovered:1;
 294	unsigned int inj_transport:1;
 295	unsigned int inj_dif:1;
 296	unsigned int inj_dix:1;
 297	unsigned int inj_short:1;
 298	unsigned int inj_host_busy:1;
 299};
 300
 301struct sdebug_queue {
 302	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
 303	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
 304	spinlock_t qc_lock;
 305	atomic_t blocked;	/* to temporarily stop more being queued */
 306};
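/*
 * Editor's sketch of how a slot in qc_arr[] is claimed (names follow the
 * driver's conventions; this is not a verbatim excerpt): a clear bit in
 * in_use_bm[] is found and set under qc_lock, and cleared again when the
 * deferred completion fires:
 *
 *	spin_lock_irqsave(&sqp->qc_lock, iflags);
 *	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
 *	if (k < sdebug_max_queue)
 *		set_bit(k, sqp->in_use_bm);	/* slot k now holds the cmnd */
 *	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 */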
 307
 308static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
 309static atomic_t sdebug_completions;  /* count of deferred completions */
 310static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 311static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
 312
 313struct opcode_info_t {
 314	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
 315				/* for terminating element */
 316	u8 opcode;		/* if num_attached > 0, preferred */
 317	u16 sa;			/* service action */
 318	u32 flags;		/* OR-ed set of SDEB_F_* */
 319	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
 320	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
 321	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
 322				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
 323};
 324
 325/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
 326enum sdeb_opcode_index {
 327	SDEB_I_INVALID_OPCODE =	0,
 328	SDEB_I_INQUIRY = 1,
 329	SDEB_I_REPORT_LUNS = 2,
 330	SDEB_I_REQUEST_SENSE = 3,
 331	SDEB_I_TEST_UNIT_READY = 4,
 332	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
 333	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
 334	SDEB_I_LOG_SENSE = 7,
 335	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
 336	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
 337	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
 338	SDEB_I_START_STOP = 11,
 339	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
 340	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
 341	SDEB_I_MAINT_IN = 14,
 342	SDEB_I_MAINT_OUT = 15,
 343	SDEB_I_VERIFY = 16,		/* 10 only */
 344	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
 345	SDEB_I_RESERVE = 18,		/* 6, 10 */
 346	SDEB_I_RELEASE = 19,		/* 6, 10 */
 347	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
 348	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
 349	SDEB_I_ATA_PT = 22,		/* 12, 16 */
 350	SDEB_I_SEND_DIAG = 23,
 351	SDEB_I_UNMAP = 24,
 352	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
 353	SDEB_I_WRITE_BUFFER = 26,
 354	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
 355	SDEB_I_SYNC_CACHE = 28,		/* 10, 16 */
 356	SDEB_I_COMP_WRITE = 29,
 357	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
 358};
 359
 360
 361static const unsigned char opcode_ind_arr[256] = {
 362/* 0x0; 0x0->0x1f: 6 byte cdbs */
 363	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
 364	    0, 0, 0, 0,
 365	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 366	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 367	    SDEB_I_RELEASE,
 368	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
 369	    SDEB_I_ALLOW_REMOVAL, 0,
 370/* 0x20; 0x20->0x3f: 10 byte cdbs */
 371	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
 372	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
 373	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
 374	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
 375/* 0x40; 0x40->0x5f: 10 byte cdbs */
 376	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
 377	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
 378	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 379	    SDEB_I_RELEASE,
 380	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
 381/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
 382	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 383	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 384	0, SDEB_I_VARIABLE_LEN,
 385/* 0x80; 0x80->0x9f: 16 byte cdbs */
 386	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
 387	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 388	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
 389	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
 390/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
 391	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
 392	     SDEB_I_MAINT_OUT, 0, 0, 0,
 393	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
 394	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
 395	0, 0, 0, 0, 0, 0, 0, 0,
 396	0, 0, 0, 0, 0, 0, 0, 0,
 397/* 0xc0; 0xc0->0xff: vendor specific */
 398	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 399	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 400	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 401	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 402};
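/*
 * Editor's sketch of the two-level dispatch this table feeds: the raw cdb
 * opcode selects an SDEB_I_* index, which in turn selects an entry in
 * opcode_info_arr[] below (the *_iarr arrays hold overflow cases that
 * share an index):
 *
 *	u8 opcode = scp->cmnd[0];
 *	const struct opcode_info_t *oip =
 *		&opcode_info_arr[opcode_ind_arr[opcode]];
 */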
 403
 404/*
 405 * The following "response" functions return the SCSI mid-level's 4 byte
 406 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 407 * command completion, they can mask their return value with
 408 * SDEG_RES_IMMED_MASK .
 409 */
 410#define SDEG_RES_IMMED_MASK 0x40000000
 411
 412static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
 413static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
 414static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
 415static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 416static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
 417static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 418static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
 419static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 420static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 421static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
 422static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
 423static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
 424static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
 425static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
 426static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
 427static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
 428static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
 429static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 430static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
 431static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 432static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 433static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 434static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
 435
 436/*
 437 * The following are overflow arrays for cdbs that "hit" the same index in
 438 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 439 * should be placed in opcode_info_arr[], the others should be placed here.
 440 */
 441static const struct opcode_info_t msense_iarr[] = {
 442	{0, 0x1a, 0, F_D_IN, NULL, NULL,
 443	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 444};
 445
 446static const struct opcode_info_t mselect_iarr[] = {
 447	{0, 0x15, 0, F_D_OUT, NULL, NULL,
 448	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 449};
 450
 451static const struct opcode_info_t read_iarr[] = {
 452	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
 453	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
 454	     0, 0, 0, 0} },
 455	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
 456	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 457	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
 458	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
 459	     0xc7, 0, 0, 0, 0} },
 460};
 461
 462static const struct opcode_info_t write_iarr[] = {
 463	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
 464	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
 465		   0, 0, 0, 0, 0, 0} },
 466	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
 467	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
 468		   0, 0, 0} },
 469	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
 470	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 471		   0xbf, 0xc7, 0, 0, 0, 0} },
 472};
 473
 474static const struct opcode_info_t sa_in_16_iarr[] = {
 475	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
 476	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 477	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
 478};
 479
 480static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
 481	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
 482	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
 483		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
 484	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
 485	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
 486		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
 487};
 488
 489static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
 490	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
 491	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
 492	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
 493	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
 494	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 495	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
 496};
 497
 498static const struct opcode_info_t write_same_iarr[] = {
 499	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
 500	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 501	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
 502};
 503
 504static const struct opcode_info_t reserve_iarr[] = {
 505	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
 506	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 507};
 508
 509static const struct opcode_info_t release_iarr[] = {
 510	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
 511	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 512};
 513
 514static const struct opcode_info_t sync_cache_iarr[] = {
 515	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
 516	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 517	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
 518};
 519
 520
 521/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 522 * plus the terminating elements for logic that scans this table such as
 523 * REPORT SUPPORTED OPERATION CODES. */
 524static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 525/* 0 */
 526	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
 527	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 528	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
 529	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 530	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
 531	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 532	     0, 0} },					/* REPORT LUNS */
 533	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
 534	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 535	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
 536	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 537/* 5 */
 538	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
 539	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
 540		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 541	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
 542	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
 543		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 544	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
 545	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
 546	     0, 0, 0} },
 547	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
 548	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
 549	     0, 0} },
 550	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
 551	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
 552	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
 553/* 10 */
 554	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
 555	    resp_write_dt0, write_iarr,			/* WRITE(16) */
 556		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 557		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
 558	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
 559	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 560	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
 561	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
 562		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 563		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
 564	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
 565	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
 566	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
 567	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
 568	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
 569		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
 570				0xff, 0, 0xc7, 0, 0, 0, 0} },
 571/* 15 */
 572	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 573	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 574	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
 575	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
 576	     0, 0, 0, 0, 0, 0} },
 577	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
 578	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
 579	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
 580	     0xff, 0xff} },
 581	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
 582	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
 583	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 584	     0} },
 585	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
 586	    NULL, release_iarr, /* RELEASE(10) <no response function> */
 587	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 588	     0} },
 589/* 20 */
 590	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
 591	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 592	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 593	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 594	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
 595	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 596	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
 597	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 598	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
 599	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 600/* 25 */
 601	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
 602	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
 603		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
 604	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
 605	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
 606	     0, 0, 0, 0} },			/* WRITE_BUFFER */
 607	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
 608	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
 609		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
 610		 0, 0, 0, 0, 0} },
 611	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
 612	    resp_sync_cache, sync_cache_iarr,
 613	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
 614	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
 615	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
 616	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
 617	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
 618
 619/* 30 */
 620	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
 621	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 622};
 623
 624static int sdebug_add_host = DEF_NUM_HOST;
 625static int sdebug_ato = DEF_ATO;
 626static int sdebug_cdb_len = DEF_CDB_LEN;
 627static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
 628static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
 629static int sdebug_dif = DEF_DIF;
 630static int sdebug_dix = DEF_DIX;
 631static int sdebug_dsense = DEF_D_SENSE;
 632static int sdebug_every_nth = DEF_EVERY_NTH;
 633static int sdebug_fake_rw = DEF_FAKE_RW;
 634static unsigned int sdebug_guard = DEF_GUARD;
 635static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
 636static int sdebug_max_luns = DEF_MAX_LUNS;
 637static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
 638static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
 639static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
 640static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
 641static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
 642static int sdebug_no_lun_0 = DEF_NO_LUN_0;
 643static int sdebug_no_uld;
 644static int sdebug_num_parts = DEF_NUM_PARTS;
 645static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 646static int sdebug_opt_blks = DEF_OPT_BLKS;
 647static int sdebug_opts = DEF_OPTS;
 648static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
 649static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
 650static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
 651static int sdebug_scsi_level = DEF_SCSI_LEVEL;
 652static int sdebug_sector_size = DEF_SECTOR_SIZE;
 653static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
 654static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 655static unsigned int sdebug_lbpu = DEF_LBPU;
 656static unsigned int sdebug_lbpws = DEF_LBPWS;
 657static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
 658static unsigned int sdebug_lbprz = DEF_LBPRZ;
 659static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 660static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 661static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 662static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 663static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
 664static int sdebug_uuid_ctl = DEF_UUID_CTL;
 665static bool sdebug_removable = DEF_REMOVABLE;
 666static bool sdebug_clustering;
 667static bool sdebug_host_lock = DEF_HOST_LOCK;
 668static bool sdebug_strict = DEF_STRICT;
 669static bool sdebug_any_injecting_opt;
 670static bool sdebug_verbose;
 671static bool have_dif_prot;
 672static bool write_since_sync;
 673static bool sdebug_statistics = DEF_STATISTICS;
 674
 675static unsigned int sdebug_store_sectors;
 676static sector_t sdebug_capacity;	/* in sectors */
 677
 678/* old BIOS stuff, kernel may get rid of them but some mode sense pages
 679   may still need them */
 680static int sdebug_heads;		/* heads per disk */
 681static int sdebug_cylinders_per;	/* cylinders per surface */
 682static int sdebug_sectors_per;		/* sectors per cylinder */
 683
 684static LIST_HEAD(sdebug_host_list);
 685static DEFINE_SPINLOCK(sdebug_host_list_lock);
 686
 687static unsigned char *fake_storep;	/* ramdisk storage */
 688static struct t10_pi_tuple *dif_storep;	/* protection info */
 689static void *map_storep;		/* provisioning map */
 690
 691static unsigned long map_size;
 692static int num_aborts;
 693static int num_dev_resets;
 694static int num_target_resets;
 695static int num_bus_resets;
 696static int num_host_resets;
 697static int dix_writes;
 698static int dix_reads;
 699static int dif_errors;
 700
 701static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
 702static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
 703
 704static DEFINE_RWLOCK(atomic_rw);
 705
 706static char sdebug_proc_name[] = MY_NAME;
 707static const char *my_name = MY_NAME;
 708
 709static struct bus_type pseudo_lld_bus;
 710
 711static struct device_driver sdebug_driverfs_driver = {
 712	.name 		= sdebug_proc_name,
 713	.bus		= &pseudo_lld_bus,
 714};
 715
 716static const int check_condition_result =
 717		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 718
 719static const int illegal_condition_result =
 720	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
 721
 722static const int device_qfull_result =
 723	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
 724
 725
 726/* Only do the extra work involved in logical block provisioning if one or
 727 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 728 * real reads and writes (i.e. not skipping them for speed).
 729 */
 730static inline bool scsi_debug_lbp(void)
 731{
 732	return 0 == sdebug_fake_rw &&
 733		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 734}
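/*
 * Editor's illustration: provisioning is therefore active only when a
 * thin-provisioning parameter is given and reads/writes are real, e.g.:
 *
 *	modprobe scsi_debug lbpu=1 lbpws=1 fake_rw=0
 */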
 735
 736static void *fake_store(unsigned long long lba)
 737{
 738	lba = do_div(lba, sdebug_store_sectors);
 739
 740	return fake_storep + lba * sdebug_sector_size;
 741}
 742
 743static struct t10_pi_tuple *dif_store(sector_t sector)
 744{
 745	sector = sector_div(sector, sdebug_store_sectors);
 746
 747	return dif_storep + sector;
 748}
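/*
 * Editor's note: both helpers wrap the requested sector modulo
 * sdebug_store_sectors, so a device advertising more capacity than its
 * ram backing (virtual_gb > 0) aliases onto the same store; e.g.
 * fake_store(sdebug_store_sectors + 7) returns the same address as
 * fake_store(7).
 */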
 749
 750static void sdebug_max_tgts_luns(void)
 751{
 752	struct sdebug_host_info *sdbg_host;
 753	struct Scsi_Host *hpnt;
 754
 755	spin_lock(&sdebug_host_list_lock);
 756	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 757		hpnt = sdbg_host->shost;
 758		if ((hpnt->this_id >= 0) &&
 759		    (sdebug_num_tgts > hpnt->this_id))
 760			hpnt->max_id = sdebug_num_tgts + 1;
 761		else
 762			hpnt->max_id = sdebug_num_tgts;
 763		/* sdebug_max_luns; */
 764		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 765	}
 766	spin_unlock(&sdebug_host_list_lock);
 767}
 768
 769enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
 770
 771/* Set in_bit to -1 to indicate no bit position of invalid field */
 772static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
 773				 enum sdeb_cmd_data c_d,
 774				 int in_byte, int in_bit)
 775{
 776	unsigned char *sbuff;
 777	u8 sks[4];
 778	int sl, asc;
 779
 780	sbuff = scp->sense_buffer;
 781	if (!sbuff) {
 782		sdev_printk(KERN_ERR, scp->device,
 783			    "%s: sense_buffer is NULL\n", __func__);
 784		return;
 785	}
 786	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
 787	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 788	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
 789	memset(sks, 0, sizeof(sks));
 790	sks[0] = 0x80;
 791	if (c_d)
 792		sks[0] |= 0x40;
 793	if (in_bit >= 0) {
 794		sks[0] |= 0x8;
 795		sks[0] |= 0x7 & in_bit;
 796	}
 797	put_unaligned_be16(in_byte, sks + 1);
 798	if (sdebug_dsense) {
 799		sl = sbuff[7] + 8;
 800		sbuff[7] = sl;
 801		sbuff[sl] = 0x2;
 802		sbuff[sl + 1] = 0x6;
 803		memcpy(sbuff + sl + 4, sks, 3);
 804	} else
 805		memcpy(sbuff + 15, sks, 3);
 806	if (sdebug_verbose)
 807		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
 808			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
 809			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
 810}
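/*
 * Editor's sketch: a response routine rejecting a reserved or unsupported
 * cdb field pairs this helper with check_condition_result, as
 * resp_inquiry() does further below:
 *
 *	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
 *	return check_condition_result;
 */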
 811
 812static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
 813{
 814	unsigned char *sbuff;
 815
 816	sbuff = scp->sense_buffer;
 817	if (!sbuff) {
 818		sdev_printk(KERN_ERR, scp->device,
 819			    "%s: sense_buffer is NULL\n", __func__);
 820		return;
 821	}
 822	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 823
 824	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
 825
 826	if (sdebug_verbose)
 827		sdev_printk(KERN_INFO, scp->device,
 828			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
 829			    my_name, key, asc, asq);
 830}
 831
 832static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
 833{
 834	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
 835}
 836
 837static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 838{
 839	if (sdebug_verbose) {
 840		if (0x1261 == cmd)
 841			sdev_printk(KERN_INFO, dev,
 842				    "%s: BLKFLSBUF [0x1261]\n", __func__);
 843		else if (0x5331 == cmd)
 844			sdev_printk(KERN_INFO, dev,
 845				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
 846				    __func__);
 847		else
 848			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
 849				    __func__, cmd);
 850	}
 851	return -EINVAL;
 852	/* return -ENOTTY; // correct return but upsets fdisk */
 853}
 854
 855static void config_cdb_len(struct scsi_device *sdev)
 856{
 857	switch (sdebug_cdb_len) {
 858	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
 859		sdev->use_10_for_rw = false;
 860		sdev->use_16_for_rw = false;
 861		sdev->use_10_for_ms = false;
 862		break;
 863	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
 864		sdev->use_10_for_rw = true;
 865		sdev->use_16_for_rw = false;
 866		sdev->use_10_for_ms = false;
 867		break;
 868	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
 869		sdev->use_10_for_rw = true;
 870		sdev->use_16_for_rw = false;
 871		sdev->use_10_for_ms = true;
 872		break;
 873	case 16:
 874		sdev->use_10_for_rw = false;
 875		sdev->use_16_for_rw = true;
 876		sdev->use_10_for_ms = true;
 877		break;
 878	case 32: /* No knobs to suggest this so same as 16 for now */
 879		sdev->use_10_for_rw = false;
 880		sdev->use_16_for_rw = true;
 881		sdev->use_10_for_ms = true;
 882		break;
 883	default:
 884		pr_warn("unexpected cdb_len=%d, force to 10\n",
 885			sdebug_cdb_len);
 886		sdev->use_10_for_rw = true;
 887		sdev->use_16_for_rw = false;
 888		sdev->use_10_for_ms = false;
 889		sdebug_cdb_len = 10;
 890		break;
 891	}
 892}
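/*
 * Editor's illustration (assuming the usual scsi_debug sysfs location):
 * cdb_len can also be changed at run time, after which
 * all_config_cdb_len() below reapplies it to every simulated device:
 *
 *	echo 16 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len
 */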
 893
 894static void all_config_cdb_len(void)
 895{
 896	struct sdebug_host_info *sdbg_host;
 897	struct Scsi_Host *shost;
 898	struct scsi_device *sdev;
 899
 900	spin_lock(&sdebug_host_list_lock);
 901	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 902		shost = sdbg_host->shost;
 903		shost_for_each_device(sdev, shost) {
 904			config_cdb_len(sdev);
 905		}
 906	}
 907	spin_unlock(&sdebug_host_list_lock);
 908}
 909
 910static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
 911{
 912	struct sdebug_host_info *sdhp;
 913	struct sdebug_dev_info *dp;
 914
 915	spin_lock(&sdebug_host_list_lock);
 916	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
 917		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
 918			if ((devip->sdbg_host == dp->sdbg_host) &&
 919			    (devip->target == dp->target))
 920				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
 921		}
 922	}
 923	spin_unlock(&sdebug_host_list_lock);
 924}
 925
 926static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 927{
 928	int k;
 929
 930	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 931	if (k != SDEBUG_NUM_UAS) {
 932		const char *cp = NULL;
 933
 934		switch (k) {
 935		case SDEBUG_UA_POR:
 936			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
 937					POWER_ON_RESET_ASCQ);
 938			if (sdebug_verbose)
 939				cp = "power on reset";
 940			break;
 941		case SDEBUG_UA_BUS_RESET:
 942			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
 943					BUS_RESET_ASCQ);
 944			if (sdebug_verbose)
 945				cp = "bus reset";
 946			break;
 947		case SDEBUG_UA_MODE_CHANGED:
 948			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
 949					MODE_CHANGED_ASCQ);
 950			if (sdebug_verbose)
 951				cp = "mode parameters changed";
 952			break;
 953		case SDEBUG_UA_CAPACITY_CHANGED:
 954			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
 955					CAPACITY_CHANGED_ASCQ);
 956			if (sdebug_verbose)
 957				cp = "capacity data changed";
 958			break;
 959		case SDEBUG_UA_MICROCODE_CHANGED:
 960			mk_sense_buffer(scp, UNIT_ATTENTION,
 961					TARGET_CHANGED_ASC,
 962					MICROCODE_CHANGED_ASCQ);
 963			if (sdebug_verbose)
 964				cp = "microcode has been changed";
 965			break;
 966		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
 967			mk_sense_buffer(scp, UNIT_ATTENTION,
 968					TARGET_CHANGED_ASC,
 969					MICROCODE_CHANGED_WO_RESET_ASCQ);
 970			if (sdebug_verbose)
 971				cp = "microcode has been changed without reset";
 972			break;
 973		case SDEBUG_UA_LUNS_CHANGED:
 974			/*
 975			 * SPC-3 behavior is to report a UNIT ATTENTION with
 976			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
 977			 * on the target, until a REPORT LUNS command is
 978			 * received.  SPC-4 behavior is to report it only once.
 979			 * NOTE:  sdebug_scsi_level does not use the same
 980			 * values as struct scsi_device->scsi_level.
 981			 */
 982			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
 983				clear_luns_changed_on_target(devip);
 984			mk_sense_buffer(scp, UNIT_ATTENTION,
 985					TARGET_CHANGED_ASC,
 986					LUNS_CHANGED_ASCQ);
 987			if (sdebug_verbose)
 988				cp = "reported luns data has changed";
 989			break;
 990		default:
 991			pr_warn("unexpected unit attention code=%d\n", k);
 992			if (sdebug_verbose)
 993				cp = "unknown";
 994			break;
 995		}
 996		clear_bit(k, devip->uas_bm);
 997		if (sdebug_verbose)
 998			sdev_printk(KERN_INFO, scp->device,
 999				   "%s reports: Unit attention: %s\n",
1000				   my_name, cp);
1001		return check_condition_result;
1002	}
1003	return 0;
1004}
1005
1006/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1007static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1008				int arr_len)
1009{
1010	int act_len;
1011	struct scsi_data_buffer *sdb = scsi_in(scp);
1012
1013	if (!sdb->length)
1014		return 0;
1015	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1016		return DID_ERROR << 16;
1017
1018	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1019				      arr, arr_len);
1020	sdb->resid = scsi_bufflen(scp) - act_len;
1021
1022	return 0;
1023}
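/*
 * Editor's sketch: data-in responders build their payload in a local
 * array and let this helper copy it into the command's scatter-gather
 * list, truncated to the allocation length taken from the cdb:
 *
 *	ret = fill_from_dev_buffer(scp, arr, min(len, alloc_len));
 */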
1024
1025/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1026 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1027 * calls, not required to write in ascending offset order. Assumes resid
1028 * set to scsi_bufflen() prior to any calls.
1029 */
1030static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1031				  int arr_len, unsigned int off_dst)
1032{
1033	int act_len, n;
1034	struct scsi_data_buffer *sdb = scsi_in(scp);
1035	off_t skip = off_dst;
1036
1037	if (sdb->length <= off_dst)
1038		return 0;
1039	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1040		return DID_ERROR << 16;
1041
1042	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1043				       arr, arr_len, skip);
1044	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1045		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
1046	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
1047	sdb->resid = min(sdb->resid, n);
1048	return 0;
1049}
1050
1051/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1052 * 'arr' or -1 if error.
1053 */
1054static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1055			       int arr_len)
1056{
1057	if (!scsi_bufflen(scp))
1058		return 0;
1059	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
1060		return -1;
1061
1062	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1063}
1064
1065
1066static char sdebug_inq_vendor_id[9] = "Linux   ";
1067static char sdebug_inq_product_id[17] = "scsi_debug      ";
1068static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1069/* Use some locally assigned NAAs for SAS addresses. */
1070static const u64 naa3_comp_a = 0x3222222000000000ULL;
1071static const u64 naa3_comp_b = 0x3333333000000000ULL;
1072static const u64 naa3_comp_c = 0x3111111000000000ULL;
1073
1074/* Device identification VPD page. Returns number of bytes placed in arr */
1075static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1076			  int target_dev_id, int dev_id_num,
1077			  const char *dev_id_str, int dev_id_str_len,
1078			  const uuid_t *lu_name)
1079{
1080	int num, port_a;
1081	char b[32];
1082
1083	port_a = target_dev_id + 1;
1084	/* T10 vendor identifier field format (faked) */
1085	arr[0] = 0x2;	/* ASCII */
1086	arr[1] = 0x1;
1087	arr[2] = 0x0;
1088	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1089	memcpy(&arr[12], sdebug_inq_product_id, 16);
1090	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1091	num = 8 + 16 + dev_id_str_len;
1092	arr[3] = num;
1093	num += 4;
1094	if (dev_id_num >= 0) {
1095		if (sdebug_uuid_ctl) {
1096			/* Locally assigned UUID */
1097			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1098			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1099			arr[num++] = 0x0;
1100			arr[num++] = 0x12;
1101			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1102			arr[num++] = 0x0;
1103			memcpy(arr + num, lu_name, 16);
1104			num += 16;
1105		} else {
1106			/* NAA-3, Logical unit identifier (binary) */
1107			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1108			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1109			arr[num++] = 0x0;
1110			arr[num++] = 0x8;
1111			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1112			num += 8;
1113		}
1114		/* Target relative port number */
1115		arr[num++] = 0x61;	/* proto=sas, binary */
1116		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1117		arr[num++] = 0x0;	/* reserved */
1118		arr[num++] = 0x4;	/* length */
1119		arr[num++] = 0x0;	/* reserved */
1120		arr[num++] = 0x0;	/* reserved */
1121		arr[num++] = 0x0;
1122		arr[num++] = 0x1;	/* relative port A */
1123	}
1124	/* NAA-3, Target port identifier */
1125	arr[num++] = 0x61;	/* proto=sas, binary */
1126	arr[num++] = 0x93;	/* piv=1, target port, naa */
1127	arr[num++] = 0x0;
1128	arr[num++] = 0x8;
1129	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1130	num += 8;
1131	/* NAA-3, Target port group identifier */
1132	arr[num++] = 0x61;	/* proto=sas, binary */
1133	arr[num++] = 0x95;	/* piv=1, target port group id */
1134	arr[num++] = 0x0;
1135	arr[num++] = 0x4;
1136	arr[num++] = 0;
1137	arr[num++] = 0;
1138	put_unaligned_be16(port_group_id, arr + num);
1139	num += 2;
1140	/* NAA-3, Target device identifier */
1141	arr[num++] = 0x61;	/* proto=sas, binary */
1142	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1143	arr[num++] = 0x0;
1144	arr[num++] = 0x8;
1145	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1146	num += 8;
1147	/* SCSI name string: Target device identifier */
1148	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1149	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1150	arr[num++] = 0x0;
1151	arr[num++] = 24;
1152	memcpy(arr + num, "naa.32222220", 12);
1153	num += 12;
1154	snprintf(b, sizeof(b), "%08X", target_dev_id);
1155	memcpy(arr + num, b, 8);
1156	num += 8;
1157	memset(arr + num, 0, 4);
1158	num += 4;
1159	return num;
1160}
1161
1162static unsigned char vpd84_data[] = {
1163/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1164    0x22,0x22,0x22,0x0,0xbb,0x1,
1165    0x22,0x22,0x22,0x0,0xbb,0x2,
1166};
1167
1168/*  Software interface identification VPD page */
1169static int inquiry_vpd_84(unsigned char *arr)
1170{
1171	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1172	return sizeof(vpd84_data);
1173}
1174
1175/* Management network addresses VPD page */
1176static int inquiry_vpd_85(unsigned char *arr)
1177{
1178	int num = 0;
1179	const char *na1 = "https://www.kernel.org/config";
1180	const char *na2 = "http://www.kernel.org/log";
1181	int plen, olen;
1182
1183	arr[num++] = 0x1;	/* lu, storage config */
1184	arr[num++] = 0x0;	/* reserved */
1185	arr[num++] = 0x0;
1186	olen = strlen(na1);
1187	plen = olen + 1;
1188	if (plen % 4)
1189		plen = ((plen / 4) + 1) * 4;
1190	arr[num++] = plen;	/* length, null terminated, padded */
1191	memcpy(arr + num, na1, olen);
1192	memset(arr + num + olen, 0, plen - olen);
1193	num += plen;
1194
1195	arr[num++] = 0x4;	/* lu, logging */
1196	arr[num++] = 0x0;	/* reserved */
1197	arr[num++] = 0x0;
1198	olen = strlen(na2);
1199	plen = olen + 1;
1200	if (plen % 4)
1201		plen = ((plen / 4) + 1) * 4;
1202	arr[num++] = plen;	/* length, null terminated, padded */
1203	memcpy(arr + num, na2, olen);
1204	memset(arr + num + olen, 0, plen - olen);
1205	num += plen;
1206
1207	return num;
1208}
1209
1210/* SCSI ports VPD page */
1211static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1212{
1213	int num = 0;
1214	int port_a, port_b;
1215
1216	port_a = target_dev_id + 1;
1217	port_b = port_a + 1;
1218	arr[num++] = 0x0;	/* reserved */
1219	arr[num++] = 0x0;	/* reserved */
1220	arr[num++] = 0x0;
1221	arr[num++] = 0x1;	/* relative port 1 (primary) */
1222	memset(arr + num, 0, 6);
1223	num += 6;
1224	arr[num++] = 0x0;
1225	arr[num++] = 12;	/* length tp descriptor */
1226	/* naa-5 target port identifier (A) */
1227	arr[num++] = 0x61;	/* proto=sas, binary */
1228	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1229	arr[num++] = 0x0;	/* reserved */
1230	arr[num++] = 0x8;	/* length */
1231	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1232	num += 8;
1233	arr[num++] = 0x0;	/* reserved */
1234	arr[num++] = 0x0;	/* reserved */
1235	arr[num++] = 0x0;
1236	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1237	memset(arr + num, 0, 6);
1238	num += 6;
1239	arr[num++] = 0x0;
1240	arr[num++] = 12;	/* length tp descriptor */
1241	/* naa-5 target port identifier (B) */
1242	arr[num++] = 0x61;	/* proto=sas, binary */
1243	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1244	arr[num++] = 0x0;	/* reserved */
1245	arr[num++] = 0x8;	/* length */
1246	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1247	num += 8;
1248
1249	return num;
1250}
1251
1252
1253static unsigned char vpd89_data[] = {
1254/* from 4th byte */ 0,0,0,0,
1255'l','i','n','u','x',' ',' ',' ',
1256'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1257'1','2','3','4',
12580x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
12590xec,0,0,0,
12600x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
12610,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
12620x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
12630x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
12640x53,0x41,
12650x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
12660x20,0x20,
12670x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
12680x10,0x80,
12690,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
12700x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
12710x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
12720,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
12730x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
12740x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
12750,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
12760,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12770,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12780,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12790x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
12800,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
12810xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
12820,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
12830,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12840,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12850,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12870,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12880,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12890,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12900,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12910,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12920,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12930,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
12940,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1295};
1296
1297/* ATA Information VPD page */
1298static int inquiry_vpd_89(unsigned char *arr)
1299{
1300	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1301	return sizeof(vpd89_data);
1302}
1303
1304
1305static unsigned char vpdb0_data[] = {
1306	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1307	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1308	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1309	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1310};
1311
1312/* Block limits VPD page (SBC-3) */
1313static int inquiry_vpd_b0(unsigned char *arr)
1314{
1315	unsigned int gran;
1316
1317	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1318
1319	/* Optimal transfer length granularity */
1320	if (sdebug_opt_xferlen_exp != 0 &&
1321	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1322		gran = 1 << sdebug_opt_xferlen_exp;
1323	else
1324		gran = 1 << sdebug_physblk_exp;
1325	put_unaligned_be16(gran, arr + 2);
1326
1327	/* Maximum Transfer Length */
1328	if (sdebug_store_sectors > 0x400)
1329		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1330
1331	/* Optimal Transfer Length */
1332	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1333
1334	if (sdebug_lbpu) {
1335		/* Maximum Unmap LBA Count */
1336		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1337
1338		/* Maximum Unmap Block Descriptor Count */
1339		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1340	}
1341
1342	/* Unmap Granularity Alignment */
1343	if (sdebug_unmap_alignment) {
1344		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1345		arr[28] |= 0x80; /* UGAVALID */
1346	}
1347
1348	/* Optimal Unmap Granularity */
1349	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1350
1351	/* Maximum WRITE SAME Length */
1352	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1353
1354	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1357}
1358
1359/* Block device characteristics VPD page (SBC-3) */
1360static int inquiry_vpd_b1(unsigned char *arr)
1361{
1362	memset(arr, 0, 0x3c);
1363	arr[0] = 0;
1364	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1365	arr[2] = 0;
1366	arr[3] = 5;	/* less than 1.8" */
1367
1368	return 0x3c;
1369}
1370
1371/* Logical block provisioning VPD page (SBC-4) */
1372static int inquiry_vpd_b2(unsigned char *arr)
1373{
1374	memset(arr, 0, 0x4);
1375	arr[0] = 0;			/* threshold exponent */
1376	if (sdebug_lbpu)
1377		arr[1] = 1 << 7;
1378	if (sdebug_lbpws)
1379		arr[1] |= 1 << 6;
1380	if (sdebug_lbpws10)
1381		arr[1] |= 1 << 5;
1382	if (sdebug_lbprz && scsi_debug_lbp())
1383		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1384	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1385	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1386	/* threshold_percentage=0 */
1387	return 0x4;
1388}
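/*
 * Editor's illustration (needs the sg3_utils package, not this driver):
 * the VPD pages assembled above can be inspected from user space, e.g.:
 *
 *	sg_vpd --page=0xb0 /dev/sdX	(Block limits)
 */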
1389
1390#define SDEBUG_LONG_INQ_SZ 96
1391#define SDEBUG_MAX_INQ_ARR_SZ 584
1392
1393static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1394{
1395	unsigned char pq_pdt;
1396	unsigned char *arr;
1397	unsigned char *cmd = scp->cmnd;
1398	int alloc_len, n, ret;
1399	bool have_wlun, is_disk;
1400
1401	alloc_len = get_unaligned_be16(cmd + 3);
1402	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1403	if (!arr)
1404		return DID_REQUEUE << 16;
1405	is_disk = (sdebug_ptype == TYPE_DISK);
1406	have_wlun = scsi_is_wlun(scp->device->lun);
1407	if (have_wlun)
1408		pq_pdt = TYPE_WLUN;	/* present, wlun */
1409	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1410		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1411	else
1412		pq_pdt = (sdebug_ptype & 0x1f);
1413	arr[0] = pq_pdt;
1414	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1415		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1416		kfree(arr);
1417		return check_condition_result;
1418	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1419		int lu_id_num, port_group_id, target_dev_id, len;
1420		char lu_id_str[6];
1421		int host_no = devip->sdbg_host->shost->host_no;
1422
1423		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1424		    (devip->channel & 0x7f);
1425		if (sdebug_vpd_use_hostno == 0)
1426			host_no = 0;
1427		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1428			    (devip->target * 1000) + devip->lun);
1429		target_dev_id = ((host_no + 1) * 2000) +
1430				 (devip->target * 1000) - 3;
1431		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1432		if (0 == cmd[2]) { /* supported vital product data pages */
1433			arr[1] = cmd[2];	/*sanity */
1434			n = 4;
1435			arr[n++] = 0x0;   /* this page */
1436			arr[n++] = 0x80;  /* unit serial number */
1437			arr[n++] = 0x83;  /* device identification */
1438			arr[n++] = 0x84;  /* software interface ident. */
1439			arr[n++] = 0x85;  /* management network addresses */
1440			arr[n++] = 0x86;  /* extended inquiry */
1441			arr[n++] = 0x87;  /* mode page policy */
1442			arr[n++] = 0x88;  /* SCSI ports */
1443			if (is_disk) {	  /* SBC only */
1444				arr[n++] = 0x89;  /* ATA information */
1445				arr[n++] = 0xb0;  /* Block limits */
1446				arr[n++] = 0xb1;  /* Block characteristics */
1447				arr[n++] = 0xb2;  /* Logical Block Prov */
1448			}
1449			arr[3] = n - 4;	  /* number of supported VPD pages */
1450		} else if (0x80 == cmd[2]) { /* unit serial number */
1451			arr[1] = cmd[2];	/*sanity */
1452			arr[3] = len;
1453			memcpy(&arr[4], lu_id_str, len);
1454		} else if (0x83 == cmd[2]) { /* device identification */
1455			arr[1] = cmd[2];	/*sanity */
1456			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1457						target_dev_id, lu_id_num,
1458						lu_id_str, len,
1459						&devip->lu_name);
1460		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1461			arr[1] = cmd[2];	/*sanity */
1462			arr[3] = inquiry_vpd_84(&arr[4]);
1463		} else if (0x85 == cmd[2]) { /* Management network addresses */
1464			arr[1] = cmd[2];	/*sanity */
1465			arr[3] = inquiry_vpd_85(&arr[4]);
1466		} else if (0x86 == cmd[2]) { /* extended inquiry */
1467			arr[1] = cmd[2];	/*sanity */
1468			arr[3] = 0x3c;	/* number of following entries */
1469			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1470				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1471			else if (have_dif_prot)
1472				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1473			else
1474				arr[4] = 0x0;   /* no protection stuff */
1475			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1476		} else if (0x87 == cmd[2]) { /* mode page policy */
1477			arr[1] = cmd[2];	/*sanity */
1478			arr[3] = 0x8;	/* number of following entries */
1479			arr[4] = 0x2;	/* disconnect-reconnect mp */
1480			arr[6] = 0x80;	/* mlus, shared */
1481			arr[8] = 0x18;	 /* protocol specific lu */
1482			arr[10] = 0x82;	 /* mlus, per initiator port */
1483		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1484			arr[1] = cmd[2];	/*sanity */
1485			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1486		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1487			arr[1] = cmd[2];        /*sanity */
1488			n = inquiry_vpd_89(&arr[4]);
1489			put_unaligned_be16(n, arr + 2);
1490		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
1491			arr[1] = cmd[2];        /*sanity */
1492			arr[3] = inquiry_vpd_b0(&arr[4]);
1493		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1494			arr[1] = cmd[2];        /*sanity */
1495			arr[3] = inquiry_vpd_b1(&arr[4]);
1496		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1497			arr[1] = cmd[2];        /*sanity */
1498			arr[3] = inquiry_vpd_b2(&arr[4]);
1499		} else {
1500			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1501			kfree(arr);
1502			return check_condition_result;
1503		}
1504		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1505		ret = fill_from_dev_buffer(scp, arr,
1506			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1507		kfree(arr);
1508		return ret;
1509	}
1510	/* drops through here for a standard inquiry */
1511	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1512	arr[2] = sdebug_scsi_level;
1513	arr[3] = 2;    /* response_data_format==2 */
1514	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1515	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1516	if (sdebug_vpd_use_hostno == 0)
1517		arr[5] |= 0x10; /* claim: implicit TPGS */
1518	arr[6] = 0x10; /* claim: MultiP */
1519	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1520	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1521	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1522	memcpy(&arr[16], sdebug_inq_product_id, 16);
1523	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1524	/* Use Vendor Specific area to place driver date in ASCII */
1525	memcpy(&arr[36], sdebug_version_date, 8);
1526	/* version descriptors (2 bytes each) follow */
1527	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1528	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1529	n = 62;
1530	if (is_disk) {		/* SBC-4 no version claimed */
1531		put_unaligned_be16(0x600, arr + n);
1532		n += 2;
1533	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1534		put_unaligned_be16(0x525, arr + n);
1535		n += 2;
1536	}
1537	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1538	ret = fill_from_dev_buffer(scp, arr,
1539			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
1540	kfree(arr);
1541	return ret;
1542}
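
/*
 * The pages assembled above can be checked from user space; e.g. with
 * sg3_utils, "sg_inq --page=0x83 /dev/sdX" should display the device
 * identification VPD page built by inquiry_vpd_83().
 */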
1543
1544static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1545				   0, 0, 0x0, 0x0};
1546
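/*
 * REQUEST SENSE: if the TEST bit is set together with MRIE==6 in the
 * informational exceptions mode page, fabricate a THRESHOLD EXCEEDED
 * response; otherwise report the sense data of the previous command,
 * converting between fixed and descriptor format per the cdb's DESC
 * bit.
 */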
1547static int resp_requests(struct scsi_cmnd *scp,
1548			 struct sdebug_dev_info *devip)
1549{
1550	unsigned char *sbuff;
1551	unsigned char *cmd = scp->cmnd;
1552	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1553	bool dsense;
1554	int len = 18;
1555
1556	memset(arr, 0, sizeof(arr));
1557	dsense = !!(cmd[1] & 1);
1558	sbuff = scp->sense_buffer;
1559	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1560		if (dsense) {
1561			arr[0] = 0x72;
1562			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1563			arr[2] = THRESHOLD_EXCEEDED;
1564			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1565			len = 8;
1566		} else {
1567			arr[0] = 0x70;
1568			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1569			arr[7] = 0xa;	/* 18 byte sense buffer */
1570			arr[12] = THRESHOLD_EXCEEDED;
1571			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1572		}
1573	} else {
1574		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1575		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1576			;	/* have sense and formats match */
1577		else if (arr[0] <= 0x70) {
1578			if (dsense) {
1579				memset(arr, 0, 8);
1580				arr[0] = 0x72;
1581				len = 8;
1582			} else {
1583				memset(arr, 0, 18);
1584				arr[0] = 0x70;
1585				arr[7] = 0xa;
1586			}
1587		} else if (dsense) {
1588			memset(arr, 0, 8);
1589			arr[0] = 0x72;
1590			arr[1] = sbuff[2];     /* sense key */
1591			arr[2] = sbuff[12];    /* asc */
1592			arr[3] = sbuff[13];    /* ascq */
1593			len = 8;
1594		} else {
1595			memset(arr, 0, 18);
1596			arr[0] = 0x70;
1597			arr[2] = sbuff[1];     /* sense key */
1598			arr[7] = 0xa;
1599			arr[12] = sbuff[2];    /* asc */
1600			arr[13] = sbuff[3];    /* ascq */
1601		}
1602
1603	}
1604	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1605	return fill_from_dev_buffer(scp, arr, len);
1606}
1607
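/*
 * START STOP UNIT: only a POWER CONDITION field of 0 is accepted; the
 * started/stopped state is tracked per device, and an immediate-style
 * completion (SDEG_RES_IMMED_MASK) is signalled when the state is
 * unchanged or the IMMED bit is set.
 */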
1608static int resp_start_stop(struct scsi_cmnd *scp,
1609			   struct sdebug_dev_info *devip)
1610{
1611	unsigned char *cmd = scp->cmnd;
1612	int power_cond, stop;
1613	bool changing;
1614
1615	power_cond = (cmd[4] & 0xf0) >> 4;
1616	if (power_cond) {
1617		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1618		return check_condition_result;
1619	}
1620	stop = !(cmd[4] & 1);
1621	changing = atomic_read(&devip->stopped) == !stop;
1622	atomic_xchg(&devip->stopped, stop);
1623	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1624		return SDEG_RES_IMMED_MASK;
1625	else
1626		return 0;
1627}
1628
1629static sector_t get_sdebug_capacity(void)
1630{
1631	static const unsigned int gibibyte = 1073741824;
1632
1633	if (sdebug_virtual_gb > 0)
1634		return (sector_t)sdebug_virtual_gb *
1635			(gibibyte / sdebug_sector_size);
1636	else
1637		return sdebug_store_sectors;
1638}
1639
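/*
 * READ CAPACITY(10): if the capacity does not fit in 32 bits, the
 * RETURNED LOGICAL BLOCK ADDRESS is pinned at 0xffffffff, directing
 * the initiator to retry with READ CAPACITY(16).
 */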
1640#define SDEBUG_READCAP_ARR_SZ 8
1641static int resp_readcap(struct scsi_cmnd *scp,
1642			struct sdebug_dev_info *devip)
1643{
1644	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1645	unsigned int capac;
1646
1647	/* following just in case virtual_gb changed */
1648	sdebug_capacity = get_sdebug_capacity();
1649	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1650	if (sdebug_capacity < 0xffffffff) {
1651		capac = (unsigned int)sdebug_capacity - 1;
1652		put_unaligned_be32(capac, arr + 0);
1653	} else
1654		put_unaligned_be32(0xffffffff, arr + 0);
1655	put_unaligned_be16(sdebug_sector_size, arr + 6);
1656	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1657}
1658
1659#define SDEBUG_READCAP16_ARR_SZ 32
1660static int resp_readcap16(struct scsi_cmnd *scp,
1661			  struct sdebug_dev_info *devip)
1662{
1663	unsigned char *cmd = scp->cmnd;
1664	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1665	int alloc_len;
1666
1667	alloc_len = get_unaligned_be32(cmd + 10);
1668	/* following just in case virtual_gb changed */
1669	sdebug_capacity = get_sdebug_capacity();
1670	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1671	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1672	put_unaligned_be32(sdebug_sector_size, arr + 8);
1673	arr[13] = sdebug_physblk_exp & 0xf;
1674	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1675
1676	if (scsi_debug_lbp()) {
1677		arr[14] |= 0x80; /* LBPME */
1678		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1679		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1680		 * in the wider field maps to 0 in this field.
1681		 */
1682		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1683			arr[14] |= 0x40;
1684	}
1685
1686	arr[15] = sdebug_lowest_aligned & 0xff;
1687
1688	if (have_dif_prot) {
1689		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1690		arr[12] |= 1; /* PROT_EN */
1691	}
1692
1693	return fill_from_dev_buffer(scp, arr,
1694				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1695}
1696
1697#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1698
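/*
 * REPORT TARGET PORT GROUPS: fabricate two port groups to match the
 * two relative target ports claimed by VPD page 0x88, with the group
 * holding port B permanently in the ALUA unavailable state.
 */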
1699static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1700			      struct sdebug_dev_info *devip)
1701{
1702	unsigned char *cmd = scp->cmnd;
1703	unsigned char *arr;
1704	int host_no = devip->sdbg_host->shost->host_no;
1705	int n, ret, alen, rlen;
1706	int port_group_a, port_group_b, port_a, port_b;
1707
1708	alen = get_unaligned_be32(cmd + 6);
1709	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1710	if (!arr)
1711		return DID_REQUEUE << 16;
1712	/*
1713	 * EVPD page 0x88 states we have two ports, one
1714	 * real and a fake port with no device connected.
1715	 * So we create two port groups with one port each
1716	 * and set the group with port B to unavailable.
1717	 */
1718	port_a = 0x1; /* relative port A */
1719	port_b = 0x2; /* relative port B */
1720	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1721			(devip->channel & 0x7f);
1722	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1723			(devip->channel & 0x7f) + 0x80;
1724
1725	/*
1726	 * The asymmetric access state is cycled according to the host_no.
1727	 */
1728	n = 4;
1729	if (sdebug_vpd_use_hostno == 0) {
1730		arr[n++] = host_no % 3; /* Asymm access state */
1731		arr[n++] = 0x0F; /* claim: all states are supported */
1732	} else {
1733		arr[n++] = 0x0; /* Active/Optimized path */
1734		arr[n++] = 0x01; /* only support active/optimized paths */
1735	}
1736	put_unaligned_be16(port_group_a, arr + n);
1737	n += 2;
1738	arr[n++] = 0;    /* Reserved */
1739	arr[n++] = 0;    /* Status code */
1740	arr[n++] = 0;    /* Vendor unique */
1741	arr[n++] = 0x1;  /* One port per group */
1742	arr[n++] = 0;    /* Reserved */
1743	arr[n++] = 0;    /* Reserved */
1744	put_unaligned_be16(port_a, arr + n);
1745	n += 2;
1746	arr[n++] = 3;    /* Port unavailable */
1747	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1748	put_unaligned_be16(port_group_b, arr + n);
1749	n += 2;
1750	arr[n++] = 0;    /* Reserved */
1751	arr[n++] = 0;    /* Status code */
1752	arr[n++] = 0;    /* Vendor unique */
1753	arr[n++] = 0x1;  /* One port per group */
1754	arr[n++] = 0;    /* Reserved */
1755	arr[n++] = 0;    /* Reserved */
1756	put_unaligned_be16(port_b, arr + n);
1757	n += 2;
1758
1759	rlen = n - 4;
1760	put_unaligned_be32(rlen, arr + 0);
1761
1762	/*
1763	 * Return the smallest value of either
1764	 * - The allocated length
1765	 * - The constructed response length
1766	 * - The maximum array size
1767	 */
1768	rlen = min(alen, n);
1769	ret = fill_from_dev_buffer(scp, arr,
1770				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1771	kfree(arr);
1772	return ret;
1773}
1774
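/*
 * REPORT SUPPORTED OPERATION CODES: walk opcode_info_arr to list all
 * supported commands (reporting option 0) or to describe a single
 * opcode/service action pair (options 1 to 3), appending command
 * timeout descriptors when the RCTD bit is set.
 */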
1775static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1776			     struct sdebug_dev_info *devip)
1777{
1778	bool rctd;
1779	u8 reporting_opts, req_opcode, sdeb_i, supp;
1780	u16 req_sa, u;
1781	u32 alloc_len, a_len;
1782	int k, offset, len, errsts, count, bump, na;
1783	const struct opcode_info_t *oip;
1784	const struct opcode_info_t *r_oip;
1785	u8 *arr;
1786	u8 *cmd = scp->cmnd;
1787
1788	rctd = !!(cmd[2] & 0x80);
1789	reporting_opts = cmd[2] & 0x7;
1790	req_opcode = cmd[3];
1791	req_sa = get_unaligned_be16(cmd + 4);
1792	alloc_len = get_unaligned_be32(cmd + 6);
1793	if (alloc_len < 4 || alloc_len > 0xffff) {
1794		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1795		return check_condition_result;
1796	}
1797	if (alloc_len > 8192)
1798		a_len = 8192;
1799	else
1800		a_len = alloc_len;
1801	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1802	if (NULL == arr) {
1803		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1804				INSUFF_RES_ASCQ);
1805		return check_condition_result;
1806	}
1807	switch (reporting_opts) {
1808	case 0:	/* all commands */
1809		/* count number of commands */
1810		for (count = 0, oip = opcode_info_arr;
1811		     oip->num_attached != 0xff; ++oip) {
1812			if (F_INV_OP & oip->flags)
1813				continue;
1814			count += (oip->num_attached + 1);
1815		}
1816		bump = rctd ? 20 : 8;
1817		put_unaligned_be32(count * bump, arr);
1818		for (offset = 4, oip = opcode_info_arr;
1819		     oip->num_attached != 0xff && offset < a_len; ++oip) {
1820			if (F_INV_OP & oip->flags)
1821				continue;
1822			na = oip->num_attached;
1823			arr[offset] = oip->opcode;
1824			put_unaligned_be16(oip->sa, arr + offset + 2);
1825			if (rctd)
1826				arr[offset + 5] |= 0x2;
1827			if (FF_SA & oip->flags)
1828				arr[offset + 5] |= 0x1;
1829			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1830			if (rctd)
1831				put_unaligned_be16(0xa, arr + offset + 8);
1832			r_oip = oip;
1833			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1834				if (F_INV_OP & oip->flags)
1835					continue;
1836				offset += bump;
1837				arr[offset] = oip->opcode;
1838				put_unaligned_be16(oip->sa, arr + offset + 2);
1839				if (rctd)
1840					arr[offset + 5] |= 0x2;
1841				if (FF_SA & oip->flags)
1842					arr[offset + 5] |= 0x1;
1843				put_unaligned_be16(oip->len_mask[0],
1844						   arr + offset + 6);
1845				if (rctd)
1846					put_unaligned_be16(0xa,
1847							   arr + offset + 8);
1848			}
1849			oip = r_oip;
1850			offset += bump;
1851		}
1852		break;
1853	case 1:	/* one command: opcode only */
1854	case 2:	/* one command: opcode plus service action */
1855	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
1856		sdeb_i = opcode_ind_arr[req_opcode];
1857		oip = &opcode_info_arr[sdeb_i];
1858		if (F_INV_OP & oip->flags) {
1859			supp = 1;
1860			offset = 4;
1861		} else {
1862			if (1 == reporting_opts) {
1863				if (FF_SA & oip->flags) {
1864					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1865							     2, 2);
1866					kfree(arr);
1867					return check_condition_result;
1868				}
1869				req_sa = 0;
1870			} else if (2 == reporting_opts &&
1871				   0 == (FF_SA & oip->flags)) {
1872				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1873				kfree(arr);	/* sense: field pointer at requested sa */
1874				return check_condition_result;
1875			}
1876			if (0 == (FF_SA & oip->flags) &&
1877			    req_opcode == oip->opcode)
1878				supp = 3;
1879			else if (0 == (FF_SA & oip->flags)) {
1880				na = oip->num_attached;
1881				for (k = 0, oip = oip->arrp; k < na;
1882				     ++k, ++oip) {
1883					if (req_opcode == oip->opcode)
1884						break;
1885				}
1886				supp = (k >= na) ? 1 : 3;
1887			} else if (req_sa != oip->sa) {
1888				na = oip->num_attached;
1889				for (k = 0, oip = oip->arrp; k < na;
1890				     ++k, ++oip) {
1891					if (req_sa == oip->sa)
1892						break;
1893				}
1894				supp = (k >= na) ? 1 : 3;
1895			} else
1896				supp = 3;
1897			if (3 == supp) {
1898				u = oip->len_mask[0];
1899				put_unaligned_be16(u, arr + 2);
1900				arr[4] = oip->opcode;
1901				for (k = 1; k < u; ++k)
1902					arr[4 + k] = (k < 16) ?
1903						 oip->len_mask[k] : 0xff;
1904				offset = 4 + u;
1905			} else
1906				offset = 4;
1907		}
1908		arr[1] = (rctd ? 0x80 : 0) | supp;
1909		if (rctd) {
1910			put_unaligned_be16(0xa, arr + offset);
1911			offset += 12;
1912		}
1913		break;
1914	default:
1915		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1916		kfree(arr);
1917		return check_condition_result;
1918	}
1919	offset = (offset < a_len) ? offset : a_len;
1920	len = (offset < alloc_len) ? offset : alloc_len;
1921	errsts = fill_from_dev_buffer(scp, arr, len);
1922	kfree(arr);
1923	return errsts;
1924}
1925
1926static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1927			  struct sdebug_dev_info *devip)
1928{
1929	bool repd;
1930	u32 alloc_len, len;
1931	u8 arr[16];
1932	u8 *cmd = scp->cmnd;
1933
1934	memset(arr, 0, sizeof(arr));
1935	repd = !!(cmd[2] & 0x80);
1936	alloc_len = get_unaligned_be32(cmd + 6);
1937	if (alloc_len < 4) {
1938		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1939		return check_condition_result;
1940	}
1941	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1942	arr[1] = 0x1;		/* ITNRS */
1943	if (repd) {
1944		arr[3] = 0xc;
1945		len = 16;
1946	} else
1947		len = 4;
1948
1949	len = (len < alloc_len) ? len : alloc_len;
1950	return fill_from_dev_buffer(scp, arr, len);
1951}
1952
1953/* <<Following mode page info copied from ST318451LW>> */
1954
1955static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1956{	/* Read-Write Error Recovery page for mode_sense */
1957	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1958					5, 0, 0xff, 0xff};
1959
1960	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1961	if (1 == pcontrol)
1962		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1963	return sizeof(err_recov_pg);
1964}
1965
1966static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
1967{ 	/* Disconnect-Reconnect page for mode_sense */
1968	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1969					 0, 0, 0, 0, 0, 0, 0, 0};
1970
1971	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1972	if (1 == pcontrol)
1973		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1974	return sizeof(disconnect_pg);
1975}
1976
1977static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1978{       /* Format device page for mode_sense */
1979	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1980				     0, 0, 0, 0, 0, 0, 0, 0,
1981				     0, 0, 0, 0, 0x40, 0, 0, 0};
1982
1983	memcpy(p, format_pg, sizeof(format_pg));
1984	put_unaligned_be16(sdebug_sectors_per, p + 10);
1985	put_unaligned_be16(sdebug_sector_size, p + 12);
1986	if (sdebug_removable)
1987		p[20] |= 0x20; /* should agree with INQUIRY */
1988	if (1 == pcontrol)
1989		memset(p + 2, 0, sizeof(format_pg) - 2);
1990	return sizeof(format_pg);
1991}
1992
1993static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1994				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1995				     0, 0, 0, 0};
1996
1997static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
1998{ 	/* Caching page for mode_sense */
1999	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2000		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2001	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2002		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2003
2004	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2005		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2006	memcpy(p, caching_pg, sizeof(caching_pg));
2007	if (1 == pcontrol)
2008		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2009	else if (2 == pcontrol)
2010		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2011	return sizeof(caching_pg);
2012}
2013
2014static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2015				    0, 0, 0x2, 0x4b};
2016
2017static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2018{ 	/* Control mode page for mode_sense */
2019	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2020					0, 0, 0, 0};
2021	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2022				     0, 0, 0x2, 0x4b};
2023
2024	if (sdebug_dsense)
2025		ctrl_m_pg[2] |= 0x4;
2026	else
2027		ctrl_m_pg[2] &= ~0x4;
2028
2029	if (sdebug_ato)
2030		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2031
2032	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2033	if (1 == pcontrol)
2034		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2035	else if (2 == pcontrol)
2036		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2037	return sizeof(ctrl_m_pg);
2038}
2039
2040
2041static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2042{	/* Informational Exceptions control mode page for mode_sense */
2043	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2044				       0, 0, 0x0, 0x0};
2045	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2046				      0, 0, 0x0, 0x0};
2047
2048	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2049	if (1 == pcontrol)
2050		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2051	else if (2 == pcontrol)
2052		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2053	return sizeof(iec_m_pg);
2054}
2055
2056static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2057{	/* SAS SSP mode page - short format for mode_sense */
2058	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2059		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2060
2061	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2062	if (1 == pcontrol)
2063		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2064	return sizeof(sas_sf_m_pg);
2065}
2066
2067
2068static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2069			      int target_dev_id)
2070{	/* SAS phy control and discover mode page for mode_sense */
2071	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2072		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2073		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2074		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2075		    0x2, 0, 0, 0, 0, 0, 0, 0,
2076		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2077		    0, 0, 0, 0, 0, 0, 0, 0,
2078		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2079		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2080		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2081		    0x3, 0, 0, 0, 0, 0, 0, 0,
2082		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2083		    0, 0, 0, 0, 0, 0, 0, 0,
2084		};
2085	int port_a, port_b;
2086
2087	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2088	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2089	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2090	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2091	port_a = target_dev_id + 1;
2092	port_b = port_a + 1;
2093	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2094	put_unaligned_be32(port_a, p + 20);
2095	put_unaligned_be32(port_b, p + 48 + 20);
2096	if (1 == pcontrol)
2097		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2098	return sizeof(sas_pcd_m_pg);
2099}
2100
2101static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2102{	/* SAS SSP shared protocol specific port mode subpage */
2103	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2104		    0, 0, 0, 0, 0, 0, 0, 0,
2105		};
2106
2107	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2108	if (1 == pcontrol)
2109		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2110	return sizeof(sas_sha_m_pg);
2111}
2112
2113#define SDEBUG_MAX_MSENSE_SZ 256
2114
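/*
 * MODE SENSE(6) and MODE SENSE(10): build the mode parameter header,
 * optional block descriptor and the requested mode page(s). PC selects
 * current (0), changeable (1) or default (2) values; saved values (3)
 * are rejected with SAVING PARAMETERS NOT SUPPORTED.
 */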
2115static int resp_mode_sense(struct scsi_cmnd *scp,
2116			   struct sdebug_dev_info *devip)
2117{
2118	int pcontrol, pcode, subpcode, bd_len;
2119	unsigned char dev_spec;
2120	int alloc_len, offset, len, target_dev_id;
2121	int target = scp->device->id;
2122	unsigned char *ap;
2123	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2124	unsigned char *cmd = scp->cmnd;
2125	bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2126
2127	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2128	pcontrol = (cmd[2] & 0xc0) >> 6;
2129	pcode = cmd[2] & 0x3f;
2130	subpcode = cmd[3];
2131	msense_6 = (MODE_SENSE == cmd[0]);
2132	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2133	is_disk = (sdebug_ptype == TYPE_DISK);
2134	if (is_disk && !dbd)
2135		bd_len = llbaa ? 16 : 8;
2136	else
2137		bd_len = 0;
2138	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2139	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2140	if (0x3 == pcontrol) {  /* Saving values not supported */
2141		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2142		return check_condition_result;
2143	}
2144	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2145			(devip->target * 1000) - 3;
2146	/* for disks set DPOFUA bit and clear write protect (WP) bit */
2147	if (is_disk)
2148		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2149	else
2150		dev_spec = 0x0;
2151	if (msense_6) {
2152		arr[2] = dev_spec;
2153		arr[3] = bd_len;
2154		offset = 4;
2155	} else {
2156		arr[3] = dev_spec;
2157		if (16 == bd_len)
2158			arr[4] = 0x1;	/* set LONGLBA bit */
2159		arr[7] = bd_len;	/* assume 255 or less */
2160		offset = 8;
2161	}
2162	ap = arr + offset;
2163	if ((bd_len > 0) && (!sdebug_capacity))
2164		sdebug_capacity = get_sdebug_capacity();
2165
2166	if (8 == bd_len) {
2167		if (sdebug_capacity > 0xfffffffe)
2168			put_unaligned_be32(0xffffffff, ap + 0);
2169		else
2170			put_unaligned_be32(sdebug_capacity, ap + 0);
2171		put_unaligned_be16(sdebug_sector_size, ap + 6);
2172		offset += bd_len;
2173		ap = arr + offset;
2174	} else if (16 == bd_len) {
2175		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2176		put_unaligned_be32(sdebug_sector_size, ap + 12);
2177		offset += bd_len;
2178		ap = arr + offset;
2179	}
2180
2181	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2182		/* TODO: Control Extension page */
2183		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2184		return check_condition_result;
2185	}
2186	bad_pcode = false;
2187
2188	switch (pcode) {
2189	case 0x1:	/* Read-Write error recovery page, direct access */
2190		len = resp_err_recov_pg(ap, pcontrol, target);
2191		offset += len;
2192		break;
2193	case 0x2:	/* Disconnect-Reconnect page, all devices */
2194		len = resp_disconnect_pg(ap, pcontrol, target);
2195		offset += len;
2196		break;
2197	case 0x3:       /* Format device page, direct access */
2198		if (is_disk) {
2199			len = resp_format_pg(ap, pcontrol, target);
2200			offset += len;
2201		} else
2202			bad_pcode = true;
2203		break;
2204	case 0x8:	/* Caching page, direct access */
2205		if (is_disk) {
2206			len = resp_caching_pg(ap, pcontrol, target);
2207			offset += len;
2208		} else
2209			bad_pcode = true;
2210		break;
2211	case 0xa:	/* Control Mode page, all devices */
2212		len = resp_ctrl_m_pg(ap, pcontrol, target);
2213		offset += len;
2214		break;
2215	case 0x19:	/* if spc==1 then sas phy, control+discover */
2216		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2217			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2218			return check_condition_result;
2219		}
2220		len = 0;
2221		if ((0x0 == subpcode) || (0xff == subpcode))
2222			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2223		if ((0x1 == subpcode) || (0xff == subpcode))
2224			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2225						  target_dev_id);
2226		if ((0x2 == subpcode) || (0xff == subpcode))
2227			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2228		offset += len;
2229		break;
2230	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2231		len = resp_iec_m_pg(ap, pcontrol, target);
2232		offset += len;
2233		break;
2234	case 0x3f:	/* Read all Mode pages */
2235		if ((0 == subpcode) || (0xff == subpcode)) {
2236			len = resp_err_recov_pg(ap, pcontrol, target);
2237			len += resp_disconnect_pg(ap + len, pcontrol, target);
2238			if (is_disk) {
2239				len += resp_format_pg(ap + len, pcontrol,
2240						      target);
2241				len += resp_caching_pg(ap + len, pcontrol,
2242						       target);
2243			}
2244			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2245			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2246			if (0xff == subpcode) {
2247				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2248						  target, target_dev_id);
2249				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2250			}
2251			len += resp_iec_m_pg(ap + len, pcontrol, target);
2252			offset += len;
2253		} else {
2254			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2255			return check_condition_result;
2256		}
2257		break;
2258	default:
2259		bad_pcode = true;
2260		break;
2261	}
2262	if (bad_pcode) {
2263		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2264		return check_condition_result;
2265	}
2266	if (msense_6)
2267		arr[0] = offset - 1;
2268	else
2269		put_unaligned_be16((offset - 2), arr + 0);
2270	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2271}
2272
2273#define SDEBUG_MAX_MSELECT_SZ 512
2274
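/*
 * MODE SELECT(6) and MODE SELECT(10): fetch the parameter list and
 * accept changes to the caching, control and informational exceptions
 * mode pages only; a successful change raises a MODE PARAMETERS
 * CHANGED unit attention on the device.
 */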
2275static int resp_mode_select(struct scsi_cmnd *scp,
2276			    struct sdebug_dev_info *devip)
2277{
2278	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2279	int param_len, res, mpage;
2280	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2281	unsigned char *cmd = scp->cmnd;
2282	int mselect6 = (MODE_SELECT == cmd[0]);
2283
2284	memset(arr, 0, sizeof(arr));
2285	pf = cmd[1] & 0x10;
2286	sp = cmd[1] & 0x1;
2287	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2288	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2289		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2290		return check_condition_result;
2291	}
2292	res = fetch_to_dev_buffer(scp, arr, param_len);
2293	if (-1 == res)
2294		return DID_ERROR << 16;
2295	else if (sdebug_verbose && (res < param_len))
2296		sdev_printk(KERN_INFO, scp->device,
2297			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2298			    __func__, param_len, res);
2299	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2300	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2301	if (md_len > 2) {
2302		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2303		return check_condition_result;
2304	}
2305	off = bd_len + (mselect6 ? 4 : 8);
2306	mpage = arr[off] & 0x3f;
2307	ps = !!(arr[off] & 0x80);
2308	if (ps) {
2309		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2310		return check_condition_result;
2311	}
2312	spf = !!(arr[off] & 0x40);
2313	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2314		       (arr[off + 1] + 2);
2315	if ((pg_len + off) > param_len) {
2316		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2317				PARAMETER_LIST_LENGTH_ERR, 0);
2318		return check_condition_result;
2319	}
2320	switch (mpage) {
2321	case 0x8:      /* Caching Mode page */
2322		if (caching_pg[1] == arr[off + 1]) {
2323			memcpy(caching_pg + 2, arr + off + 2,
2324			       sizeof(caching_pg) - 2);
2325			goto set_mode_changed_ua;
2326		}
2327		break;
2328	case 0xa:      /* Control Mode page */
2329		if (ctrl_m_pg[1] == arr[off + 1]) {
2330			memcpy(ctrl_m_pg + 2, arr + off + 2,
2331			       sizeof(ctrl_m_pg) - 2);
2332			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2333			goto set_mode_changed_ua;
2334		}
2335		break;
2336	case 0x1c:      /* Informational Exceptions Mode page */
2337		if (iec_m_pg[1] == arr[off + 1]) {
2338			memcpy(iec_m_pg + 2, arr + off + 2,
2339			       sizeof(iec_m_pg) - 2);
2340			goto set_mode_changed_ua;
2341		}
2342		break;
2343	default:
2344		break;
2345	}
2346	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2347	return check_condition_result;
2348set_mode_changed_ua:
2349	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2350	return 0;
2351}
2352
2353static int resp_temp_l_pg(unsigned char *arr)
2354{
2355	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2356				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2357		};
2358
2359	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2360	return sizeof(temp_l_pg);
2361}
2362
2363static int resp_ie_l_pg(unsigned char *arr)
2364{
2365	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2366		};
2367
2368	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2369	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2370		arr[4] = THRESHOLD_EXCEEDED;
2371		arr[5] = 0xff;
2372	}
2373	return sizeof(ie_l_pg);
2374}
2375
2376#define SDEBUG_MAX_LSENSE_SZ 512
2377
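/*
 * LOG SENSE: only the supported log pages (0x0), temperature (0xd)
 * and informational exceptions (0x2f) pages, plus their 0xff subpage
 * lists, are implemented.
 */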
2378static int resp_log_sense(struct scsi_cmnd *scp,
2379			  struct sdebug_dev_info *devip)
2380{
2381	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2382	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2383	unsigned char *cmd = scp->cmnd;
2384
2385	memset(arr, 0, sizeof(arr));
2386	ppc = cmd[1] & 0x2;
2387	sp = cmd[1] & 0x1;
2388	if (ppc || sp) {
2389		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2390		return check_condition_result;
2391	}
2392	pcode = cmd[2] & 0x3f;
2393	subpcode = cmd[3] & 0xff;
2394	alloc_len = get_unaligned_be16(cmd + 7);
2395	arr[0] = pcode;
2396	if (0 == subpcode) {
2397		switch (pcode) {
2398		case 0x0:	/* Supported log pages log page */
2399			n = 4;
2400			arr[n++] = 0x0;		/* this page */
2401			arr[n++] = 0xd;		/* Temperature */
2402			arr[n++] = 0x2f;	/* Informational exceptions */
2403			arr[3] = n - 4;
2404			break;
2405		case 0xd:	/* Temperature log page */
2406			arr[3] = resp_temp_l_pg(arr + 4);
2407			break;
2408		case 0x2f:	/* Informational exceptions log page */
2409			arr[3] = resp_ie_l_pg(arr + 4);
2410			break;
2411		default:
2412			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2413			return check_condition_result;
2414		}
2415	} else if (0xff == subpcode) {
2416		arr[0] |= 0x40;
2417		arr[1] = subpcode;
2418		switch (pcode) {
2419		case 0x0:	/* Supported log pages and subpages log page */
2420			n = 4;
2421			arr[n++] = 0x0;
2422			arr[n++] = 0x0;		/* 0,0 page */
2423			arr[n++] = 0x0;
2424			arr[n++] = 0xff;	/* this page */
2425			arr[n++] = 0xd;
2426			arr[n++] = 0x0;		/* Temperature */
2427			arr[n++] = 0x2f;
2428			arr[n++] = 0x0;	/* Informational exceptions */
2429			arr[3] = n - 4;
2430			break;
2431		case 0xd:	/* Temperature subpages */
2432			n = 4;
2433			arr[n++] = 0xd;
2434			arr[n++] = 0x0;		/* Temperature */
2435			arr[3] = n - 4;
2436			break;
2437		case 0x2f:	/* Informational exceptions subpages */
2438			n = 4;
2439			arr[n++] = 0x2f;
2440			arr[n++] = 0x0;		/* Informational exceptions */
2441			arr[3] = n - 4;
2442			break;
2443		default:
2444			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2445			return check_condition_result;
2446		}
2447	} else {
2448		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2449		return check_condition_result;
2450	}
2451	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2452	return fill_from_dev_buffer(scp, arr,
2453		    min(len, SDEBUG_MAX_LSENSE_SZ));
2454}
2455
2456static int check_device_access_params(struct scsi_cmnd *scp,
2457				      unsigned long long lba, unsigned int num)
2458{
2459	if (lba + num > sdebug_capacity) {
2460		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2461		return check_condition_result;
2462	}
2463	/* transfer length excessive (tie in to block limits VPD page) */
2464	if (num > sdebug_store_sectors) {
2465		/* needs work to find which cdb byte 'num' comes from */
2466		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2467		return check_condition_result;
2468	}
2469	return 0;
2470}
2471
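/*
 * All medium access is folded onto a ram store of sdebug_store_sectors
 * blocks: the do_div() below wraps the requested lba into the store,
 * and a transfer running past the end continues at block 0, which is
 * how a large virtual_gb setting aliases onto a small store.
 */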
2472/* Returns number of bytes copied or -1 if error. */
2473static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2474			    u32 num, bool do_write)
2475{
2476	int ret;
2477	u64 block, rest = 0;
2478	struct scsi_data_buffer *sdb;
2479	enum dma_data_direction dir;
2480
2481	if (do_write) {
2482		sdb = scsi_out(scmd);
2483		dir = DMA_TO_DEVICE;
2484		write_since_sync = true;
2485	} else {
2486		sdb = scsi_in(scmd);
2487		dir = DMA_FROM_DEVICE;
2488	}
2489
2490	if (!sdb->length)
2491		return 0;
2492	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2493		return -1;
2494
2495	block = do_div(lba, sdebug_store_sectors);
2496	if (block + num > sdebug_store_sectors)
2497		rest = block + num - sdebug_store_sectors;
2498
2499	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2500		   fake_storep + (block * sdebug_sector_size),
2501		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2502	if (ret != (num - rest) * sdebug_sector_size)
2503		return ret;
2504
2505	if (rest) {
2506		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2507			    fake_storep, rest * sdebug_sector_size,
2508			    sg_skip + ((num - rest) * sdebug_sector_size),
2509			    do_write);
2510	}
2511
2512	return ret;
2513}
2514
2515/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2516 * arr into fake_store(lba,num) and return true. If comparison fails then
2517 * return false. */
2518static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2519{
2520	bool res;
2521	u64 block, rest = 0;
2522	u32 store_blks = sdebug_store_sectors;
2523	u32 lb_size = sdebug_sector_size;
2524
2525	block = do_div(lba, store_blks);
2526	if (block + num > store_blks)
2527		rest = block + num - store_blks;
2528
2529	res = !memcmp(fake_storep + (block * lb_size), arr,
2530		      (num - rest) * lb_size);
2531	if (!res)
2532		return res;
2533	if (rest)
2534		res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2535			      rest * lb_size);
2536	if (!res)
2537		return res;
2538	arr += num * lb_size;
2539	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2540	if (rest)
2541		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2542		       rest * lb_size);
2543	return res;
2544}
2545
2546static __be16 dif_compute_csum(const void *buf, int len)
2547{
2548	__be16 csum;
2549
2550	if (sdebug_guard)
2551		csum = (__force __be16)ip_compute_csum(buf, len);
2552	else
2553		csum = cpu_to_be16(crc_t10dif(buf, len));
2554
2555	return csum;
2556}
2557
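/*
 * Check one T10 protection information tuple against its data block:
 * first the guard tag (IP checksum or CRC per the guard module
 * parameter), then the reference tag as required by the configured
 * DIF type. A non-zero return is used by callers as the ASCQ with
 * ASC 0x10.
 */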
2558static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2559		      sector_t sector, u32 ei_lba)
2560{
2561	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2562
2563	if (sdt->guard_tag != csum) {
2564		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2565			(unsigned long)sector,
2566			be16_to_cpu(sdt->guard_tag),
2567			be16_to_cpu(csum));
2568		return 0x01;
2569	}
2570	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2571	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2572		pr_err("REF check failed on sector %lu\n",
2573			(unsigned long)sector);
2574		return 0x03;
2575	}
2576	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2577	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2578		pr_err("REF check failed on sector %lu\n",
2579			(unsigned long)sector);
2580		return 0x03;
2581	}
2582	return 0;
2583}
2584
2585static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2586			  unsigned int sectors, bool read)
2587{
2588	size_t resid;
2589	void *paddr;
2590	const void *dif_store_end = dif_storep + sdebug_store_sectors;
2591	struct sg_mapping_iter miter;
2592
2593	/* Bytes of protection data to copy into sgl */
2594	resid = sectors * sizeof(*dif_storep);
2595
2596	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2597			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2598			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2599
2600	while (sg_miter_next(&miter) && resid > 0) {
2601		size_t len = min(miter.length, resid);
2602		void *start = dif_store(sector);
2603		size_t rest = 0;
2604
2605		if (dif_store_end < start + len)
2606			rest = start + len - dif_store_end;
2607
2608		paddr = miter.addr;
2609
2610		if (read)
2611			memcpy(paddr, start, len - rest);
2612		else
2613			memcpy(start, paddr, len - rest);
2614
2615		if (rest) {
2616			if (read)
2617				memcpy(paddr + len - rest, dif_storep, rest);
2618			else
2619				memcpy(dif_storep, paddr + len - rest, rest);
2620		}
2621
2622		sector += len / sizeof(*dif_storep);
2623		resid -= len;
2624	}
2625	sg_miter_stop(&miter);
2626}
2627
2628static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2629			    unsigned int sectors, u32 ei_lba)
2630{
2631	unsigned int i;
2632	struct t10_pi_tuple *sdt;
2633	sector_t sector;
2634
2635	for (i = 0; i < sectors; i++, ei_lba++) {
2636		int ret;
2637
2638		sector = start_sec + i;
2639		sdt = dif_store(sector);
2640
2641		if (sdt->app_tag == cpu_to_be16(0xffff))
2642			continue;
2643
2644		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2645		if (ret) {
2646			dif_errors++;
2647			return ret;
2648		}
2649	}
2650
2651	dif_copy_prot(SCpnt, start_sec, sectors, true);
2652	dix_reads++;
2653
2654	return 0;
2655}
2656
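/*
 * Service the READ(6/10/12/16/32) family: decode lba and transfer
 * length from the cdb, honour any configured error injection,
 * optionally verify protection information, then copy out of the fake
 * store under the atomic_rw read lock.
 */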
2657static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2658{
2659	u8 *cmd = scp->cmnd;
2660	struct sdebug_queued_cmd *sqcp;
2661	u64 lba;
2662	u32 num;
2663	u32 ei_lba;
2664	unsigned long iflags;
2665	int ret;
2666	bool check_prot;
2667
2668	switch (cmd[0]) {
2669	case READ_16:
2670		ei_lba = 0;
2671		lba = get_unaligned_be64(cmd + 2);
2672		num = get_unaligned_be32(cmd + 10);
2673		check_prot = true;
2674		break;
2675	case READ_10:
2676		ei_lba = 0;
2677		lba = get_unaligned_be32(cmd + 2);
2678		num = get_unaligned_be16(cmd + 7);
2679		check_prot = true;
2680		break;
2681	case READ_6:
2682		ei_lba = 0;
2683		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2684		      (u32)(cmd[1] & 0x1f) << 16;
2685		num = (0 == cmd[4]) ? 256 : cmd[4];
2686		check_prot = true;
2687		break;
2688	case READ_12:
2689		ei_lba = 0;
2690		lba = get_unaligned_be32(cmd + 2);
2691		num = get_unaligned_be32(cmd + 6);
2692		check_prot = true;
2693		break;
2694	case XDWRITEREAD_10:
2695		ei_lba = 0;
2696		lba = get_unaligned_be32(cmd + 2);
2697		num = get_unaligned_be16(cmd + 7);
2698		check_prot = false;
2699		break;
2700	default:	/* assume READ(32) */
2701		lba = get_unaligned_be64(cmd + 12);
2702		ei_lba = get_unaligned_be32(cmd + 20);
2703		num = get_unaligned_be32(cmd + 28);
2704		check_prot = false;
2705		break;
2706	}
2707	if (unlikely(have_dif_prot && check_prot)) {
2708		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2709		    (cmd[1] & 0xe0)) {
2710			mk_sense_invalid_opcode(scp);
2711			return check_condition_result;
2712		}
2713		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2714		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2715		    (cmd[1] & 0xe0) == 0)
2716			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2717				    "to DIF device\n");
2718	}
2719	if (unlikely(sdebug_any_injecting_opt)) {
2720		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2721
2722		if (sqcp) {
2723			if (sqcp->inj_short)
2724				num /= 2;
2725		}
2726	} else
2727		sqcp = NULL;
2728
2729	/* inline check_device_access_params() */
2730	if (unlikely(lba + num > sdebug_capacity)) {
2731		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2732		return check_condition_result;
2733	}
2734	/* transfer length excessive (tie in to block limits VPD page) */
2735	if (unlikely(num > sdebug_store_sectors)) {
2736		/* needs work to find which cdb byte 'num' comes from */
2737		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2738		return check_condition_result;
2739	}
2740
2741	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2742		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
2743		     ((lba + num) > sdebug_medium_error_start))) {
2744		/* claim unrecoverable read error */
2745		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2746		/* set info field and valid bit for fixed descriptor */
2747		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2748			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
2749			ret = (lba < OPT_MEDIUM_ERR_ADDR)
2750			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2751			put_unaligned_be32(ret, scp->sense_buffer + 3);
2752		}
2753		scsi_set_resid(scp, scsi_bufflen(scp));
2754		return check_condition_result;
2755	}
2756
2757	read_lock_irqsave(&atomic_rw, iflags);
2758
2759	/* DIX + T10 DIF */
2760	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2761		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2762
2763		if (prot_ret) {
2764			read_unlock_irqrestore(&atomic_rw, iflags);
2765			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2766			return illegal_condition_result;
2767		}
2768	}
2769
2770	ret = do_device_access(scp, 0, lba, num, false);
2771	read_unlock_irqrestore(&atomic_rw, iflags);
2772	if (unlikely(ret == -1))
2773		return DID_ERROR << 16;
2774
2775	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2776
2777	if (unlikely(sqcp)) {
2778		if (sqcp->inj_recovered) {
2779			mk_sense_buffer(scp, RECOVERED_ERROR,
2780					THRESHOLD_EXCEEDED, 0);
2781			return check_condition_result;
2782		} else if (sqcp->inj_transport) {
2783			mk_sense_buffer(scp, ABORTED_COMMAND,
2784					TRANSPORT_PROBLEM, ACK_NAK_TO);
2785			return check_condition_result;
2786		} else if (sqcp->inj_dif) {
2787			/* Logical block guard check failed */
2788			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2789			return illegal_condition_result;
2790		} else if (sqcp->inj_dix) {
2791			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2792			return illegal_condition_result;
2793		}
2794	}
2795	return 0;
2796}
2797
2798static void dump_sector(unsigned char *buf, int len)
2799{
2800	int i, j, n;
2801
2802	pr_err(">>> Sector Dump <<<\n");
2803	for (i = 0 ; i < len ; i += 16) {
2804		char b[128];
2805
2806		for (j = 0, n = 0; j < 16; j++) {
2807			unsigned char c = buf[i+j];
2808
2809			if (c >= 0x20 && c < 0x7e)
2810				n += scnprintf(b + n, sizeof(b) - n,
2811					       " %c ", buf[i+j]);
2812			else
2813				n += scnprintf(b + n, sizeof(b) - n,
2814					       "%02x ", buf[i+j]);
2815		}
2816		pr_err("%04d: %s\n", i, b);
2817	}
2818}
2819
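/*
 * For protected WRITEs, walk the protection and data scatter-gather
 * lists in lock step, verifying every tuple before anything reaches
 * the store; on a mismatch the offending data block is dumped and the
 * write fails.
 */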
2820static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2821			     unsigned int sectors, u32 ei_lba)
2822{
2823	int ret;
2824	struct t10_pi_tuple *sdt;
2825	void *daddr;
2826	sector_t sector = start_sec;
2827	int ppage_offset;
2828	int dpage_offset;
2829	struct sg_mapping_iter diter;
2830	struct sg_mapping_iter piter;
2831
2832	BUG_ON(scsi_sg_count(SCpnt) == 0);
2833	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2834
2835	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2836			scsi_prot_sg_count(SCpnt),
2837			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2838	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2839			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2840
2841	/* For each protection page */
2842	while (sg_miter_next(&piter)) {
2843		dpage_offset = 0;
2844		if (WARN_ON(!sg_miter_next(&diter))) {
2845			ret = 0x01;
2846			goto out;
2847		}
2848
2849		for (ppage_offset = 0; ppage_offset < piter.length;
2850		     ppage_offset += sizeof(struct t10_pi_tuple)) {
2851			/* If we're at the end of the current
2852			 * data page advance to the next one
2853			 */
2854			if (dpage_offset >= diter.length) {
2855				if (WARN_ON(!sg_miter_next(&diter))) {
2856					ret = 0x01;
2857					goto out;
2858				}
2859				dpage_offset = 0;
2860			}
2861
2862			sdt = piter.addr + ppage_offset;
2863			daddr = diter.addr + dpage_offset;
2864
2865			ret = dif_verify(sdt, daddr, sector, ei_lba);
2866			if (ret) {
2867				dump_sector(daddr, sdebug_sector_size);
2868				goto out;
2869			}
2870
2871			sector++;
2872			ei_lba++;
2873			dpage_offset += sdebug_sector_size;
2874		}
2875		diter.consumed = dpage_offset;
2876		sg_miter_stop(&diter);
2877	}
2878	sg_miter_stop(&piter);
2879
2880	dif_copy_prot(SCpnt, start_sec, sectors, false);
2881	dix_writes++;
2882
2883	return 0;
2884
2885out:
2886	dif_errors++;
2887	sg_miter_stop(&diter);
2888	sg_miter_stop(&piter);
2889	return ret;
2890}
2891
2892static unsigned long lba_to_map_index(sector_t lba)
2893{
2894	if (sdebug_unmap_alignment)
2895		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2896	sector_div(lba, sdebug_unmap_granularity);
2897	return lba;
2898}
2899
2900static sector_t map_index_to_lba(unsigned long index)
2901{
2902	sector_t lba = index * sdebug_unmap_granularity;
2903
2904	if (sdebug_unmap_alignment)
2905		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2906	return lba;
2907}
2908
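/*
 * Worked example, assuming unmap_granularity=8 and unmap_alignment=0:
 * lba_to_map_index(17) == 2 and map_index_to_lba(3) == 24, so
 * map_state() called for LBA 17 reports the mapped/unmapped state of
 * the grain holding blocks 16..23 and sets *num to 7 (blocks 17..23).
 */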
2909static unsigned int map_state(sector_t lba, unsigned int *num)
2910{
2911	sector_t end;
2912	unsigned int mapped;
2913	unsigned long index;
2914	unsigned long next;
2915
2916	index = lba_to_map_index(lba);
2917	mapped = test_bit(index, map_storep);
2918
2919	if (mapped)
2920		next = find_next_zero_bit(map_storep, map_size, index);
2921	else
2922		next = find_next_bit(map_storep, map_size, index);
2923
2924	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2925	*num = end - lba;
2926	return mapped;
2927}
2928
2929static void map_region(sector_t lba, unsigned int len)
2930{
2931	sector_t end = lba + len;
2932
2933	while (lba < end) {
2934		unsigned long index = lba_to_map_index(lba);
2935
2936		if (index < map_size)
2937			set_bit(index, map_storep);
2938
2939		lba = map_index_to_lba(index + 1);
2940	}
2941}
2942
2943static void unmap_region(sector_t lba, unsigned int len)
2944{
2945	sector_t end = lba + len;
2946
2947	while (lba < end) {
2948		unsigned long index = lba_to_map_index(lba);
2949
2950		if (lba == map_index_to_lba(index) &&
2951		    lba + sdebug_unmap_granularity <= end &&
2952		    index < map_size) {
2953			clear_bit(index, map_storep);
2954			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
2955				memset(fake_storep +
2956				       lba * sdebug_sector_size,
2957				       (sdebug_lbprz & 1) ? 0 : 0xff,
2958				       sdebug_sector_size *
2959				       sdebug_unmap_granularity);
2960			}
2961			if (dif_storep) {
2962				memset(dif_storep + lba, 0xff,
2963				       sizeof(*dif_storep) *
2964				       sdebug_unmap_granularity);
2965			}
2966		}
2967		lba = map_index_to_lba(index + 1);
2968	}
2969}
2970
2971static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2972{
2973	u8 *cmd = scp->cmnd;
2974	u64 lba;
2975	u32 num;
2976	u32 ei_lba;
2977	unsigned long iflags;
2978	int ret;
2979	bool check_prot;
2980
2981	switch (cmd[0]) {
2982	case WRITE_16:
2983		ei_lba = 0;
2984		lba = get_unaligned_be64(cmd + 2);
2985		num = get_unaligned_be32(cmd + 10);
2986		check_prot = true;
2987		break;
2988	case WRITE_10:
2989		ei_lba = 0;
2990		lba = get_unaligned_be32(cmd + 2);
2991		num = get_unaligned_be16(cmd + 7);
2992		check_prot = true;
2993		break;
2994	case WRITE_6:
2995		ei_lba = 0;
2996		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2997		      (u32)(cmd[1] & 0x1f) << 16;
2998		num = (0 == cmd[4]) ? 256 : cmd[4];
2999		check_prot = true;
3000		break;
3001	case WRITE_12:
3002		ei_lba = 0;
3003		lba = get_unaligned_be32(cmd + 2);
3004		num = get_unaligned_be32(cmd + 6);
3005		check_prot = true;
3006		break;
3007	case 0x53:	/* XDWRITEREAD(10) */
3008		ei_lba = 0;
3009		lba = get_unaligned_be32(cmd + 2);
3010		num = get_unaligned_be16(cmd + 7);
3011		check_prot = false;
3012		break;
3013	default:	/* assume WRITE(32) */
3014		lba = get_unaligned_be64(cmd + 12);
3015		ei_lba = get_unaligned_be32(cmd + 20);
3016		num = get_unaligned_be32(cmd + 28);
3017		check_prot = false;
3018		break;
3019	}
3020	if (unlikely(have_dif_prot && check_prot)) {
3021		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3022		    (cmd[1] & 0xe0)) {
3023			mk_sense_invalid_opcode(scp);
3024			return check_condition_result;
3025		}
3026		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3027		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3028		    (cmd[1] & 0xe0) == 0)
3029			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3030				    "to DIF device\n");
3031	}
3032
3033	/* inline check_device_access_params() */
3034	if (unlikely(lba + num > sdebug_capacity)) {
3035		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3036		return check_condition_result;
3037	}
3038	/* transfer length excessive (tie in to block limits VPD page) */
3039	if (unlikely(num > sdebug_store_sectors)) {
3040		/* needs work to find which cdb byte 'num' comes from */
3041		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3042		return check_condition_result;
3043	}
3044
3045	write_lock_irqsave(&atomic_rw, iflags);
3046
3047	/* DIX + T10 DIF */
3048	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3049		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3050
3051		if (prot_ret) {
3052			write_unlock_irqrestore(&atomic_rw, iflags);
3053			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3054			return illegal_condition_result;
3055		}
3056	}
3057
3058	ret = do_device_access(scp, 0, lba, num, true);
3059	if (unlikely(scsi_debug_lbp()))
3060		map_region(lba, num);
3061	write_unlock_irqrestore(&atomic_rw, iflags);
3062	if (unlikely(-1 == ret))
3063		return DID_ERROR << 16;
3064	else if (unlikely(sdebug_verbose &&
3065			  (ret < (num * sdebug_sector_size))))
3066		sdev_printk(KERN_INFO, scp->device,
3067			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3068			    my_name, num * sdebug_sector_size, ret);
3069
3070	if (unlikely(sdebug_any_injecting_opt)) {
3071		struct sdebug_queued_cmd *sqcp =
3072				(struct sdebug_queued_cmd *)scp->host_scribble;
3073
3074		if (sqcp) {
3075			if (sqcp->inj_recovered) {
3076				mk_sense_buffer(scp, RECOVERED_ERROR,
3077						THRESHOLD_EXCEEDED, 0);
3078				return check_condition_result;
3079			} else if (sqcp->inj_dif) {
3080				/* Logical block guard check failed */
3081				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3082				return illegal_condition_result;
3083			} else if (sqcp->inj_dix) {
3084				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3085				return illegal_condition_result;
3086			}
3087		}
3088	}
3089	return 0;
3090}
3091
3092/*
3093 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3094 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3095 */
3096static int resp_write_scat(struct scsi_cmnd *scp,
3097			   struct sdebug_dev_info *devip)
3098{
3099	u8 *cmd = scp->cmnd;
3100	u8 *lrdp = NULL;
3101	u8 *up;
3102	u8 wrprotect;
3103	u16 lbdof, num_lrd, k;
3104	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3105	u32 lb_size = sdebug_sector_size;
3106	u32 ei_lba;
3107	u64 lba;
3108	unsigned long iflags;
3109	int ret, res;
3110	bool is_16;
3111	static const u32 lrd_size = 32; /* + parameter list header size */
3112
3113	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3114		is_16 = false;
3115		wrprotect = (cmd[10] >> 5) & 0x7;
3116		lbdof = get_unaligned_be16(cmd + 12);
3117		num_lrd = get_unaligned_be16(cmd + 16);
3118		bt_len = get_unaligned_be32(cmd + 28);
3119	} else {        /* that leaves WRITE SCATTERED(16) */
3120		is_16 = true;
3121		wrprotect = (cmd[2] >> 5) & 0x7;
3122		lbdof = get_unaligned_be16(cmd + 4);
3123		num_lrd = get_unaligned_be16(cmd + 8);
3124		bt_len = get_unaligned_be32(cmd + 10);
3125		if (unlikely(have_dif_prot)) {
3126			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3127			    wrprotect) {
3128				mk_sense_invalid_opcode(scp);
3129				return illegal_condition_result;
3130			}
3131			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3132			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3133			     wrprotect == 0)
3134				sdev_printk(KERN_ERR, scp->device,
3135					    "Unprotected WR to DIF device\n");
3136		}
3137	}
3138	if ((num_lrd == 0) || (bt_len == 0))
3139		return 0;       /* T10 says these do-nothings are not errors */
3140	if (lbdof == 0) {
3141		if (sdebug_verbose)
3142			sdev_printk(KERN_INFO, scp->device,
3143				"%s: %s: LB Data Offset field bad\n",
3144				my_name, __func__);
3145		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3146		return illegal_condition_result;
3147	}
3148	lbdof_blen = lbdof * lb_size;
3149	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3150		if (sdebug_verbose)
3151			sdev_printk(KERN_INFO, scp->device,
3152				"%s: %s: LBA range descriptors don't fit\n",
3153				my_name, __func__);
3154		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3155		return illegal_condition_result;
3156	}
3157	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3158	if (lrdp == NULL)
3159		return SCSI_MLQUEUE_HOST_BUSY;
3160	if (sdebug_verbose)
3161		sdev_printk(KERN_INFO, scp->device,
3162			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3163			my_name, __func__, lbdof_blen);
3164	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3165	if (res == -1) {
3166		ret = DID_ERROR << 16;
3167		goto err_out;
3168	}
3169
3170	write_lock_irqsave(&atomic_rw, iflags);
3171	sg_off = lbdof_blen;
3172	/* Spec says the Buffer Transfer Length field counts LBs in the data-out buffer */
3173	cum_lb = 0;
3174	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3175		lba = get_unaligned_be64(up + 0);
3176		num = get_unaligned_be32(up + 8);
3177		if (sdebug_verbose)
3178			sdev_printk(KERN_INFO, scp->device,
3179				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3180				my_name, __func__, k, lba, num, sg_off);
3181		if (num == 0)
3182			continue;
3183		ret = check_device_access_params(scp, lba, num);
3184		if (ret)
3185			goto err_out_unlock;
3186		num_by = num * lb_size;
3187		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3188
3189		if ((cum_lb + num) > bt_len) {
3190			if (sdebug_verbose)
3191				sdev_printk(KERN_INFO, scp->device,
3192				    "%s: %s: sum of blocks > data provided\n",
3193				    my_name, __func__);
3194			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3195					0);
3196			ret = illegal_condition_result;
3197			goto err_out_unlock;
3198		}
3199
3200		/* DIX + T10 DIF */
3201		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3202			int prot_ret = prot_verify_write(scp, lba, num,
3203							 ei_lba);
3204
3205			if (prot_ret) {
3206				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3207						prot_ret);
3208				ret = illegal_condition_result;
3209				goto err_out_unlock;
3210			}
3211		}
3212
3213		ret = do_device_access(scp, sg_off, lba, num, true);
3214		if (unlikely(scsi_debug_lbp()))
3215			map_region(lba, num);
3216		if (unlikely(-1 == ret)) {
3217			ret = DID_ERROR << 16;
3218			goto err_out_unlock;
3219		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3220			sdev_printk(KERN_INFO, scp->device,
3221			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3222			    my_name, num_by, ret);
3223
3224		if (unlikely(sdebug_any_injecting_opt)) {
3225			struct sdebug_queued_cmd *sqcp =
3226				(struct sdebug_queued_cmd *)scp->host_scribble;
3227
3228			if (sqcp) {
3229				if (sqcp->inj_recovered) {
3230					mk_sense_buffer(scp, RECOVERED_ERROR,
3231							THRESHOLD_EXCEEDED, 0);
3232					ret = illegal_condition_result;
3233					goto err_out_unlock;
3234				} else if (sqcp->inj_dif) {
3235					/* Logical block guard check failed */
3236					mk_sense_buffer(scp, ABORTED_COMMAND,
3237							0x10, 1);
3238					ret = illegal_condition_result;
3239					goto err_out_unlock;
3240				} else if (sqcp->inj_dix) {
3241					mk_sense_buffer(scp, ILLEGAL_REQUEST,
3242							0x10, 1);
3243					ret = illegal_condition_result;
3244					goto err_out_unlock;
3245				}
3246			}
3247		}
3248		sg_off += num_by;
3249		cum_lb += num;
3250	}
3251	ret = 0;
3252err_out_unlock:
3253	write_unlock_irqrestore(&atomic_rw, iflags);
3254err_out:
3255	kfree(lrdp);
3256	return ret;
3257}
3258
3259static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3260			   u32 ei_lba, bool unmap, bool ndob)
3261{
3262	unsigned long iflags;
3263	unsigned long long i;
3264	int ret;
3265	u64 lba_off;
3266
3267	ret = check_device_access_params(scp, lba, num);
3268	if (ret)
3269		return ret;
3270
3271	write_lock_irqsave(&atomic_rw, iflags);
3272
3273	if (unmap && scsi_debug_lbp()) {
3274		unmap_region(lba, num);
3275		goto out;
3276	}
3277
3278	lba_off = lba * sdebug_sector_size;
3279	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3280	if (ndob) {
3281		memset(fake_storep + lba_off, 0, sdebug_sector_size);
3282		ret = 0;
3283	} else
3284		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
3285					  sdebug_sector_size);
3286
3287	if (-1 == ret) {
3288		write_unlock_irqrestore(&atomic_rw, iflags);
3289		return DID_ERROR << 16;
3290	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3291		sdev_printk(KERN_INFO, scp->device,
3292			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3293			    my_name, "write same",
3294			    sdebug_sector_size, ret);
3295
3296	/* Copy first sector to remaining blocks */
3297	for (i = 1 ; i < num ; i++)
3298		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3299		       fake_storep + lba_off,
3300		       sdebug_sector_size);
3301
3302	if (scsi_debug_lbp())
3303		map_region(lba, num);
3304out:
3305	write_unlock_irqrestore(&atomic_rw, iflags);
3306
3307	return 0;
3308}
3309
3310static int resp_write_same_10(struct scsi_cmnd *scp,
3311			      struct sdebug_dev_info *devip)
3312{
3313	u8 *cmd = scp->cmnd;
3314	u32 lba;
3315	u16 num;
3316	u32 ei_lba = 0;
3317	bool unmap = false;
3318
3319	if (cmd[1] & 0x8) {
3320		if (sdebug_lbpws10 == 0) {
3321			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3322			return check_condition_result;
3323		} else
3324			unmap = true;
3325	}
3326	lba = get_unaligned_be32(cmd + 2);
3327	num = get_unaligned_be16(cmd + 7);
3328	if (num > sdebug_write_same_length) {
3329		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3330		return check_condition_result;
3331	}
3332	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3333}
3334
3335static int resp_write_same_16(struct scsi_cmnd *scp,
3336			      struct sdebug_dev_info *devip)
3337{
3338	u8 *cmd = scp->cmnd;
3339	u64 lba;
3340	u32 num;
3341	u32 ei_lba = 0;
3342	bool unmap = false;
3343	bool ndob = false;
3344
3345	if (cmd[1] & 0x8) {	/* UNMAP */
3346		if (sdebug_lbpws == 0) {
3347			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3348			return check_condition_result;
3349		} else
3350			unmap = true;
3351	}
3352	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3353		ndob = true;
3354	lba = get_unaligned_be64(cmd + 2);
3355	num = get_unaligned_be32(cmd + 10);
3356	if (num > sdebug_write_same_length) {
3357		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3358		return check_condition_result;
3359	}
3360	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3361}
3362
3363/* Note the mode field is in the same position as the (lower) service action
3364 * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3365 * that each mode of this command be reported separately; left for the future. */
3366static int resp_write_buffer(struct scsi_cmnd *scp,
3367			     struct sdebug_dev_info *devip)
3368{
3369	u8 *cmd = scp->cmnd;
3370	struct scsi_device *sdp = scp->device;
3371	struct sdebug_dev_info *dp;
3372	u8 mode;
3373
3374	mode = cmd[1] & 0x1f;
3375	switch (mode) {
3376	case 0x4:	/* download microcode (MC) and activate (ACT) */
3377		/* set UAs on this device only */
3378		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3379		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3380		break;
3381	case 0x5:	/* download MC, save and ACT */
3382		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3383		break;
3384	case 0x6:	/* download MC with offsets and ACT */
3385		/* set UAs on most devices (LUs) in this target */
3386		list_for_each_entry(dp,
3387				    &devip->sdbg_host->dev_info_list,
3388				    dev_list)
3389			if (dp->target == sdp->id) {
3390				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3391				if (devip != dp)
3392					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3393						dp->uas_bm);
3394			}
3395		break;
3396	case 0x7:	/* download MC with offsets, save, and ACT */
3397		/* set UA on all devices (LUs) in this target */
3398		list_for_each_entry(dp,
3399				    &devip->sdbg_host->dev_info_list,
3400				    dev_list)
3401			if (dp->target == sdp->id)
3402				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3403					dp->uas_bm);
3404		break;
3405	default:
3406		/* do nothing for this command for other mode values */
3407		break;
3408	}
3409	return 0;
3410}
3411
3412static int resp_comp_write(struct scsi_cmnd *scp,
3413			   struct sdebug_dev_info *devip)
3414{
3415	u8 *cmd = scp->cmnd;
3416	u8 *arr;
3417	u8 *fake_storep_hold;
3418	u64 lba;
3419	u32 dnum;
3420	u32 lb_size = sdebug_sector_size;
3421	u8 num;
3422	unsigned long iflags;
3423	int ret;
3424	int retval = 0;
3425
3426	lba = get_unaligned_be64(cmd + 2);
3427	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3428	if (0 == num)
3429		return 0;	/* degenerate case, not an error */
3430	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3431	    (cmd[1] & 0xe0)) {
3432		mk_sense_invalid_opcode(scp);
3433		return check_condition_result;
3434	}
3435	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3436	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3437	    (cmd[1] & 0xe0) == 0)
3438		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3439			    "to DIF device\n");
3440
3441	/* inline check_device_access_params() */
3442	if (lba + num > sdebug_capacity) {
3443		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3444		return check_condition_result;
3445	}
3446	/* transfer length excessive (tie in to block limits VPD page) */
3447	if (num > sdebug_store_sectors) {
3448		/* needs work to find which cdb byte 'num' comes from */
3449		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3450		return check_condition_result;
3451	}
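	/*
	 * The data-out buffer carries the compare buffer followed by the
	 * write buffer, so 2 * num logical blocks are fetched below.
	 */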
3452	dnum = 2 * num;
3453	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3454	if (NULL == arr) {
3455		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3456				INSUFF_RES_ASCQ);
3457		return check_condition_result;
3458	}
3459
3460	write_lock_irqsave(&atomic_rw, iflags);
3461
3462	/* trick do_device_access() to fetch both the compare and write buffers
3463	 * from the data-out buffer into arr. Safe (atomic) since write_lock held. */
3464	fake_storep_hold = fake_storep;
3465	fake_storep = arr;
3466	ret = do_device_access(scp, 0, 0, dnum, true);
3467	fake_storep = fake_storep_hold;
3468	if (ret == -1) {
3469		retval = DID_ERROR << 16;
3470		goto cleanup;
3471	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3472		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3473			    "indicated=%u, IO sent=%d bytes\n", my_name,
3474			    dnum * lb_size, ret);
3475	if (!comp_write_worker(lba, num, arr)) {
3476		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3477		retval = check_condition_result;
3478		goto cleanup;
3479	}
3480	if (scsi_debug_lbp())
3481		map_region(lba, num);
3482cleanup:
3483	write_unlock_irqrestore(&atomic_rw, iflags);
3484	kfree(arr);
3485	return retval;
3486}
3487
3488struct unmap_block_desc {
3489	__be64	lba;
3490	__be32	blocks;
3491	__be32	__reserved;
3492};
3493
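/*
 * resp_unmap() below expects the UNMAP parameter list to be an 8 byte
 * header followed by the 16 byte descriptors above: bytes 0..1 hold the
 * data length (payload_len - 2), bytes 2..3 the block descriptor data
 * length (descriptors * 16), and the first descriptor starts at byte
 * offset 8.
 */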
3494static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3495{
3496	unsigned char *buf;
3497	struct unmap_block_desc *desc;
3498	unsigned int i, payload_len, descriptors;
3499	int ret;
3500	unsigned long iflags;
3501
3502
3503	if (!scsi_debug_lbp())
3504		return 0;	/* fib and say it's done */
3505	payload_len = get_unaligned_be16(scp->cmnd + 7);
3506	BUG_ON(scsi_bufflen(scp) != payload_len);
3507
3508	descriptors = (payload_len - 8) / 16;
3509	if (descriptors > sdebug_unmap_max_desc) {
3510		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3511		return check_condition_result;
3512	}
3513
3514	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3515	if (!buf) {
3516		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3517				INSUFF_RES_ASCQ);
3518		return check_condition_result;
3519	}
3520
3521	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3522
3523	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3524	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3525
3526	desc = (void *)&buf[8];
3527
3528	write_lock_irqsave(&atomic_rw, iflags);
3529
3530	for (i = 0 ; i < descriptors ; i++) {
3531		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3532		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3533
3534		ret = check_device_access_params(scp, lba, num);
3535		if (ret)
3536			goto out;
3537
3538		unmap_region(lba, num);
3539	}
3540
3541	ret = 0;
3542
3543out:
3544	write_unlock_irqrestore(&atomic_rw, iflags);
3545	kfree(buf);
3546
3547	return ret;
3548}
3549
3550#define SDEBUG_GET_LBA_STATUS_LEN 32
3551
3552static int resp_get_lba_status(struct scsi_cmnd *scp,
3553			       struct sdebug_dev_info *devip)
3554{
3555	u8 *cmd = scp->cmnd;
3556	u64 lba;
3557	u32 alloc_len, mapped, num;
3558	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3559	int ret;
3560
3561	lba = get_unaligned_be64(cmd + 2);
3562	alloc_len = get_unaligned_be32(cmd + 10);
3563
3564	if (alloc_len < 24)
3565		return 0;
3566
3567	ret = check_device_access_params(scp, lba, 1);
3568	if (ret)
3569		return ret;
3570
3571	if (scsi_debug_lbp())
3572		mapped = map_state(lba, &num);
3573	else {
3574		mapped = 1;
3575		/* following just in case virtual_gb changed */
3576		sdebug_capacity = get_sdebug_capacity();
3577		if (sdebug_capacity - lba <= 0xffffffff)
3578			num = sdebug_capacity - lba;
3579		else
3580			num = 0xffffffff;
3581	}
3582
3583	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3584	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3585	put_unaligned_be64(lba, arr + 8);	/* LBA */
3586	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3587	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3588
3589	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3590}
3591
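/*
 * SYNCHRONIZE CACHE(10) and SYNCHRONIZE CACHE(16). Setting
 * SDEG_RES_IMMED_MASK in the result tells schedule_resp() to strip the
 * mask and complete without the usual delay; that happens when the
 * IMMED bit (byte 1, bit 1) is set or when nothing has been written
 * since the last sync.
 */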
3592static int resp_sync_cache(struct scsi_cmnd *scp,
3593			   struct sdebug_dev_info *devip)
3594{
3595	int res = 0;
3596	u64 lba;
3597	u32 num_blocks;
3598	u8 *cmd = scp->cmnd;
3599
3600	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
3601		lba = get_unaligned_be32(cmd + 2);
3602		num_blocks = get_unaligned_be16(cmd + 7);
3603	} else {				/* SYNCHRONIZE_CACHE(16) */
3604		lba = get_unaligned_be64(cmd + 2);
3605		num_blocks = get_unaligned_be32(cmd + 10);
3606	}
3607	if (lba + num_blocks > sdebug_capacity) {
3608		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3609		return check_condition_result;
3610	}
3611	if (!write_since_sync || cmd[1] & 0x2)
3612		res = SDEG_RES_IMMED_MASK;
3613	else		/* delay if write_since_sync and IMMED clear */
3614		write_since_sync = false;
3615	return res;
3616}
3617
3618#define RL_BUCKET_ELEMS 8
3619
3620/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3621 * (W-LUN), the normal Linux scanning logic does not associate it with a
3622 * device (e.g. /dev/sg7). The following magic will make that association:
3623 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3624 * where <n> is a host number. If there are multiple targets in a host then
3625 * the above will associate a W-LUN with each target. To get a W-LUN for
3626 * target 2 only, use "echo '- 2 49409' > scan".
3627 */
3628static int resp_report_luns(struct scsi_cmnd *scp,
3629			    struct sdebug_dev_info *devip)
3630{
3631	unsigned char *cmd = scp->cmnd;
3632	unsigned int alloc_len;
3633	unsigned char select_report;
3634	u64 lun;
3635	struct scsi_lun *lun_p;
3636	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3637	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
3638	unsigned int wlun_cnt;	/* report luns W-LUN count */
3639	unsigned int tlun_cnt;	/* total LUN count */
3640	unsigned int rlen;	/* response length (in bytes) */
3641	int k, j, n, res;
3642	unsigned int off_rsp = 0;
3643	const int sz_lun = sizeof(struct scsi_lun);
3644
3645	clear_luns_changed_on_target(devip);
3646
3647	select_report = cmd[2];
3648	alloc_len = get_unaligned_be32(cmd + 6);
3649
3650	if (alloc_len < 4) {
3651		pr_err("alloc len too small %d\n", alloc_len);
3652		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3653		return check_condition_result;
3654	}
3655
3656	switch (select_report) {
3657	case 0:		/* all LUNs apart from W-LUNs */
3658		lun_cnt = sdebug_max_luns;
3659		wlun_cnt = 0;
3660		break;
3661	case 1:		/* only W-LUNs */
3662		lun_cnt = 0;
3663		wlun_cnt = 1;
3664		break;
3665	case 2:		/* all LUNs */
3666		lun_cnt = sdebug_max_luns;
3667		wlun_cnt = 1;
3668		break;
3669	case 0x10:	/* only administrative LUs */
3670	case 0x11:	/* see SPC-5 */
3671	case 0x12:	/* only subsidiary LUs owned by referenced LU */
3672	default:
3673		pr_debug("select report invalid %d\n", select_report);
3674		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3675		return check_condition_result;
3676	}
3677
3678	if (sdebug_no_lun_0 && (lun_cnt > 0))
3679		--lun_cnt;
3680
3681	tlun_cnt = lun_cnt + wlun_cnt;
3682	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
3683	scsi_set_resid(scp, scsi_bufflen(scp));
3684	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3685		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3686
3687	/* loops rely on the response header and a LUN entry both being 8 bytes */
3688	lun = sdebug_no_lun_0 ? 1 : 0;
3689	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3690		memset(arr, 0, sizeof(arr));
3691		lun_p = (struct scsi_lun *)&arr[0];
3692		if (k == 0) {
3693			put_unaligned_be32(rlen, &arr[0]);
3694			++lun_p;
3695			j = 1;
3696		}
3697		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3698			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3699				break;
3700			int_to_scsilun(lun++, lun_p);
3701		}
3702		if (j < RL_BUCKET_ELEMS)
3703			break;
3704		n = j * sz_lun;
3705		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
3706		if (res)
3707			return res;
3708		off_rsp += n;
3709	}
3710	if (wlun_cnt) {
3711		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3712		++j;
3713	}
3714	if (j > 0)
3715		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
3716	return res;
3717}
3718
3719static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3720			    unsigned int num, struct sdebug_dev_info *devip)
3721{
3722	int j;
3723	unsigned char *kaddr, *buf;
3724	unsigned int offset;
3725	struct scsi_data_buffer *sdb = scsi_in(scp);
3726	struct sg_mapping_iter miter;
3727
3728	/* it would be better not to use a temporary buffer here */
3729	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3730	if (!buf) {
3731		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3732				INSUFF_RES_ASCQ);
3733		return check_condition_result;
3734	}
3735
3736	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3737
3738	offset = 0;
3739	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3740			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3741
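	/* XOR the fetched data-out bytes into the data-in scatterlist,
	 * which already holds the data read by resp_read_dt0() */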
3742	while (sg_miter_next(&miter)) {
3743		kaddr = miter.addr;
3744		for (j = 0; j < miter.length; j++)
3745			*(kaddr + j) ^= *(buf + offset + j);
3746
3747		offset += miter.length;
3748	}
3749	sg_miter_stop(&miter);
3750	kfree(buf);
3751
3752	return 0;
3753}
3754
3755static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3756			       struct sdebug_dev_info *devip)
3757{
3758	u8 *cmd = scp->cmnd;
3759	u64 lba;
3760	u32 num;
3761	int errsts;
3762
3763	if (!scsi_bidi_cmnd(scp)) {
3764		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3765				INSUFF_RES_ASCQ);
3766		return check_condition_result;
3767	}
3768	errsts = resp_read_dt0(scp, devip);
3769	if (errsts)
3770		return errsts;
3771	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3772		errsts = resp_write_dt0(scp, devip);
3773		if (errsts)
3774			return errsts;
3775	}
3776	lba = get_unaligned_be32(cmd + 2);
3777	num = get_unaligned_be16(cmd + 7);
3778	return resp_xdwriteread(scp, lba, num, devip);
3779}
3780
3781static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3782{
3783	u32 tag = blk_mq_unique_tag(cmnd->request);
3784	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3785
3786	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3787	if (WARN_ON_ONCE(hwq >= submit_queues))
3788		hwq = 0;
3789	return sdebug_q_arr + hwq;
3790}
3791
3792/* Queued (deferred) command completions converge here. */
3793static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3794{
3795	int qc_idx;
3796	int retiring = 0;
3797	unsigned long iflags;
3798	struct sdebug_queue *sqp;
3799	struct sdebug_queued_cmd *sqcp;
3800	struct scsi_cmnd *scp;
3801	struct sdebug_dev_info *devip;
3802
3803	sd_dp->defer_t = SDEB_DEFER_NONE;
3804	qc_idx = sd_dp->qc_idx;
3805	sqp = sdebug_q_arr + sd_dp->sqa_idx;
3806	if (sdebug_statistics) {
3807		atomic_inc(&sdebug_completions);
3808		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3809			atomic_inc(&sdebug_miss_cpus);
3810	}
3811	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3812		pr_err("wild qc_idx=%d\n", qc_idx);
3813		return;
3814	}
3815	spin_lock_irqsave(&sqp->qc_lock, iflags);
3816	sqcp = &sqp->qc_arr[qc_idx];
3817	scp = sqcp->a_cmnd;
3818	if (unlikely(scp == NULL)) {
3819		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3820		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3821		       sd_dp->sqa_idx, qc_idx);
3822		return;
3823	}
3824	devip = (struct sdebug_dev_info *)scp->device->hostdata;
3825	if (likely(devip))
3826		atomic_dec(&devip->num_in_q);
3827	else
3828		pr_err("devip=NULL\n");
3829	if (unlikely(atomic_read(&retired_max_queue) > 0))
3830		retiring = 1;
3831
3832	sqcp->a_cmnd = NULL;
3833	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3834		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3835		pr_err("Unexpected completion\n");
3836		return;
3837	}
3838
3839	if (unlikely(retiring)) {	/* user has reduced max_queue */
3840		int k, retval;
3841
3842		retval = atomic_read(&retired_max_queue);
3843		if (qc_idx >= retval) {
3844			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3845			pr_err("index %d too large\n", retval);
3846			return;
3847		}
3848		k = find_last_bit(sqp->in_use_bm, retval);
3849		if ((k < sdebug_max_queue) || (k == retval))
3850			atomic_set(&retired_max_queue, 0);
3851		else
3852			atomic_set(&retired_max_queue, k + 1);
3853	}
3854	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3855	scp->scsi_done(scp); /* callback to mid level */
3856}
3857
3858/* When the high resolution timer goes off, this function is called. */
3859static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3860{
3861	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3862						  hrt);
3863	sdebug_q_cmd_complete(sd_dp);
3864	return HRTIMER_NORESTART;
3865}
3866
3867/* When the work queue runs the scheduled work, it calls this function. */
3868static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3869{
3870	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3871						  ew.work);
3872	sdebug_q_cmd_complete(sd_dp);
3873}
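/*
 * Both deferral paths converge on sdebug_q_cmd_complete(): an hrtimer
 * (SDEB_DEFER_HRT) when a positive delay or ndelay was requested, and a
 * work queue item (SDEB_DEFER_WQ) when jdelay is negative; see the
 * selection logic in schedule_resp().
 */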
3874
3875static bool got_shared_uuid;
3876static uuid_t shared_uuid;
3877
3878static struct sdebug_dev_info *sdebug_device_create(
3879			struct sdebug_host_info *sdbg_host, gfp_t flags)
3880{
3881	struct sdebug_dev_info *devip;
3882
3883	devip = kzalloc(sizeof(*devip), flags);
3884	if (devip) {
3885		if (sdebug_uuid_ctl == 1)
3886			uuid_gen(&devip->lu_name);
3887		else if (sdebug_uuid_ctl == 2) {
3888			if (got_shared_uuid)
3889				devip->lu_name = shared_uuid;
3890			else {
3891				uuid_gen(&shared_uuid);
3892				got_shared_uuid = true;
3893				devip->lu_name = shared_uuid;
3894			}
3895		}
3896		devip->sdbg_host = sdbg_host;
3897		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3898	}
3899	return devip;
3900}
3901
3902static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3903{
3904	struct sdebug_host_info *sdbg_host;
3905	struct sdebug_dev_info *open_devip = NULL;
3906	struct sdebug_dev_info *devip;
3907
3908	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3909	if (!sdbg_host) {
3910		pr_err("Host info NULL\n");
3911		return NULL;
3912	}
3913	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3914		if ((devip->used) && (devip->channel == sdev->channel) &&
3915		    (devip->target == sdev->id) &&
3916		    (devip->lun == sdev->lun))
3917			return devip;
3918		else {
3919			if ((!devip->used) && (!open_devip))
3920				open_devip = devip;
3921		}
3922	}
3923	if (!open_devip) { /* try and make a new one */
3924		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3925		if (!open_devip) {
3926			pr_err("out of memory at line %d\n", __LINE__);
3927			return NULL;
3928		}
3929	}
3930
3931	open_devip->channel = sdev->channel;
3932	open_devip->target = sdev->id;
3933	open_devip->lun = sdev->lun;
3934	open_devip->sdbg_host = sdbg_host;
3935	atomic_set(&open_devip->num_in_q, 0);
3936	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3937	open_devip->used = true;
3938	return open_devip;
3939}
3940
3941static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3942{
3943	if (sdebug_verbose)
3944		pr_info("slave_alloc <%u %u %u %llu>\n",
3945		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3946	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
3947	return 0;
3948}
3949
3950static int scsi_debug_slave_configure(struct scsi_device *sdp)
3951{
3952	struct sdebug_dev_info *devip =
3953			(struct sdebug_dev_info *)sdp->hostdata;
3954
3955	if (sdebug_verbose)
3956		pr_info("slave_configure <%u %u %u %llu>\n",
3957		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3958	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3959		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3960	if (devip == NULL) {
3961		devip = find_build_dev_info(sdp);
3962		if (devip == NULL)
3963			return 1;  /* no resources, will be marked offline */
3964	}
3965	sdp->hostdata = devip;
3966	blk_queue_max_segment_size(sdp->request_queue, -1U);
3967	if (sdebug_no_uld)
3968		sdp->no_uld_attach = 1;
3969	config_cdb_len(sdp);
3970	return 0;
3971}
3972
3973static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3974{
3975	struct sdebug_dev_info *devip =
3976		(struct sdebug_dev_info *)sdp->hostdata;
3977
3978	if (sdebug_verbose)
3979		pr_info("slave_destroy <%u %u %u %llu>\n",
3980		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3981	if (devip) {
3982		/* make this slot available for re-use */
3983		devip->used = false;
3984		sdp->hostdata = NULL;
3985	}
3986}
3987
3988static void stop_qc_helper(struct sdebug_defer *sd_dp,
3989			   enum sdeb_defer_type defer_t)
3990{
3991	if (!sd_dp)
3992		return;
3993	if (defer_t == SDEB_DEFER_HRT)
3994		hrtimer_cancel(&sd_dp->hrt);
3995	else if (defer_t == SDEB_DEFER_WQ)
3996		cancel_work_sync(&sd_dp->ew.work);
3997}
3998
3999/* If @cmnd is found, deletes its timer or work queue and returns true; else
4000   returns false */
4001static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
4002{
4003	unsigned long iflags;
4004	int j, k, qmax, r_qmax;
4005	enum sdeb_defer_type l_defer_t;
4006	struct sdebug_queue *sqp;
4007	struct sdebug_queued_cmd *sqcp;
4008	struct sdebug_dev_info *devip;
4009	struct sdebug_defer *sd_dp;
4010
4011	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4012		spin_lock_irqsave(&sqp->qc_lock, iflags);
4013		qmax = sdebug_max_queue;
4014		r_qmax = atomic_read(&retired_max_queue);
4015		if (r_qmax > qmax)
4016			qmax = r_qmax;
4017		for (k = 0; k < qmax; ++k) {
4018			if (test_bit(k, sqp->in_use_bm)) {
4019				sqcp = &sqp->qc_arr[k];
4020				if (cmnd != sqcp->a_cmnd)
4021					continue;
4022				/* found */
4023				devip = (struct sdebug_dev_info *)
4024						cmnd->device->hostdata;
4025				if (devip)
4026					atomic_dec(&devip->num_in_q);
4027				sqcp->a_cmnd = NULL;
4028				sd_dp = sqcp->sd_dp;
4029				if (sd_dp) {
4030					l_defer_t = sd_dp->defer_t;
4031					sd_dp->defer_t = SDEB_DEFER_NONE;
4032				} else
4033					l_defer_t = SDEB_DEFER_NONE;
4034				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4035				stop_qc_helper(sd_dp, l_defer_t);
4036				clear_bit(k, sqp->in_use_bm);
4037				return true;
4038			}
4039		}
4040		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4041	}
4042	return false;
4043}
4044
4045/* Deletes (stops) timers or work queues of all queued commands */
4046static void stop_all_queued(void)
4047{
4048	unsigned long iflags;
4049	int j, k;
4050	enum sdeb_defer_type l_defer_t;
4051	struct sdebug_queue *sqp;
4052	struct sdebug_queued_cmd *sqcp;
4053	struct sdebug_dev_info *devip;
4054	struct sdebug_defer *sd_dp;
4055
4056	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4057		spin_lock_irqsave(&sqp->qc_lock, iflags);
4058		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4059			if (test_bit(k, sqp->in_use_bm)) {
4060				sqcp = &sqp->qc_arr[k];
4061				if (sqcp->a_cmnd == NULL)
4062					continue;
4063				devip = (struct sdebug_dev_info *)
4064					sqcp->a_cmnd->device->hostdata;
4065				if (devip)
4066					atomic_dec(&devip->num_in_q);
4067				sqcp->a_cmnd = NULL;
4068				sd_dp = sqcp->sd_dp;
4069				if (sd_dp) {
4070					l_defer_t = sd_dp->defer_t;
4071					sd_dp->defer_t = SDEB_DEFER_NONE;
4072				} else
4073					l_defer_t = SDEB_DEFER_NONE;
4074				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4075				stop_qc_helper(sd_dp, l_defer_t);
4076				clear_bit(k, sqp->in_use_bm);
4077				spin_lock_irqsave(&sqp->qc_lock, iflags);
4078			}
4079		}
4080		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4081	}
4082}
4083
4084/* Free queued command memory on heap */
4085static void free_all_queued(void)
4086{
4087	int j, k;
4088	struct sdebug_queue *sqp;
4089	struct sdebug_queued_cmd *sqcp;
4090
4091	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4092		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4093			sqcp = &sqp->qc_arr[k];
4094			kfree(sqcp->sd_dp);
4095			sqcp->sd_dp = NULL;
4096		}
4097	}
4098}
4099
4100static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4101{
4102	bool ok;
4103
4104	++num_aborts;
4105	if (SCpnt) {
4106		ok = stop_queued_cmnd(SCpnt);
4107		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4108			sdev_printk(KERN_INFO, SCpnt->device,
4109				    "%s: command%s found\n", __func__,
4110				    ok ? "" : " not");
4111	}
4112	return SUCCESS;
4113}
4114
4115static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4116{
4117	++num_dev_resets;
4118	if (SCpnt && SCpnt->device) {
4119		struct scsi_device *sdp = SCpnt->device;
4120		struct sdebug_dev_info *devip =
4121				(struct sdebug_dev_info *)sdp->hostdata;
4122
4123		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4124			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4125		if (devip)
4126			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4127	}
4128	return SUCCESS;
4129}
4130
4131static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4132{
4133	struct sdebug_host_info *sdbg_host;
4134	struct sdebug_dev_info *devip;
4135	struct scsi_device *sdp;
4136	struct Scsi_Host *hp;
4137	int k = 0;
4138
4139	++num_target_resets;
4140	if (!SCpnt)
4141		goto lie;
4142	sdp = SCpnt->device;
4143	if (!sdp)
4144		goto lie;
4145	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4146		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4147	hp = sdp->host;
4148	if (!hp)
4149		goto lie;
4150	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4151	if (sdbg_host) {
4152		list_for_each_entry(devip,
4153				    &sdbg_host->dev_info_list,
4154				    dev_list)
4155			if (devip->target == sdp->id) {
4156				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4157				++k;
4158			}
4159	}
4160	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4161		sdev_printk(KERN_INFO, sdp,
4162			    "%s: %d device(s) found in target\n", __func__, k);
4163lie:
4164	return SUCCESS;
4165}
4166
4167static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4168{
4169	struct sdebug_host_info *sdbg_host;
4170	struct sdebug_dev_info *devip;
4171	struct scsi_device *sdp;
4172	struct Scsi_Host *hp;
4173	int k = 0;
4174
4175	++num_bus_resets;
4176	if (!(SCpnt && SCpnt->device))
4177		goto lie;
4178	sdp = SCpnt->device;
4179	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4180		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4181	hp = sdp->host;
4182	if (hp) {
4183		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4184		if (sdbg_host) {
4185			list_for_each_entry(devip,
4186					    &sdbg_host->dev_info_list,
4187					    dev_list) {
4188				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4189				++k;
4190			}
4191		}
4192	}
4193	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4194		sdev_printk(KERN_INFO, sdp,
4195			    "%s: %d device(s) found in host\n", __func__, k);
4196lie:
4197	return SUCCESS;
4198}
4199
4200static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4201{
4202	struct sdebug_host_info *sdbg_host;
4203	struct sdebug_dev_info *devip;
4204	int k = 0;
4205
4206	++num_host_resets;
4207	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4208		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4209	spin_lock(&sdebug_host_list_lock);
4210	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4211		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4212				    dev_list) {
4213			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4214			++k;
4215		}
4216	}
4217	spin_unlock(&sdebug_host_list_lock);
4218	stop_all_queued();
4219	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4220		sdev_printk(KERN_INFO, SCpnt->device,
4221			    "%s: %d device(s) found\n", __func__, k);
4222	return SUCCESS;
4223}
4224
4225static void __init sdebug_build_parts(unsigned char *ramp,
4226				      unsigned long store_size)
4227{
4228	struct partition *pp;
4229	int starts[SDEBUG_MAX_PARTS + 2];
4230	int sectors_per_part, num_sectors, k;
4231	int heads_by_sects, start_sec, end_sec;
4232
4233	/* assume partition table already zeroed */
4234	if ((sdebug_num_parts < 1) || (store_size < 1048576))
4235		return;
4236	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
4237		sdebug_num_parts = SDEBUG_MAX_PARTS;
4238		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
4239	}
4240	num_sectors = (int)sdebug_store_sectors;
4241	sectors_per_part = (num_sectors - sdebug_sectors_per)
4242			   / sdebug_num_parts;
4243	heads_by_sects = sdebug_heads * sdebug_sectors_per;
4244	starts[0] = sdebug_sectors_per;
4245	for (k = 1; k < sdebug_num_parts; ++k)
4246		starts[k] = ((k * sectors_per_part) / heads_by_sects)
4247			    * heads_by_sects;
4248	starts[sdebug_num_parts] = num_sectors;
4249	starts[sdebug_num_parts + 1] = 0;
4250
4251	ramp[510] = 0x55;	/* magic partition markings */
4252	ramp[511] = 0xAA;
4253	pp = (struct partition *)(ramp + 0x1be);
4254	for (k = 0; starts[k + 1]; ++k, ++pp) {
4255		start_sec = starts[k];
4256		end_sec = starts[k + 1] - 1;
4257		pp->boot_ind = 0;
4258
4259		pp->cyl = start_sec / heads_by_sects;
4260		pp->head = (start_sec - (pp->cyl * heads_by_sects))
4261			   / sdebug_sectors_per;
4262		pp->sector = (start_sec % sdebug_sectors_per) + 1;
4263
4264		pp->end_cyl = end_sec / heads_by_sects;
4265		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
4266			       / sdebug_sectors_per;
4267		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
4268
4269		pp->start_sect = cpu_to_le32(start_sec);
4270		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
4271		pp->sys_ind = 0x83;	/* plain Linux partition */
4272	}
4273}
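/*
 * Worked example with illustrative (not necessarily default) geometry:
 * heads=255 and sectors_per=63 give heads_by_sects = 16065, so each
 * starts[k] above is rounded down to a multiple of 16065 sectors, i.e.
 * every partition begins on a cylinder boundary as DOS-style partition
 * tables expect.
 */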
4274
4275static void block_unblock_all_queues(bool block)
4276{
4277	int j;
4278	struct sdebug_queue *sqp;
4279
4280	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4281		atomic_set(&sqp->blocked, (int)block);
4282}
4283
4284/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4285 * commands will be processed normally before triggers occur.
4286 */
4287static void tweak_cmnd_count(void)
4288{
4289	int count, modulo;
4290
4291	modulo = abs(sdebug_every_nth);
4292	if (modulo < 2)
4293		return;
4294	block_unblock_all_queues(true);
4295	count = atomic_read(&sdebug_cmnd_count);
4296	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4297	block_unblock_all_queues(false);
4298}
4299
4300static void clear_queue_stats(void)
4301{
4302	atomic_set(&sdebug_cmnd_count, 0);
4303	atomic_set(&sdebug_completions, 0);
4304	atomic_set(&sdebug_miss_cpus, 0);
4305	atomic_set(&sdebug_a_tsf, 0);
4306}
4307
4308static void setup_inject(struct sdebug_queue *sqp,
4309			 struct sdebug_queued_cmd *sqcp)
4310{
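	/*
	 * Arm the injection flags only on every abs(sdebug_every_nth)-th
	 * command; otherwise clear the one-shot flags (when every_nth is
	 * positive) and return.
	 */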
4311	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4312		if (sdebug_every_nth > 0)
4313			sqcp->inj_recovered = sqcp->inj_transport
4314				= sqcp->inj_dif
4315				= sqcp->inj_dix = sqcp->inj_short = 0;
4316		return;
4317	}
4318	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4319	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4320	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4321	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4322	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4323	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4324}
4325
4326/* Complete the processing of the thread that queued a SCSI command to this
4327 * driver. It either completes the command in the invoking thread (see the
4328 * respond_in_thread label) or schedules an hrtimer or work queue and then
4329 * returns 0. Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4330 */
4331static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4332			 int scsi_result,
4333			 int (*pfp)(struct scsi_cmnd *,
4334				    struct sdebug_dev_info *),
4335			 int delta_jiff, int ndelay)
4336{
4337	unsigned long iflags;
4338	int k, num_in_q, qdepth, inject;
4339	struct sdebug_queue *sqp;
4340	struct sdebug_queued_cmd *sqcp;
4341	struct scsi_device *sdp;
4342	struct sdebug_defer *sd_dp;
4343
4344	if (unlikely(devip == NULL)) {
4345		if (scsi_result == 0)
4346			scsi_result = DID_NO_CONNECT << 16;
4347		goto respond_in_thread;
4348	}
4349	sdp = cmnd->device;
4350
4351	if (delta_jiff == 0)
4352		goto respond_in_thread;
4353
4354	/* schedule the response at a later time if resources permit */
4355	sqp = get_queue(cmnd);
4356	spin_lock_irqsave(&sqp->qc_lock, iflags);
4357	if (unlikely(atomic_read(&sqp->blocked))) {
4358		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4359		return SCSI_MLQUEUE_HOST_BUSY;
4360	}
4361	num_in_q = atomic_read(&devip->num_in_q);
4362	qdepth = cmnd->device->queue_depth;
4363	inject = 0;
4364	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4365		if (scsi_result) {
4366			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4367			goto respond_in_thread;
4368		} else
4369			scsi_result = device_qfull_result;
4370	} else if (unlikely(sdebug_every_nth &&
4371			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4372			    (scsi_result == 0))) {
4373		if ((num_in_q == (qdepth - 1)) &&
4374		    (atomic_inc_return(&sdebug_a_tsf) >=
4375		     abs(sdebug_every_nth))) {
4376			atomic_set(&sdebug_a_tsf, 0);
4377			inject = 1;
4378			scsi_result = device_qfull_result;
4379		}
4380	}
4381
4382	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4383	if (unlikely(k >= sdebug_max_queue)) {
4384		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4385		if (scsi_result)
4386			goto respond_in_thread;
4387		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4388			scsi_result = device_qfull_result;
4389		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4390			sdev_printk(KERN_INFO, sdp,
4391				    "%s: max_queue=%d exceeded, %s\n",
4392				    __func__, sdebug_max_queue,
4393				    (scsi_result ?  "status: TASK SET FULL" :
4394						    "report: host busy"));
4395		if (scsi_result)
4396			goto respond_in_thread;
4397		else
4398			return SCSI_MLQUEUE_HOST_BUSY;
4399	}
4400	__set_bit(k, sqp->in_use_bm);
4401	atomic_inc(&devip->num_in_q);
4402	sqcp = &sqp->qc_arr[k];
4403	sqcp->a_cmnd = cmnd;
4404	cmnd->host_scribble = (unsigned char *)sqcp;
4405	sd_dp = sqcp->sd_dp;
4406	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4407	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4408		setup_inject(sqp, sqcp);
4409	if (sd_dp == NULL) {
4410		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4411		if (sd_dp == NULL)
4412			return SCSI_MLQUEUE_HOST_BUSY;
4413	}
4414
4415	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4416	if (cmnd->result & SDEG_RES_IMMED_MASK) {
4417		/*
4418		 * This is the F_DELAY_OVERR case. No delay.
4419		 */
4420		cmnd->result &= ~SDEG_RES_IMMED_MASK;
4421		delta_jiff = ndelay = 0;
4422	}
4423	if (cmnd->result == 0 && scsi_result != 0)
4424		cmnd->result = scsi_result;
4425
4426	if (unlikely(sdebug_verbose && cmnd->result))
4427		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4428			    __func__, cmnd->result);
4429
4430	if (delta_jiff > 0 || ndelay > 0) {
4431		ktime_t kt;
4432
4433		if (delta_jiff > 0) {
4434			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4435		} else
4436			kt = ndelay;
4437		if (!sd_dp->init_hrt) {
4438			sd_dp->init_hrt = true;
4439			sqcp->sd_dp = sd_dp;
4440			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4441				     HRTIMER_MODE_REL_PINNED);
4442			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4443			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4444			sd_dp->qc_idx = k;
4445		}
4446		if (sdebug_statistics)
4447			sd_dp->issuing_cpu = raw_smp_processor_id();
4448		sd_dp->defer_t = SDEB_DEFER_HRT;
4449		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4450	} else {	/* jdelay < 0, use work queue */
4451		if (!sd_dp->init_wq) {
4452			sd_dp->init_wq = true;
4453			sqcp->sd_dp = sd_dp;
4454			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4455			sd_dp->qc_idx = k;
4456			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4457		}
4458		if (sdebug_statistics)
4459			sd_dp->issuing_cpu = raw_smp_processor_id();
4460		sd_dp->defer_t = SDEB_DEFER_WQ;
4461		schedule_work(&sd_dp->ew.work);
4462	}
4463	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4464		     (scsi_result == device_qfull_result)))
4465		sdev_printk(KERN_INFO, sdp,
4466			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4467			    num_in_q, (inject ? "<inject> " : ""),
4468			    "status: TASK SET FULL");
4469	return 0;
4470
4471respond_in_thread:	/* call back to mid-layer using invocation thread */
4472	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4473	cmnd->result &= ~SDEG_RES_IMMED_MASK;
4474	if (cmnd->result == 0 && scsi_result != 0)
4475		cmnd->result = scsi_result;
4476	cmnd->scsi_done(cmnd);
4477	return 0;
4478}
4479
4480/* Note: The following macros create attribute files in the
4481   /sys/module/scsi_debug/parameters directory. Unfortunately this
4482   driver is not notified of such changes and cannot trigger auxiliary actions
4483   as it can when the corresponding attribute in the
4484   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4485 */
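/*
 * Example invocation with illustrative (hypothetical) values:
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * The same names then appear under /sys/module/scsi_debug/parameters/
 * with the permission bits given below.
 */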
4486module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4487module_param_named(ato, sdebug_ato, int, S_IRUGO);
4488module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4489module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4490module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4491module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4492module_param_named(dif, sdebug_dif, int, S_IRUGO);
4493module_param_named(dix, sdebug_dix, int, S_IRUGO);
4494module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4495module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4496module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4497module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4498module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4499module_param_string(inq_vendor, sdebug_inq_vendor_id,
4500		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4501module_param_string(inq_product, sdebug_inq_product_id,
4502		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4503module_param_string(inq_rev, sdebug_inq_product_rev,
4504		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4505module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4506module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4507module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4508module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4509module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4510module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4511module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4512module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
4513module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
4514module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4515module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4516module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4517module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4518module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4519module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4520module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4521module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4522module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4523module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4524module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4525module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4526module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4527module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4528module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4529module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4530module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4531module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4532module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4533module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4534module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4535module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4536module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4537		   S_IRUGO | S_IWUSR);
4538module_param_named(write_same_length, sdebug_write_same_length, int,
4539		   S_IRUGO | S_IWUSR);
4540
4541MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4542MODULE_DESCRIPTION("SCSI debug adapter driver");
4543MODULE_LICENSE("GPL");
4544MODULE_VERSION(SDEBUG_VERSION);
4545
4546MODULE_PARM_DESC(add_host, "0..127 hosts allowed (def=1)");
4547MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4548MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4549MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4550MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4551MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4552MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4553MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4554MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
4555MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
4556MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4557MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4558MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4559MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4560MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4561MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4562		 SDEBUG_VERSION "\")");
4563MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4564MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4565MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4566MODULE_PARM_DESC(lbprz,
4567	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4568MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4569MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
4570MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4571MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
4572MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow-on MEDIUM error");
4573MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4574MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4575MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4576MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
4577MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
4578MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4579MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4580MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4581MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4582MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
4583MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4584MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
4585MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4586MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4587MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4588MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4589MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4590MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4591MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4592MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4593MODULE_PARM_DESC(uuid_ctl,
4594		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4595MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4596MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4597MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4598
4599#define SDEBUG_INFO_LEN 256
4600static char sdebug_info[SDEBUG_INFO_LEN];
4601
4602static const char *scsi_debug_info(struct Scsi_Host *shp)
4603{
4604	int k;
4605
4606	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4607		      my_name, SDEBUG_VERSION, sdebug_version_date);
4608	if (k >= (SDEBUG_INFO_LEN - 1))
4609		return sdebug_info;
4610	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4611		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4612		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4613		  "statistics", (int)sdebug_statistics);
4614	return sdebug_info;
4615}
4616
4617/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4618static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4619				 int length)
4620{
4621	char arr[16];
4622	int opts;
4623	int minLen = length > 15 ? 15 : length;
4624
4625	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4626		return -EACCES;
4627	memcpy(arr, buffer, minLen);
4628	arr[minLen] = '\0';
4629	if (1 != sscanf(arr, "%d", &opts))
4630		return -EINVAL;
4631	sdebug_opts = opts;
4632	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4633	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4634	if (sdebug_every_nth != 0)
4635		tweak_cmnd_count();
4636	return length;
4637}
4638
4639/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4640 * same for each scsi_debug host (if more than one). Some of the counters
4641 * shown are not atomic, so they may be inaccurate in a busy system. */
4642static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4643{
4644	int f, j, l;
4645	struct sdebug_queue *sqp;
4646
4647	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4648		   SDEBUG_VERSION, sdebug_version_date);
4649	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4650		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4651		   sdebug_opts, sdebug_every_nth);
4652	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4653		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4654		   sdebug_sector_size, "bytes");
4655	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4656		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4657		   num_aborts);
4658	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4659		   num_dev_resets, num_target_resets, num_bus_resets,
4660		   num_host_resets);
4661	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4662		   dix_reads, dix_writes, dif_errors);
4663	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4664		   sdebug_statistics);
4665	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4666		   atomic_read(&sdebug_cmnd_count),
4667		   atomic_read(&sdebug_completions),
4668		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4669		   atomic_read(&sdebug_a_tsf));
4670
4671	seq_printf(m, "submit_queues=%d\n", submit_queues);
4672	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4673		seq_printf(m, "  queue %d:\n", j);
4674		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4675		if (f != sdebug_max_queue) {
4676			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4677			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4678				   "first,last bits", f, l);
4679		}
4680	}
4681	return 0;
4682}
4683
4684static ssize_t delay_show(struct device_driver *ddp, char *buf)
4685{
4686	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4687}
4688/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4689 * of delay is jiffies.
4690 */
4691static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4692			   size_t count)
4693{
4694	int jdelay, res;
4695
4696	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4697		res = count;
4698		if (sdebug_jdelay != jdelay) {
4699			int j, k;
4700			struct sdebug_queue *sqp;
4701
4702			block_unblock_all_queues(true);
4703			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4704			     ++j, ++sqp) {
4705				k = find_first_bit(sqp->in_use_bm,
4706						   sdebug_max_queue);
4707				if (k != sdebug_max_queue) {
4708					res = -EBUSY;   /* queued commands */
4709					break;
4710				}
4711			}
4712			if (res > 0) {
4713				sdebug_jdelay = jdelay;
4714				sdebug_ndelay = 0;
4715			}
4716			block_unblock_all_queues(false);
4717		}
4718		return res;
4719	}
4720	return -EINVAL;
4721}
4722static DRIVER_ATTR_RW(delay);
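/*
 * For example, 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay'
 * makes responses synchronous in the submitting thread, while a
 * negative value selects the work queue deferral path; see
 * schedule_resp().
 */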
4723
4724static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4725{
4726	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4727}
4728/* Returns -EBUSY if ndelay is being changed and commands are queued */
4729/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4730static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4731			    size_t count)
4732{
4733	int ndelay, res;
4734
4735	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4736	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4737		res = count;
4738		if (sdebug_ndelay != ndelay) {
4739			int j, k;
4740			struct sdebug_queue *sqp;
4741
4742			block_unblock_all_queues(true);
4743			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4744			     ++j, ++sqp) {
4745				k = find_first_bit(sqp->in_use_bm,
4746						   sdebug_max_queue);
4747				if (k != sdebug_max_queue) {
4748					res = -EBUSY;   /* queued commands */
4749					break;
4750				}
4751			}
4752			if (res > 0) {
4753				sdebug_ndelay = ndelay;
4754				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4755							: DEF_JDELAY;
4756			}
4757			block_unblock_all_queues(false);
4758		}
4759		return res;
4760	}
4761	return -EINVAL;
4762}
4763static DRIVER_ATTR_RW(ndelay);
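/*
 * Example (hypothetical): a 1 millisecond nanosecond-granularity
 * delay; any accepted non-zero value overrides jdelay as noted above.
 *
 *   echo 1000000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 */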
4764
4765static ssize_t opts_show(struct device_driver *ddp, char *buf)
4766{
4767	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4768}
4769
4770static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4771			  size_t count)
4772{
4773	int opts;
4774	char work[20];
4775
4776	if (sscanf(buf, "%10s", work) == 1) {
4777		if (strncasecmp(work, "0x", 2) == 0) {
4778			if (kstrtoint(work + 2, 16, &opts) == 0)
4779				goto opts_done;
4780		} else {
4781			if (kstrtoint(work, 10, &opts) == 0)
4782				goto opts_done;
4783		}
4784	}
4785	return -EINVAL;
4786opts_done:
4787	sdebug_opts = opts;
4788	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4789	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4790	tweak_cmnd_count();
4791	return count;
4792}
4793static DRIVER_ATTR_RW(opts);
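/*
 * Example (hypothetical): opts_store() accepts hex (with a "0x"
 * prefix) or decimal, so the following two writes are equivalent:
 *
 *   echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */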
4794
4795static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4796{
4797	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4798}
4799static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4800			   size_t count)
4801{
4802	int n;
4803
4804	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4805		sdebug_ptype = n;
4806		return count;
4807	}
4808	return -EINVAL;
4809}
4810static DRIVER_ATTR_RW(ptype);
4811
4812static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4813{
4814	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4815}
4816static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4817			    size_t count)
4818{
4819	int n;
4820
4821	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4822		sdebug_dsense = n;
4823		return count;
4824	}
4825	return -EINVAL;
4826}
4827static DRIVER_ATTR_RW(dsense);
4828
4829static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4830{
4831	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4832}
4833static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4834			     size_t count)
4835{
4836	int n;
4837
4838	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
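		/* normalize both the new and the current setting to 0 or 1 */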
4839		n = (n > 0);
4840		sdebug_fake_rw = (sdebug_fake_rw > 0);
4841		if (sdebug_fake_rw != n) {
4842			if ((0 == n) && (NULL == fake_storep)) {
4843				unsigned long sz =
4844					(unsigned long)sdebug_dev_size_mb *
4845					1048576;
4846
4847				fake_storep = vmalloc(sz);
4848				if (NULL == fake_storep) {
4849					pr_err("out of memory, 9\n");
4850					return -ENOMEM;
4851				}
4852				memset(fake_storep, 0, sz);
4853			}
4854			sdebug_fake_rw = n;
4855		}
4856		return count;
4857	}
4858	return -EINVAL;
4859}
4860static DRIVER_ATTR_RW(fake_rw);
4861
4862static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4863{
4864	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4865}
4866static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4867			      size_t count)
4868{
4869	int n;
4870
4871	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4872		sdebug_no_lun_0 = n;
4873		return count;
4874	}
4875	return -EINVAL;
4876}
4877static DRIVER_ATTR_RW(no_lun_0);
4878
4879static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4880{
4881	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4882}
4883static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4884			      size_t count)
4885{
4886	int n;
4887
4888	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4889		sdebug_num_tgts = n;
4890		sdebug_max_tgts_luns();
4891		return count;
4892	}
4893	return -EINVAL;
4894}
4895static DRIVER_ATTR_RW(num_tgts);
4896
4897static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4898{
4899	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4900}
4901static DRIVER_ATTR_RO(dev_size_mb);
4902
4903static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4904{
4905	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4906}
4907static DRIVER_ATTR_RO(num_parts);
4908
4909static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4910{
4911	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4912}
4913static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4914			       size_t count)
4915{
4916	int nth;
4917
4918	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4919		sdebug_every_nth = nth;
4920		if (nth && !sdebug_statistics) {
4921			pr_info("every_nth needs statistics=1, set it\n");
4922			sdebug_statistics = true;
4923		}
4924		tweak_cmnd_count();
4925		return count;
4926	}
4927	return -EINVAL;
4928}
4929static DRIVER_ATTR_RW(every_nth);
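/*
 * Example (hypothetical): act on every 100th command; per
 * every_nth_store() above this implicitly turns statistics on.
 *
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */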
4930
4931static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4932{
4933	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4934}
4935static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4936			      size_t count)
4937{
4938	int n;
4939	bool changed;
4940
4941	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4942		if (n > 256) {
4943			pr_warn("max_luns can be no more than 256\n");
4944			return -EINVAL;
4945		}
4946		changed = (sdebug_max_luns != n);
4947		sdebug_max_luns = n;
4948		sdebug_max_tgts_luns();
4949		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4950			struct sdebug_host_info *sdhp;
4951			struct sdebug_dev_info *dp;
4952
4953			spin_lock(&sdebug_host_list_lock);
4954			list_for_each_entry(sdhp, &sdebug_host_list,
4955					    host_list) {
4956				list_for_each_entry(dp, &sdhp->dev_info_list,
4957						    dev_list) {
4958					set_bit(SDEBUG_UA_LUNS_CHANGED,
4959						dp->uas_bm);
4960				}
4961			}
4962			spin_unlock(&sdebug_host_list_lock);
4963		}
4964		return count;
4965	}
4966	return -EINVAL;
4967}
4968static DRIVER_ATTR_RW(max_luns);
4969
4970static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4971{
4972	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4973}
4974/* N.B. max_queue can be changed while there are queued commands. In-flight
4975 * commands beyond the new max_queue will still be completed. */
4976static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4977			       size_t count)
4978{
4979	int j, n, k, a;
4980	struct sdebug_queue *sqp;
4981
4982	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4983	    (n <= SDEBUG_CANQUEUE)) {
4984		block_unblock_all_queues(true);
4985		k = 0;
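		/*
		 * Find the highest in-use tag across all submit queues;
		 * find_last_bit() returns SDEBUG_CANQUEUE for an empty bitmap.
		 */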
4986		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4987		     ++j, ++sqp) {
4988			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4989			if (a > k)
4990				k = a;
4991		}
4992		sdebug_max_queue = n;
4993		if (k == SDEBUG_CANQUEUE)
4994			atomic_set(&retired_max_queue, 0);
4995		else if (k >= n)
4996			atomic_set(&retired_max_queue, k + 1);
4997		else
4998			atomic_set(&retired_max_queue, 0);
4999		block_unblock_all_queues(false);
5000		return count;
5001	}
5002	return -EINVAL;
5003}
5004static DRIVER_ATTR_RW(max_queue);
5005
5006static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
5007{
5008	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
5009}
5010static DRIVER_ATTR_RO(no_uld);
5011
5012static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
5013{
5014	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
5015}
5016static DRIVER_ATTR_RO(scsi_level);
5017
5018static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
5019{
5020	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
5021}
5022static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
5023				size_t count)
5024{
5025	int n;
5026	bool changed;
5027
5028	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5029		changed = (sdebug_virtual_gb != n);
5030		sdebug_virtual_gb = n;
5031		sdebug_capacity = get_sdebug_capacity();
5032		if (changed) {
5033			struct sdebug_host_info *sdhp;
5034			struct sdebug_dev_info *dp;
5035
5036			spin_lock(&sdebug_host_list_lock);
5037			list_for_each_entry(sdhp, &sdebug_host_list,
5038					    host_list) {
5039				list_for_each_entry(dp, &sdhp->dev_info_list,
5040						    dev_list) {
5041					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
5042						dp->uas_bm);
5043				}
5044			}
5045			spin_unlock(&sdebug_host_list_lock);
5046		}
5047		return count;
5048	}
5049	return -EINVAL;
5050}
5051static DRIVER_ATTR_RW(virtual_gb);
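/*
 * Example (hypothetical): present a 2 GB virtual capacity; each
 * device then raises a CAPACITY CHANGED unit attention, per the
 * uas_bm loop above.
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 */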
5052
5053static ssize_t add_host_show(struct device_driver *ddp, char *buf)
5054{
5055	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
5056}
5057
5058static int sdebug_add_adapter(void);
5059static void sdebug_remove_adapter(void);
5060
5061static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
5062			      size_t count)
5063{
5064	int delta_hosts;
5065
5066	if (sscanf(buf, "%d", &delta_hosts) != 1)
5067		return -EINVAL;
5068	if (delta_hosts > 0) {
5069		do {
5070			sdebug_add_adapter();
5071		} while (--delta_hosts);
5072	} else if (delta_hosts < 0) {
5073		do {
5074			sdebug_remove_adapter();
5075		} while (++delta_hosts);
5076	}
5077	return count;
5078}
5079static DRIVER_ATTR_RW(add_host);
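/*
 * Example (hypothetical): add two more simulated hosts, then remove
 * one again; negative values remove hosts, per add_host_store().
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */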
5080
5081static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5082{
5083	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5084}
5085static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5086				    size_t count)
5087{
5088	int n;
5089
5090	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5091		sdebug_vpd_use_hostno = n;
5092		return count;
5093	}
5094	return -EINVAL;
5095}
5096static DRIVER_ATTR_RW(vpd_use_hostno);
5097
5098static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5099{
5100	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5101}
5102static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5103				size_t count)
5104{
5105	int n;
5106
5107	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5108		if (n > 0)
5109			sdebug_statistics = true;
5110		else {
5111			clear_queue_stats();
5112			sdebug_statistics = false;
5113		}
5114		return count;
5115	}
5116	return -EINVAL;
5117}
5118static DRIVER_ATTR_RW(statistics);
5119
5120static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5121{
5122	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5123}
5124static DRIVER_ATTR_RO(sector_size);
5125
5126static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5127{
5128	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5129}
5130static DRIVER_ATTR_RO(submit_queues);
5131
5132static ssize_t dix_show(struct device_driver *ddp, char *buf)
5133{
5134	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5135}
5136static DRIVER_ATTR_RO(dix);
5137
5138static ssize_t dif_show(struct device_driver *ddp, char *buf)
5139{
5140	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5141}
5142static DRIVER_ATTR_RO(dif);
5143
5144static ssize_t guard_show(struct device_driver *ddp, char *buf)
5145{
5146	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5147}
5148static DRIVER_ATTR_RO(guard);
5149
5150static ssize_t ato_show(struct device_driver *ddp, char *buf)
5151{
5152	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5153}
5154static DRIVER_ATTR_RO(ato);
5155
5156static ssize_t map_show(struct device_driver *ddp, char *buf)
5157{
5158	ssize_t count;
5159
5160	if (!scsi_debug_lbp())
5161		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5162				 sdebug_store_sectors);
5163
5164	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5165			  (int)map_size, map_storep);
5166	buf[count++] = '\n';
5167	buf[count] = '\0';
5168
5169	return count;
5170}
5171static DRIVER_ATTR_RO(map);
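/*
 * Illustrative: "%*pbl" renders the provisioning bitmap as a ranged
 * list, so a map with blocks 0, 1 and 5 set would read back as:
 *
 *   cat /sys/bus/pseudo/drivers/scsi_debug/map
 *   0-1,5
 */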
5172
5173static ssize_t removable_show(struct device_driver *ddp, char *buf)
5174{
5175	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5176}
5177static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5178			       size_t count)
5179{
5180	int n;
5181
5182	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5183		sdebug_removable = (n > 0);
5184		return count;
5185	}
5186	return -EINVAL;
5187}
5188static DRIVER_ATTR_RW(removable);
5189
5190static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5191{
5192	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5193}
5194/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5195static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5196			       size_t count)
5197{
5198	int n;
5199
5200	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5201		sdebug_host_lock = (n > 0);
5202		return count;
5203	}
5204	return -EINVAL;
5205}
5206static DRIVER_ATTR_RW(host_lock);
5207
5208static ssize_t strict_show(struct device_driver *ddp, char *buf)
5209{
5210	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5211}
5212static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5213			    size_t count)
5214{
5215	int n;
5216
5217	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5218		sdebug_strict = (n > 0);
5219		return count;
5220	}
5221	return -EINVAL;
5222}
5223static DRIVER_ATTR_RW(strict);
5224
5225static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5226{
5227	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5228}
5229static DRIVER_ATTR_RO(uuid_ctl);
5230
5231static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5232{
5233	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5234}
5235static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5236			     size_t count)
5237{
5238	int ret, n;
5239
5240	ret = kstrtoint(buf, 0, &n);
5241	if (ret)
5242		return ret;
5243	sdebug_cdb_len = n;
5244	all_config_cdb_len();
5245	return count;
5246}
5247static DRIVER_ATTR_RW(cdb_len);
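/*
 * Example (hypothetical): kstrtoint() with base 0 auto-detects the
 * numeric base, so decimal and hex forms both work here:
 *
 *   echo 16 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len
 *   echo 0x10 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len
 */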
5248
5249
5250/* Note: The following array creates attribute files in the
5251   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5252   files (over those found in the /sys/module/scsi_debug/parameters
5253   directory) is that auxiliary actions can be triggered when an attribute
5254   is changed. For example see: add_host_store() above.
5255 */
5256
5257static struct attribute *sdebug_drv_attrs[] = {
5258	&driver_attr_delay.attr,
5259	&driver_attr_opts.attr,
5260	&driver_attr_ptype.attr,
5261	&driver_attr_dsense.attr,
5262	&driver_attr_fake_rw.attr,
5263	&driver_attr_no_lun_0.attr,
5264	&driver_attr_num_tgts.attr,
5265	&driver_attr_dev_size_mb.attr,
5266	&driver_attr_num_parts.attr,
5267	&driver_attr_every_nth.attr,
5268	&driver_attr_max_luns.attr,
5269	&driver_attr_max_queue.attr,
5270	&driver_attr_no_uld.attr,
5271	&driver_attr_scsi_level.attr,
5272	&driver_attr_virtual_gb.attr,
5273	&driver_attr_add_host.attr,
5274	&driver_attr_vpd_use_hostno.attr,
5275	&driver_attr_sector_size.attr,
5276	&driver_attr_statistics.attr,
5277	&driver_attr_submit_queues.attr,
5278	&driver_attr_dix.attr,
5279	&driver_attr_dif.attr,
5280	&driver_attr_guard.attr,
5281	&driver_attr_ato.attr,
5282	&driver_attr_map.attr,
5283	&driver_attr_removable.attr,
5284	&driver_attr_host_lock.attr,
5285	&driver_attr_ndelay.attr,
5286	&driver_attr_strict.attr,
5287	&driver_attr_uuid_ctl.attr,
5288	&driver_attr_cdb_len.attr,
5289	NULL,
5290};
5291ATTRIBUTE_GROUPS(sdebug_drv);
5292
5293static struct device *pseudo_primary;
5294
5295static int __init scsi_debug_init(void)
5296{
5297	unsigned long sz;
5298	int host_to_add;
5299	int k;
5300	int ret;
5301
5302	atomic_set(&retired_max_queue, 0);
5303
5304	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
5305		pr_warn("ndelay must be less than 1 second, ignored\n");
5306		sdebug_ndelay = 0;
5307	} else if (sdebug_ndelay > 0)
5308		sdebug_jdelay = JDELAY_OVERRIDDEN;
5309
5310	switch (sdebug_sector_size) {
5311	case  512:
5312	case 1024:
5313	case 2048:
5314	case 4096:
5315		break;
5316	default:
5317		pr_err("invalid sector_size %d\n", sdebug_sector_size);
5318		return -EINVAL;
5319	}
5320
5321	switch (sdebug_dif) {
5322	case T10_PI_TYPE0_PROTECTION:
5323		break;
5324	case T10_PI_TYPE1_PROTECTION:
5325	case T10_PI_TYPE2_PROTECTION:
5326	case T10_PI_TYPE3_PROTECTION:
5327		have_dif_prot = true;
5328		break;
5329
5330	default:
5331		pr_err("dif must be 0, 1, 2 or 3\n");
5332		return -EINVAL;
5333	}
5334
5335	if (sdebug_guard > 1) {
5336		pr_err("guard must be 0 or 1\n");
5337		return -EINVAL;
5338	}
5339
5340	if (sdebug_ato > 1) {
5341		pr_err("ato must be 0 or 1\n");
5342		return -EINVAL;
5343	}
5344
5345	if (sdebug_physblk_exp > 15) {
5346		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
5347		return -EINVAL;
5348	}
5349	if (sdebug_max_luns > 256) {
5350		pr_warn("max_luns can be no more than 256, use default\n");
5351		sdebug_max_luns = DEF_MAX_LUNS;
5352	}
5353
5354	if (sdebug_lowest_aligned > 0x3fff) {
5355		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
5356		return -EINVAL;
5357	}
5358
5359	if (submit_queues < 1) {
5360		pr_err("submit_queues must be 1 or more\n");
5361		return -EINVAL;
5362	}
5363	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
5364			       GFP_KERNEL);
5365	if (sdebug_q_arr == NULL)
5366		return -ENOMEM;
5367	for (k = 0; k < submit_queues; ++k)
5368		spin_lock_init(&sdebug_q_arr[k].qc_lock);
5369
5370	if (sdebug_dev_size_mb < 1)
5371		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
5372	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
5373	sdebug_store_sectors = sz / sdebug_sector_size;
5374	sdebug_capacity = get_sdebug_capacity();
5375
5376	/* play around with geometry, don't waste too much on track 0 */
5377	sdebug_heads = 8;
5378	sdebug_sectors_per = 32;
5379	if (sdebug_dev_size_mb >= 256)
5380		sdebug_heads = 64;
5381	else if (sdebug_dev_size_mb >= 16)
5382		sdebug_heads = 32;
5383	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5384			       (sdebug_sectors_per * sdebug_heads);
5385	if (sdebug_cylinders_per >= 1024) {
5386		/* other LLDs do this; implies >= 1GB ram disk ... */
5387		sdebug_heads = 255;
5388		sdebug_sectors_per = 63;
5389		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5390			       (sdebug_sectors_per * sdebug_heads);
5391	}
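	/*
	 * Worked example (assuming virtual_gb=0 so capacity equals the
	 * store size): an 8 MB ramdisk with 512 byte sectors holds 16384
	 * sectors; with 8 heads and 32 sectors per track that gives
	 * 16384 / (8 * 32) = 64 cylinders.  Only ramdisks of roughly
	 * 1 GB and up trip the 255/63 fallback above.
	 */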
5392
5393	if (sdebug_fake_rw == 0) {
5394		fake_storep = vmalloc(sz);
5395		if (NULL == fake_storep) {
5396			pr_err("out of memory, 1\n");
5397			ret = -ENOMEM;
5398			goto free_q_arr;
5399		}
5400		memset(fake_storep, 0, sz);
5401		if (sdebug_num_parts > 0)
5402			sdebug_build_parts(fake_storep, sz);
5403	}
5404
5405	if (sdebug_dix) {
5406		int dif_size;
5407
5408		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5409		dif_storep = vmalloc(dif_size);
5410
5411		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5412
5413		if (dif_storep == NULL) {
5414			pr_err("out of mem. (DIX)\n");
5415			ret = -ENOMEM;
5416			goto free_vm;
5417		}
5418
5419		memset(dif_storep, 0xff, dif_size);
5420	}
5421
5422	/* Logical Block Provisioning */
5423	if (scsi_debug_lbp()) {
5424		sdebug_unmap_max_blocks =
5425			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5426
5427		sdebug_unmap_max_desc =
5428			clamp(sdebug_unmap_max_desc, 0U, 256U);
5429
5430		sdebug_unmap_granularity =
5431			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5432
5433		if (sdebug_unmap_alignment &&
5434		    sdebug_unmap_granularity <=
5435		    sdebug_unmap_alignment) {
5436			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5437			ret = -EINVAL;
5438			goto free_vm;
5439		}
5440
5441		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5442		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
5443
5444		pr_info("%lu provisioning blocks\n", map_size);
5445
5446		if (map_storep == NULL) {
5447			pr_err("out of mem. (MAP)\n");
5448			ret = -ENOMEM;
5449			goto free_vm;
5450		}
5451
5452		bitmap_zero(map_storep, map_size);
5453
5454		/* Map first 1KB for partition table */
5455		if (sdebug_num_parts)
5456			map_region(0, 2);
5457	}
5458
5459	pseudo_primary = root_device_register("pseudo_0");
5460	if (IS_ERR(pseudo_primary)) {
5461		pr_warn("root_device_register() error\n");
5462		ret = PTR_ERR(pseudo_primary);
5463		goto free_vm;
5464	}
5465	ret = bus_register(&pseudo_lld_bus);
5466	if (ret < 0) {
5467		pr_warn("bus_register error: %d\n", ret);
5468		goto dev_unreg;
5469	}
5470	ret = driver_register(&sdebug_driverfs_driver);
5471	if (ret < 0) {
5472		pr_warn("driver_register error: %d\n", ret);
5473		goto bus_unreg;
5474	}
5475
5476	host_to_add = sdebug_add_host;
5477	sdebug_add_host = 0;
5478
5479	for (k = 0; k < host_to_add; k++) {
5480		if (sdebug_add_adapter()) {
5481			pr_err("sdebug_add_adapter failed k=%d\n", k);
5482			break;
5483		}
5484	}
5485
5486	if (sdebug_verbose)
5487		pr_info("built %d host(s)\n", sdebug_add_host);
5488
5489	return 0;
5490
5491bus_unreg:
5492	bus_unregister(&pseudo_lld_bus);
5493dev_unreg:
5494	root_device_unregister(pseudo_primary);
5495free_vm:
5496	vfree(map_storep);
5497	vfree(dif_storep);
5498	vfree(fake_storep);
5499free_q_arr:
5500	kfree(sdebug_q_arr);
5501	return ret;
5502}
5503
5504static void __exit scsi_debug_exit(void)
5505{
5506	int k = sdebug_add_host;
5507
5508	stop_all_queued();
5509	free_all_queued();
5510	for (; k; k--)
5511		sdebug_remove_adapter();
5512	driver_unregister(&sdebug_driverfs_driver);
5513	bus_unregister(&pseudo_lld_bus);
5514	root_device_unregister(pseudo_primary);
5515
5516	vfree(map_storep);
5517	vfree(dif_storep);
5518	vfree(fake_storep);
5519	kfree(sdebug_q_arr);
5520}
5521
5522device_initcall(scsi_debug_init);
5523module_exit(scsi_debug_exit);
5524
5525static void sdebug_release_adapter(struct device *dev)
5526{
5527	struct sdebug_host_info *sdbg_host;
5528
5529	sdbg_host = to_sdebug_host(dev);
5530	kfree(sdbg_host);
5531}
5532
5533static int sdebug_add_adapter(void)
5534{
5535	int k, devs_per_host;
5536	int error = 0;
5537	struct sdebug_host_info *sdbg_host;
5538	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5539
5540	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5541	if (sdbg_host == NULL) {
5542		pr_err("out of memory at line %d\n", __LINE__);
5543		return -ENOMEM;
5544	}
5545
5546	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5547
5548	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5549	for (k = 0; k < devs_per_host; k++) {
5550		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5551		if (!sdbg_devinfo) {
5552			pr_err("out of memory at line %d\n", __LINE__);
5553			error = -ENOMEM;
5554			goto clean;
5555		}
5556	}
5557
5558	spin_lock(&sdebug_host_list_lock);
5559	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5560	spin_unlock(&sdebug_host_list_lock);
5561
5562	sdbg_host->dev.bus = &pseudo_lld_bus;
5563	sdbg_host->dev.parent = pseudo_primary;
5564	sdbg_host->dev.release = &sdebug_release_adapter;
5565	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5566
5567	error = device_register(&sdbg_host->dev);
5568
5569	if (error)
5570		goto clean;
5571
5572	++sdebug_add_host;
5573	return error;
5574
5575clean:
5576	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5577				 dev_list) {
5578		list_del(&sdbg_devinfo->dev_list);
5579		kfree(sdbg_devinfo);
5580	}
5581
5582	kfree(sdbg_host);
5583	return error;
5584}
5585
5586static void sdebug_remove_adapter(void)
5587{
5588	struct sdebug_host_info *sdbg_host = NULL;
5589
5590	spin_lock(&sdebug_host_list_lock);
5591	if (!list_empty(&sdebug_host_list)) {
5592		sdbg_host = list_entry(sdebug_host_list.prev,
5593				       struct sdebug_host_info, host_list);
5594		list_del(&sdbg_host->host_list);
5595	}
5596	spin_unlock(&sdebug_host_list_lock);
5597
5598	if (!sdbg_host)
5599		return;
5600
5601	device_unregister(&sdbg_host->dev);
5602	--sdebug_add_host;
5603}
5604
5605static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5606{
5607	int num_in_q = 0;
5608	struct sdebug_dev_info *devip;
5609
5610	block_unblock_all_queues(true);
5611	devip = (struct sdebug_dev_info *)sdev->hostdata;
5612	if (NULL == devip) {
5613		block_unblock_all_queues(false);
5614		return	-ENODEV;
5615	}
5616	num_in_q = atomic_read(&devip->num_in_q);
5617
5618	if (qdepth < 1)
5619		qdepth = 1;
5620	/* allow qdepth to exceed max host qc_arr elements for testing */
5621	if (qdepth > SDEBUG_CANQUEUE + 10)
5622		qdepth = SDEBUG_CANQUEUE + 10;
5623	scsi_change_queue_depth(sdev, qdepth);
5624
5625	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5626		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5627			    __func__, qdepth, num_in_q);
5628	}
5629	block_unblock_all_queues(false);
5630	return sdev->queue_depth;
5631}
5632
5633static bool fake_timeout(struct scsi_cmnd *scp)
5634{
5635	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5636		if (sdebug_every_nth < -1)
5637			sdebug_every_nth = -1;
5638		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5639			return true; /* ignore command causing timeout */
5640		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5641			 scsi_medium_access_command(scp))
5642			return true; /* time out reads and writes */
5643	}
5644	return false;
5645}
5646
5647static bool fake_host_busy(struct scsi_cmnd *scp)
5648{
5649	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5650		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5651}
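/*
 * Illustrative: with every_nth=100 and SDEBUG_OPT_HOST_BUSY set in
 * opts, every 100th command is answered with SCSI_MLQUEUE_HOST_BUSY
 * by scsi_debug_queuecommand() below instead of being serviced.
 */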
5652
5653static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5654				   struct scsi_cmnd *scp)
5655{
5656	u8 sdeb_i;
5657	struct scsi_device *sdp = scp->device;
5658	const struct opcode_info_t *oip;
5659	const struct opcode_info_t *r_oip;
5660	struct sdebug_dev_info *devip;
5661	u8 *cmd = scp->cmnd;
5662	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5663	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
5664	int k, na;
5665	int errsts = 0;
5666	u32 flags;
5667	u16 sa;
5668	u8 opcode = cmd[0];
5669	bool has_wlun_rl;
5670
5671	scsi_set_resid(scp, 0);
5672	if (sdebug_statistics)
5673		atomic_inc(&sdebug_cmnd_count);
5674	if (unlikely(sdebug_verbose &&
5675		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5676		char b[120];
5677		int n, len, sb;
5678
5679		len = scp->cmd_len;
5680		sb = (int)sizeof(b);
5681		if (len > 32)
5682			strcpy(b, "too long, over 32 bytes");
5683		else {
5684			for (k = 0, n = 0; k < len && n < sb; ++k)
5685				n += scnprintf(b + n, sb - n, "%02x ",
5686					       (u32)cmd[k]);
5687		}
5688		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
5689			    blk_mq_unique_tag(scp->request), b);
5690	}
5691	if (fake_host_busy(scp))
5692		return SCSI_MLQUEUE_HOST_BUSY;
5693	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5694	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5695		goto err_out;
5696
5697	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
5698	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
5699	devip = (struct sdebug_dev_info *)sdp->hostdata;
5700	if (unlikely(!devip)) {
5701		devip = find_build_dev_info(sdp);
5702		if (NULL == devip)
5703			goto err_out;
5704	}
5705	na = oip->num_attached;
5706	r_pfp = oip->pfp;
5707	if (na) {	/* multiple commands with this opcode */
5708		r_oip = oip;
5709		if (FF_SA & r_oip->flags) {
5710			if (F_SA_LOW & oip->flags)
5711				sa = 0x1f & cmd[1];
5712			else
5713				sa = get_unaligned_be16(cmd + 8);
5714			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5715				if (opcode == oip->opcode && sa == oip->sa)
5716					break;
5717			}
5718		} else {   /* since no service action only check opcode */
5719			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5720				if (opcode == oip->opcode)
5721					break;
5722			}
5723		}
5724		if (k > na) {
5725			if (F_SA_LOW & r_oip->flags)
5726				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5727			else if (F_SA_HIGH & r_oip->flags)
5728				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5729			else
5730				mk_sense_invalid_opcode(scp);
5731			goto check_cond;
5732		}
5733	}	/* else (when na==0) we assume the oip is a match */
5734	flags = oip->flags;
5735	if (unlikely(F_INV_OP & flags)) {
5736		mk_sense_invalid_opcode(scp);
5737		goto check_cond;
5738	}
5739	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5740		if (sdebug_verbose)
5741			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5742				    my_name, opcode, " supported for wlun");
5743		mk_sense_invalid_opcode(scp);
5744		goto check_cond;
5745	}
5746	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
5747		u8 rem;
5748		int j;
5749
5750		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5751			rem = ~oip->len_mask[k] & cmd[k];
5752			if (rem) {
5753				for (j = 7; j >= 0; --j, rem <<= 1) {
5754					if (0x80 & rem)
5755						break;
5756				}
5757				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5758				goto check_cond;
5759			}
5760		}
5761	}
5762	if (unlikely(!(F_SKIP_UA & flags) &&
5763		     find_first_bit(devip->uas_bm,
5764				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5765		errsts = make_ua(scp, devip);
5766		if (errsts)
5767			goto check_cond;
5768	}
5769	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5770		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5771		if (sdebug_verbose)
5772			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5773				    "%s\n", my_name, "initializing command "
5774				    "required");
5775		errsts = check_condition_result;
5776		goto fini;
5777	}
5778	if (sdebug_fake_rw && (F_FAKE_RW & flags))
5779		goto fini;
5780	if (unlikely(sdebug_every_nth)) {
5781		if (fake_timeout(scp))
5782			return 0;	/* ignore command: make trouble */
5783	}
5784	if (likely(oip->pfp))
5785		pfp = oip->pfp;	/* calls a resp_* function */
5786	else
5787		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
5788
5789fini:
5790	if (F_DELAY_OVERR & flags)
5791		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5792	else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
5793		/*
5794		 * If any delay is active, for F_SSU_DELAY want at least 1
5795		 * second and if sdebug_jdelay>0 want a long delay of that
5796		 * many seconds; for F_SYNC_DELAY want 1/20 of that.
5797		 */
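		/*
		 * Worked example (illustrative): with HZ=250, USER_HZ=100
		 * and sdebug_jdelay=5, F_SYNC_DELAY gives
		 * mult_frac(100 * 5, 250, 20 * 100) = 62 jiffies, i.e.
		 * roughly 5/20 of a second.
		 */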
5798		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5799		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5800
5801		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5802		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5803	} else
5804		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
5805				     sdebug_ndelay);
5806check_cond:
5807	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
5808err_out:
5809	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
5810}
5811
5812static struct scsi_host_template sdebug_driver_template = {
5813	.show_info =		scsi_debug_show_info,
5814	.write_info =		scsi_debug_write_info,
5815	.proc_name =		sdebug_proc_name,
5816	.name =			"SCSI DEBUG",
5817	.info =			scsi_debug_info,
5818	.slave_alloc =		scsi_debug_slave_alloc,
5819	.slave_configure =	scsi_debug_slave_configure,
5820	.slave_destroy =	scsi_debug_slave_destroy,
5821	.ioctl =		scsi_debug_ioctl,
5822	.queuecommand =		scsi_debug_queuecommand,
5823	.change_queue_depth =	sdebug_change_qdepth,
5824	.eh_abort_handler =	scsi_debug_abort,
5825	.eh_device_reset_handler = scsi_debug_device_reset,
5826	.eh_target_reset_handler = scsi_debug_target_reset,
5827	.eh_bus_reset_handler = scsi_debug_bus_reset,
5828	.eh_host_reset_handler = scsi_debug_host_reset,
5829	.can_queue =		SDEBUG_CANQUEUE,
5830	.this_id =		7,
5831	.sg_tablesize =		SG_MAX_SEGMENTS,
5832	.cmd_per_lun =		DEF_CMD_PER_LUN,
5833	.max_sectors =		-1U,
5834	.use_clustering = 	DISABLE_CLUSTERING,
5835	.module =		THIS_MODULE,
5836	.track_queue_depth =	1,
5837};
5838
5839static int sdebug_driver_probe(struct device *dev)
5840{
5841	int error = 0;
5842	struct sdebug_host_info *sdbg_host;
5843	struct Scsi_Host *hpnt;
5844	int hprot;
5845
5846	sdbg_host = to_sdebug_host(dev);
5847
5848	sdebug_driver_template.can_queue = sdebug_max_queue;
5849	if (sdebug_clustering)
5850		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5851	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5852	if (NULL == hpnt) {
5853		pr_err("scsi_host_alloc failed\n");
5854		error = -ENODEV;
5855		return error;
5856	}
5857	if (submit_queues > nr_cpu_ids) {
5858		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5859			my_name, submit_queues, nr_cpu_ids);
5860		submit_queues = nr_cpu_ids;
5861	}
5862	/* Decide whether to tell scsi subsystem that we want mq */
5863	/* Following should give the same answer for each host */
5864	if (shost_use_blk_mq(hpnt))
5865		hpnt->nr_hw_queues = submit_queues;
5866
5867	sdbg_host->shost = hpnt;
5868	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5869	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5870		hpnt->max_id = sdebug_num_tgts + 1;
5871	else
5872		hpnt->max_id = sdebug_num_tgts;
5873	/* = sdebug_max_luns; */
5874	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5875
5876	hprot = 0;
5877
5878	switch (sdebug_dif) {
5879
5880	case T10_PI_TYPE1_PROTECTION:
5881		hprot = SHOST_DIF_TYPE1_PROTECTION;
5882		if (sdebug_dix)
5883			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5884		break;
5885
5886	case T10_PI_TYPE2_PROTECTION:
5887		hprot = SHOST_DIF_TYPE2_PROTECTION;
5888		if (sdebug_dix)
5889			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5890		break;
5891
5892	case T10_PI_TYPE3_PROTECTION:
5893		hprot = SHOST_DIF_TYPE3_PROTECTION;
5894		if (sdebug_dix)
5895			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5896		break;
5897
5898	default:
5899		if (sdebug_dix)
5900			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5901		break;
5902	}
5903
5904	scsi_host_set_prot(hpnt, hprot);
5905
5906	if (have_dif_prot || sdebug_dix)
5907		pr_info("host protection%s%s%s%s%s%s%s\n",
5908			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5909			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5910			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5911			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5912			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5913			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5914			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5915
5916	if (sdebug_guard == 1)
5917		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5918	else
5919		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5920
5921	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5922	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5923	if (sdebug_every_nth)	/* need stats counters for every_nth */
5924		sdebug_statistics = true;
5925	error = scsi_add_host(hpnt, &sdbg_host->dev);
5926	if (error) {
5927		pr_err("scsi_add_host failed\n");
5928		error = -ENODEV;
5929		scsi_host_put(hpnt);
5930	} else
5931		scsi_scan_host(hpnt);
5932
5933	return error;
5934}
5935
5936static int sdebug_driver_remove(struct device *dev)
5937{
5938	struct sdebug_host_info *sdbg_host;
5939	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5940
5941	sdbg_host = to_sdebug_host(dev);
5942
5943	if (!sdbg_host) {
5944		pr_err("Unable to locate host info\n");
5945		return -ENODEV;
5946	}
5947
5948	scsi_remove_host(sdbg_host->shost);
5949
5950	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5951				 dev_list) {
5952		list_del(&sdbg_devinfo->dev_list);
5953		kfree(sdbg_devinfo);
5954	}
5955
5956	scsi_host_put(sdbg_host->shost);
5957	return 0;
5958}
5959
5960static int pseudo_lld_bus_match(struct device *dev,
5961				struct device_driver *dev_driver)
5962{
5963	return 1;
5964}
5965
5966static struct bus_type pseudo_lld_bus = {
5967	.name = "pseudo",
5968	.match = pseudo_lld_bus_match,
5969	.probe = sdebug_driver_probe,
5970	.remove = sdebug_driver_remove,
5971	.drv_groups = sdebug_drv_groups,
5972};