   1/*
   2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   3 *  Copyright (C) 1992  Eric Youngdale
   4 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   5 *  to make sure that we are not getting blocks mixed up, and PANIC if
   6 *  anything out of the ordinary is seen.
   7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   8 *
   9 *  This version is more generic, simulating a variable number of disks
  10 *  (or disk-like devices) sharing a common amount of RAM. To be more
  11 *  realistic, the simulated devices have the transport attributes of
  12 *  SAS disks.
  13 *
  14 *
  15 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  16 *
  17 *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
  18 *   dpg: work for devfs large number of disks [20010809]
  19 *        forked for lk 2.5 series [20011216, 20020101]
  20 *        use vmalloc() more inquiry+mode_sense [20020302]
  21 *        add timers for delayed responses [20020721]
  22 *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
  23 *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
  24 *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
  25 *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
  26 */
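/*
 * Illustrative sketch (not part of this file; example_* names are
 * hypothetical): the module options mentioned above are plain integer
 * module parameters, so something along these lines is all that is needed
 * to accept "modprobe scsi_debug num_tgts=2" or the boot option
 * "scsi_debug.num_tgts=2".
 */
#if 0	/* sketch only, not compiled */
static int example_num_tgts = 1;
module_param_named(num_tgts, example_num_tgts, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tgts, "number of targets per host (def=1)");
#endif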
  27
  28#include <linux/module.h>
  29
  30#include <linux/kernel.h>
  31#include <linux/errno.h>
  32#include <linux/timer.h>
  33#include <linux/slab.h>
  34#include <linux/types.h>
  35#include <linux/string.h>
  36#include <linux/genhd.h>
  37#include <linux/fs.h>
  38#include <linux/init.h>
  39#include <linux/proc_fs.h>
  40#include <linux/vmalloc.h>
  41#include <linux/moduleparam.h>
  42#include <linux/scatterlist.h>
  43#include <linux/blkdev.h>
  44#include <linux/crc-t10dif.h>
  45
  46#include <net/checksum.h>
  47
  48#include <asm/unaligned.h>
  49
  50#include <scsi/scsi.h>
  51#include <scsi/scsi_cmnd.h>
  52#include <scsi/scsi_device.h>
  53#include <scsi/scsi_host.h>
  54#include <scsi/scsicam.h>
  55#include <scsi/scsi_eh.h>
  56#include <scsi/scsi_dbg.h>
  57
  58#include "sd.h"
  59#include "scsi_logging.h"
  60
  61#define SCSI_DEBUG_VERSION "1.82"
  62static const char * scsi_debug_version_date = "20100324";
  63
  64/* Additional Sense Code (ASC) */
  65#define NO_ADDITIONAL_SENSE 0x0
  66#define LOGICAL_UNIT_NOT_READY 0x4
  67#define UNRECOVERED_READ_ERR 0x11
  68#define PARAMETER_LIST_LENGTH_ERR 0x1a
  69#define INVALID_OPCODE 0x20
  70#define ADDR_OUT_OF_RANGE 0x21
  71#define INVALID_COMMAND_OPCODE 0x20
  72#define INVALID_FIELD_IN_CDB 0x24
  73#define INVALID_FIELD_IN_PARAM_LIST 0x26
  74#define POWERON_RESET 0x29
  75#define SAVING_PARAMS_UNSUP 0x39
  76#define TRANSPORT_PROBLEM 0x4b
  77#define THRESHOLD_EXCEEDED 0x5d
  78#define LOW_POWER_COND_ON 0x5e
  79
  80/* Additional Sense Code Qualifier (ASCQ) */
  81#define ACK_NAK_TO 0x3
  82
  83#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
  84
  85/* Default values for driver parameters */
  86#define DEF_NUM_HOST   1
  87#define DEF_NUM_TGTS   1
  88#define DEF_MAX_LUNS   1
  89/* With these defaults, this driver will make 1 host with 1 target
  90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
  91 */
  92#define DEF_ATO 1
  93#define DEF_DELAY   1
  94#define DEF_DEV_SIZE_MB   8
  95#define DEF_DIF 0
  96#define DEF_DIX 0
  97#define DEF_D_SENSE   0
  98#define DEF_EVERY_NTH   0
  99#define DEF_FAKE_RW	0
 100#define DEF_GUARD 0
 101#define DEF_LBPU 0
 102#define DEF_LBPWS 0
 103#define DEF_LBPWS10 0
 104#define DEF_LBPRZ 1
 105#define DEF_LOWEST_ALIGNED 0
 106#define DEF_NO_LUN_0   0
 107#define DEF_NUM_PARTS   0
 108#define DEF_OPTS   0
 109#define DEF_OPT_BLKS 64
 110#define DEF_PHYSBLK_EXP 0
 111#define DEF_PTYPE   0
 112#define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
 113#define DEF_SECTOR_SIZE 512
 114#define DEF_UNMAP_ALIGNMENT 0
 115#define DEF_UNMAP_GRANULARITY 1
 116#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 117#define DEF_UNMAP_MAX_DESC 256
 118#define DEF_VIRTUAL_GB   0
 119#define DEF_VPD_USE_HOSTNO 1
 120#define DEF_WRITESAME_LENGTH 0xFFFF
 121
 122/* bit mask values for scsi_debug_opts */
 123#define SCSI_DEBUG_OPT_NOISE   1
 124#define SCSI_DEBUG_OPT_MEDIUM_ERR   2
 125#define SCSI_DEBUG_OPT_TIMEOUT   4
 126#define SCSI_DEBUG_OPT_RECOVERED_ERR   8
 127#define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
 128#define SCSI_DEBUG_OPT_DIF_ERR   32
 129#define SCSI_DEBUG_OPT_DIX_ERR   64
 130#define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
 131/* When "every_nth" > 0 then modulo "every_nth" commands:
  132 *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 133 *   - a RECOVERED_ERROR is simulated on successful read and write
 134 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 135 *   - a TRANSPORT_ERROR is simulated on successful read and write
 136 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 137 *
 138 * When "every_nth" < 0 then after "- every_nth" commands:
  139 *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 140 *   - a RECOVERED_ERROR is simulated on successful read and write
 141 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 142 *   - a TRANSPORT_ERROR is simulated on successful read and write
 143 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 144 * This will continue until some other action occurs (e.g. the user
 145 * writing a new value (other than -1 or 1) to every_nth via sysfs).
 146 */
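/*
 * Simplified sketch (an assumption about logic elsewhere in this driver,
 * not a verbatim copy) of the kind of per-command check that the comment
 * above describes:
 */
#if 0	/* sketch only, not compiled */
	if (scsi_debug_every_nth &&
	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
		scsi_debug_cmnd_count = 0;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 0;	/* swallow the command: no response */
	}
#endif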
 147
  148/* When the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a
  149 * medium error is simulated at this sector on read commands: */
 150#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 151#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
 152
 153/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 154 * or "peripheral device" addressing (value 0) */
 155#define SAM2_LUN_ADDRESS_METHOD 0
 156#define SAM2_WLUN_REPORT_LUNS 0xc101
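/* For reference: with "flat space" addressing a LUN is reported as
 * 0x4000 | lun in the first two bytes of the 8-byte REPORT LUNS entry
 * (address method 01b in the top two bits), while "peripheral device"
 * addressing simply places the LUN in the second byte. 0xc101 is the
 * REPORT LUNS well-known LUN (address method 11b). */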
 157
  158/* Can queue up to this number of commands. Typically commands that
  159 * have a non-zero delay are queued. */
 160#define SCSI_DEBUG_CANQUEUE  255
 161
 162static int scsi_debug_add_host = DEF_NUM_HOST;
 163static int scsi_debug_ato = DEF_ATO;
 164static int scsi_debug_delay = DEF_DELAY;
 165static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
 166static int scsi_debug_dif = DEF_DIF;
 167static int scsi_debug_dix = DEF_DIX;
 168static int scsi_debug_dsense = DEF_D_SENSE;
 169static int scsi_debug_every_nth = DEF_EVERY_NTH;
 170static int scsi_debug_fake_rw = DEF_FAKE_RW;
 171static int scsi_debug_guard = DEF_GUARD;
 172static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
 173static int scsi_debug_max_luns = DEF_MAX_LUNS;
 174static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
 175static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
 176static int scsi_debug_no_uld = 0;
 177static int scsi_debug_num_parts = DEF_NUM_PARTS;
 178static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 179static int scsi_debug_opt_blks = DEF_OPT_BLKS;
 180static int scsi_debug_opts = DEF_OPTS;
 181static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
 182static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
 183static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
 184static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
 185static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
 186static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 187static unsigned int scsi_debug_lbpu = DEF_LBPU;
 188static unsigned int scsi_debug_lbpws = DEF_LBPWS;
 189static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
 190static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
 191static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 192static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 193static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 194static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 195static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
 196
 197static int scsi_debug_cmnd_count = 0;
 198
 199#define DEV_READONLY(TGT)      (0)
 200#define DEV_REMOVEABLE(TGT)    (0)
 201
 202static unsigned int sdebug_store_sectors;
 203static sector_t sdebug_capacity;	/* in sectors */
 204
  205/* old BIOS geometry; the kernel may get rid of this but some mode sense
  206   pages may still need it */
 207static int sdebug_heads;		/* heads per disk */
 208static int sdebug_cylinders_per;	/* cylinders per surface */
 209static int sdebug_sectors_per;		/* sectors per cylinder */
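/*
 * One plausible way (a sketch under assumptions, not this driver's exact
 * code) to fake a CHS geometry from the simulated capacity: pick heads and
 * sectors-per-track, then derive cylinders.
 */
#if 0	/* sketch only, not compiled */
static void example_fake_chs_geometry(void)
{
	sdebug_heads = 8;		/* arbitrary but BIOS-friendly */
	sdebug_sectors_per = 32;
	sdebug_cylinders_per = (int)(sdebug_capacity /
				     (sdebug_heads * sdebug_sectors_per));
}
#endif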
 210
 211#define SDEBUG_MAX_PARTS 4
 212
 213#define SDEBUG_SENSE_LEN 32
 214
 215#define SCSI_DEBUG_MAX_CMD_LEN 32
 216
 217static unsigned int scsi_debug_lbp(void)
 218{
 219	return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
 220}
 221
 222struct sdebug_dev_info {
 223	struct list_head dev_list;
 224	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
 225	unsigned int channel;
 226	unsigned int target;
 227	unsigned int lun;
 228	struct sdebug_host_info *sdbg_host;
 229	unsigned int wlun;
 230	char reset;
 231	char stopped;
 232	char used;
 233};
 234
 235struct sdebug_host_info {
 236	struct list_head host_list;
 237	struct Scsi_Host *shost;
 238	struct device dev;
 239	struct list_head dev_info_list;
 240};
 241
 242#define to_sdebug_host(d)	\
 243	container_of(d, struct sdebug_host_info, dev)
 244
 245static LIST_HEAD(sdebug_host_list);
 246static DEFINE_SPINLOCK(sdebug_host_list_lock);
 247
 248typedef void (* done_funct_t) (struct scsi_cmnd *);
 249
 250struct sdebug_queued_cmd {
 251	int in_use;
 252	struct timer_list cmnd_timer;
 253	done_funct_t done_funct;
 254	struct scsi_cmnd * a_cmnd;
 255	int scsi_result;
 256};
 257static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
 258
 259static unsigned char * fake_storep;	/* ramdisk storage */
 260static unsigned char *dif_storep;	/* protection info */
 261static void *map_storep;		/* provisioning map */
 262
 263static unsigned long map_size;
 264static int num_aborts = 0;
 265static int num_dev_resets = 0;
 266static int num_bus_resets = 0;
 267static int num_host_resets = 0;
 268static int dix_writes;
 269static int dix_reads;
 270static int dif_errors;
 271
 272static DEFINE_SPINLOCK(queued_arr_lock);
 273static DEFINE_RWLOCK(atomic_rw);
 274
 275static char sdebug_proc_name[] = "scsi_debug";
 276
 277static struct bus_type pseudo_lld_bus;
 278
 279static inline sector_t dif_offset(sector_t sector)
 280{
 281	return sector << 3;
 282}
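/*
 * Sketch of the layout defined by T10 DIF (not copied from this file):
 * each logical block carries 8 bytes of protection information, which is
 * why dif_offset() is simply "sector * 8".
 */
#if 0	/* sketch only, not compiled */
struct example_dif_tuple {
	__be16 guard_tag;	/* CRC-16 of the data block */
	__be16 app_tag;		/* application tag */
	__be32 ref_tag;		/* low 32 bits of the target LBA */
};				/* 8 bytes per sector in dif_storep */
#endif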
 283
 284static struct device_driver sdebug_driverfs_driver = {
 285	.name 		= sdebug_proc_name,
 286	.bus		= &pseudo_lld_bus,
 287};
 288
 289static const int check_condition_result =
 290		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 291
 292static const int illegal_condition_result =
 293	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
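/* For reference: in this kernel era the 32-bit SCSI result is packed as
 * (driver_byte << 24) | (host_byte << 16) | (msg_byte << 8) | status_byte,
 * so the values above combine DRIVER_SENSE, an optional DID_* host code
 * and SAM_STAT_CHECK_CONDITION. */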
 294
 295static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
 296				    0, 0, 0x2, 0x4b};
 297static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
 298			           0, 0, 0x0, 0x0};
 299
 300static int sdebug_add_adapter(void);
 301static void sdebug_remove_adapter(void);
 302
 303static void sdebug_max_tgts_luns(void)
 304{
 305	struct sdebug_host_info *sdbg_host;
 306	struct Scsi_Host *hpnt;
 307
 308	spin_lock(&sdebug_host_list_lock);
 309	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 310		hpnt = sdbg_host->shost;
 311		if ((hpnt->this_id >= 0) &&
 312		    (scsi_debug_num_tgts > hpnt->this_id))
 313			hpnt->max_id = scsi_debug_num_tgts + 1;
 314		else
 315			hpnt->max_id = scsi_debug_num_tgts;
 316		/* scsi_debug_max_luns; */
 317		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
 318	}
 319	spin_unlock(&sdebug_host_list_lock);
 320}
 321
 322static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
 323			    int asc, int asq)
 324{
 325	unsigned char *sbuff;
 326
 327	sbuff = devip->sense_buff;
 328	memset(sbuff, 0, SDEBUG_SENSE_LEN);
 329
 330	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
 331
 332	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 333		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
 334		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
 335}
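/* For reference: scsi_build_sense_buffer() emits descriptor-format sense
 * (response code 0x72) when its first argument is non-zero, i.e. when the
 * "dsense" option is set, and fixed-format sense (0x70) otherwise. */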
 336
 337static void get_data_transfer_info(unsigned char *cmd,
 338				   unsigned long long *lba, unsigned int *num,
 339				   u32 *ei_lba)
 340{
 341	*ei_lba = 0;
 342
 343	switch (*cmd) {
 344	case VARIABLE_LENGTH_CMD:
 345		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
 346			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
 347			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
 348			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;
 349
 350		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
 351			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;
 352
 353		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
 354			(u32)cmd[28] << 24;
 355		break;
 356
 357	case WRITE_SAME_16:
 358	case WRITE_16:
 359	case READ_16:
 360		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
 361			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
 362			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
 363			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;
 364
 365		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
 366			(u32)cmd[10] << 24;
 367		break;
 368	case WRITE_12:
 369	case READ_12:
 370		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
 371			(u32)cmd[2] << 24;
 372
 373		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
 374			(u32)cmd[6] << 24;
 375		break;
 376	case WRITE_SAME:
 377	case WRITE_10:
 378	case READ_10:
 379	case XDWRITEREAD_10:
 380		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 |	(u32)cmd[3] << 16 |
 381			(u32)cmd[2] << 24;
 382
 383		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
 384		break;
 385	case WRITE_6:
 386	case READ_6:
 387		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
 388			(u32)(cmd[1] & 0x1f) << 16;
 389		*num = (0 == cmd[4]) ? 256 : cmd[4];
 390		break;
 391	default:
 392		break;
 393	}
 394}
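/*
 * A minimal sketch (an illustration, not part of the driver) of the same
 * big-endian CDB decoding using the helpers from <asm/unaligned.h>, which
 * is already included above; for READ_16/WRITE_16 the LBA occupies CDB
 * bytes 2..9 and the transfer length bytes 10..13:
 */
#if 0	/* sketch only, not compiled */
static void example_get_rw16_fields(const unsigned char *cmd,
				    unsigned long long *lba,
				    unsigned int *num)
{
	*lba = get_unaligned_be64(cmd + 2);
	*num = get_unaligned_be32(cmd + 10);
}
#endif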
 395
 396static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 397{
 398	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
 399		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
 400	}
 401	return -EINVAL;
 402	/* return -ENOTTY; // correct return but upsets fdisk */
 403}
 404
 405static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
 406			   struct sdebug_dev_info * devip)
 407{
 408	if (devip->reset) {
 409		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 410			printk(KERN_INFO "scsi_debug: Reporting Unit "
 411			       "attention: power on reset\n");
 412		devip->reset = 0;
 413		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
 414		return check_condition_result;
 415	}
 416	if ((0 == reset_only) && devip->stopped) {
 417		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 418			printk(KERN_INFO "scsi_debug: Reporting Not "
 419			       "ready: initializing command required\n");
 420		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
 421				0x2);
 422		return check_condition_result;
 423	}
 424	return 0;
 425}
 426
 427/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
 428static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 429				int arr_len)
 430{
 431	int act_len;
 432	struct scsi_data_buffer *sdb = scsi_in(scp);
 433
 434	if (!sdb->length)
 435		return 0;
 436	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
 437		return (DID_ERROR << 16);
 438
 439	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
 440				      arr, arr_len);
 441	if (sdb->resid)
 442		sdb->resid -= act_len;
 443	else
 444		sdb->resid = scsi_bufflen(scp) - act_len;
 445
 446	return 0;
 447}
 448
 449/* Returns number of bytes fetched into 'arr' or -1 if error. */
 450static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 451			       int arr_len)
 452{
 453	if (!scsi_bufflen(scp))
 454		return 0;
 455	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
 456		return -1;
 457
 458	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
 459}
 460
 461
 462static const char * inq_vendor_id = "Linux   ";
 463static const char * inq_product_id = "scsi_debug      ";
 464static const char * inq_product_rev = "0004";
 465
 466static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
 467			   int target_dev_id, int dev_id_num,
 468			   const char * dev_id_str,
 469			   int dev_id_str_len)
 470{
 471	int num, port_a;
 472	char b[32];
 473
 474	port_a = target_dev_id + 1;
 475	/* T10 vendor identifier field format (faked) */
 476	arr[0] = 0x2;	/* ASCII */
 477	arr[1] = 0x1;
 478	arr[2] = 0x0;
 479	memcpy(&arr[4], inq_vendor_id, 8);
 480	memcpy(&arr[12], inq_product_id, 16);
 481	memcpy(&arr[28], dev_id_str, dev_id_str_len);
 482	num = 8 + 16 + dev_id_str_len;
 483	arr[3] = num;
 484	num += 4;
 485	if (dev_id_num >= 0) {
 486		/* NAA-5, Logical unit identifier (binary) */
 487		arr[num++] = 0x1;	/* binary (not necessarily sas) */
 488		arr[num++] = 0x3;	/* PIV=0, lu, naa */
 489		arr[num++] = 0x0;
 490		arr[num++] = 0x8;
 491		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
 492		arr[num++] = 0x33;
 493		arr[num++] = 0x33;
 494		arr[num++] = 0x30;
 495		arr[num++] = (dev_id_num >> 24);
 496		arr[num++] = (dev_id_num >> 16) & 0xff;
 497		arr[num++] = (dev_id_num >> 8) & 0xff;
 498		arr[num++] = dev_id_num & 0xff;
 499		/* Target relative port number */
 500		arr[num++] = 0x61;	/* proto=sas, binary */
 501		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
 502		arr[num++] = 0x0;	/* reserved */
 503		arr[num++] = 0x4;	/* length */
 504		arr[num++] = 0x0;	/* reserved */
 505		arr[num++] = 0x0;	/* reserved */
 506		arr[num++] = 0x0;
 507		arr[num++] = 0x1;	/* relative port A */
 508	}
 509	/* NAA-5, Target port identifier */
 510	arr[num++] = 0x61;	/* proto=sas, binary */
 511	arr[num++] = 0x93;	/* piv=1, target port, naa */
 512	arr[num++] = 0x0;
 513	arr[num++] = 0x8;
 514	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
 515	arr[num++] = 0x22;
 516	arr[num++] = 0x22;
 517	arr[num++] = 0x20;
 518	arr[num++] = (port_a >> 24);
 519	arr[num++] = (port_a >> 16) & 0xff;
 520	arr[num++] = (port_a >> 8) & 0xff;
 521	arr[num++] = port_a & 0xff;
 522	/* NAA-5, Target port group identifier */
 523	arr[num++] = 0x61;	/* proto=sas, binary */
 524	arr[num++] = 0x95;	/* piv=1, target port group id */
 525	arr[num++] = 0x0;
 526	arr[num++] = 0x4;
 527	arr[num++] = 0;
 528	arr[num++] = 0;
 529	arr[num++] = (port_group_id >> 8) & 0xff;
 530	arr[num++] = port_group_id & 0xff;
 531	/* NAA-5, Target device identifier */
 532	arr[num++] = 0x61;	/* proto=sas, binary */
 533	arr[num++] = 0xa3;	/* piv=1, target device, naa */
 534	arr[num++] = 0x0;
 535	arr[num++] = 0x8;
 536	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
 537	arr[num++] = 0x22;
 538	arr[num++] = 0x22;
 539	arr[num++] = 0x20;
 540	arr[num++] = (target_dev_id >> 24);
 541	arr[num++] = (target_dev_id >> 16) & 0xff;
 542	arr[num++] = (target_dev_id >> 8) & 0xff;
 543	arr[num++] = target_dev_id & 0xff;
 544	/* SCSI name string: Target device identifier */
 545	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
 546	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
 547	arr[num++] = 0x0;
 548	arr[num++] = 24;
 549	memcpy(arr + num, "naa.52222220", 12);
 550	num += 12;
 551	snprintf(b, sizeof(b), "%08X", target_dev_id);
 552	memcpy(arr + num, b, 8);
 553	num += 8;
 554	memset(arr + num, 0, 4);
 555	num += 4;
 556	return num;
 557}
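/* For reference: in each identification descriptor built above, byte 0
 * holds the protocol identifier (high nibble, 6h = SAS) and code set
 * (low nibble, 1h = binary, 2h = ASCII, 3h = UTF-8); byte 1 holds PIV,
 * the association (00b = logical unit, 01b = target port, 10b = target
 * device) and the designator type (3h = NAA, 4h = relative port,
 * 5h = port group, 8h = SCSI name string); byte 3 is the designator
 * length. */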
 558
 559
 560static unsigned char vpd84_data[] = {
 561/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
 562    0x22,0x22,0x22,0x0,0xbb,0x1,
 563    0x22,0x22,0x22,0x0,0xbb,0x2,
 564};
 565
 566static int inquiry_evpd_84(unsigned char * arr)
 567{
 568	memcpy(arr, vpd84_data, sizeof(vpd84_data));
 569	return sizeof(vpd84_data);
 570}
 571
 572static int inquiry_evpd_85(unsigned char * arr)
 573{
 574	int num = 0;
 575	const char * na1 = "https://www.kernel.org/config";
 576	const char * na2 = "http://www.kernel.org/log";
 577	int plen, olen;
 578
 579	arr[num++] = 0x1;	/* lu, storage config */
 580	arr[num++] = 0x0;	/* reserved */
 581	arr[num++] = 0x0;
 582	olen = strlen(na1);
 583	plen = olen + 1;
 584	if (plen % 4)
 585		plen = ((plen / 4) + 1) * 4;
  586	arr[num++] = plen;	/* length, null terminated, padded */
 587	memcpy(arr + num, na1, olen);
 588	memset(arr + num + olen, 0, plen - olen);
 589	num += plen;
 590
 591	arr[num++] = 0x4;	/* lu, logging */
 592	arr[num++] = 0x0;	/* reserved */
 593	arr[num++] = 0x0;
 594	olen = strlen(na2);
 595	plen = olen + 1;
 596	if (plen % 4)
 597		plen = ((plen / 4) + 1) * 4;
 598	arr[num++] = plen;	/* length, null terminated, padded */
 599	memcpy(arr + num, na2, olen);
 600	memset(arr + num + olen, 0, plen - olen);
 601	num += plen;
 602
 603	return num;
 604}
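/* For reference: the pad-to-multiple-of-4 calculation above is equivalent
 * to
 *	plen = roundup(olen + 1, 4);
 * using the roundup() helper from <linux/kernel.h>. */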
 605
 606/* SCSI ports VPD page */
 607static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
 608{
 609	int num = 0;
 610	int port_a, port_b;
 611
 612	port_a = target_dev_id + 1;
 613	port_b = port_a + 1;
 614	arr[num++] = 0x0;	/* reserved */
 615	arr[num++] = 0x0;	/* reserved */
 616	arr[num++] = 0x0;
 617	arr[num++] = 0x1;	/* relative port 1 (primary) */
 618	memset(arr + num, 0, 6);
 619	num += 6;
 620	arr[num++] = 0x0;
 621	arr[num++] = 12;	/* length tp descriptor */
 622	/* naa-5 target port identifier (A) */
 623	arr[num++] = 0x61;	/* proto=sas, binary */
 624	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
 625	arr[num++] = 0x0;	/* reserved */
 626	arr[num++] = 0x8;	/* length */
 627	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
 628	arr[num++] = 0x22;
 629	arr[num++] = 0x22;
 630	arr[num++] = 0x20;
 631	arr[num++] = (port_a >> 24);
 632	arr[num++] = (port_a >> 16) & 0xff;
 633	arr[num++] = (port_a >> 8) & 0xff;
 634	arr[num++] = port_a & 0xff;
 635
 636	arr[num++] = 0x0;	/* reserved */
 637	arr[num++] = 0x0;	/* reserved */
 638	arr[num++] = 0x0;
 639	arr[num++] = 0x2;	/* relative port 2 (secondary) */
 640	memset(arr + num, 0, 6);
 641	num += 6;
 642	arr[num++] = 0x0;
 643	arr[num++] = 12;	/* length tp descriptor */
 644	/* naa-5 target port identifier (B) */
 645	arr[num++] = 0x61;	/* proto=sas, binary */
 646	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
 647	arr[num++] = 0x0;	/* reserved */
 648	arr[num++] = 0x8;	/* length */
 649	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
 650	arr[num++] = 0x22;
 651	arr[num++] = 0x22;
 652	arr[num++] = 0x20;
 653	arr[num++] = (port_b >> 24);
 654	arr[num++] = (port_b >> 16) & 0xff;
 655	arr[num++] = (port_b >> 8) & 0xff;
 656	arr[num++] = port_b & 0xff;
 657
 658	return num;
 659}
 660
 661
 662static unsigned char vpd89_data[] = {
 663/* from 4th byte */ 0,0,0,0,
 664'l','i','n','u','x',' ',' ',' ',
 665'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
 666'1','2','3','4',
 6670x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
 6680xec,0,0,0,
 6690x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
 6700,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
 6710x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
 6720x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
 6730x53,0x41,
 6740x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
 6750x20,0x20,
 6760x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
 6770x10,0x80,
 6780,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
 6790x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
 6800x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
 6810,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
 6820x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
 6830x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
 6840,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
 6850,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6870,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6880x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
 6890,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
 6900xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
 6910,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
 6920,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6930,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6940,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6950,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6960,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6970,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6980,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 6990,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7000,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7010,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7020,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 7030,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
 704};
 705
 706static int inquiry_evpd_89(unsigned char * arr)
 707{
 708	memcpy(arr, vpd89_data, sizeof(vpd89_data));
 709	return sizeof(vpd89_data);
 710}
 711
 712
 713/* Block limits VPD page (SBC-3) */
 714static unsigned char vpdb0_data[] = {
 715	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
 716	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 717	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 718	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 719};
 720
 721static int inquiry_evpd_b0(unsigned char * arr)
 722{
 723	unsigned int gran;
 724
 725	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
 726
 727	/* Optimal transfer length granularity */
 728	gran = 1 << scsi_debug_physblk_exp;
 729	arr[2] = (gran >> 8) & 0xff;
 730	arr[3] = gran & 0xff;
 731
 732	/* Maximum Transfer Length */
 733	if (sdebug_store_sectors > 0x400) {
 734		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
 735		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
 736		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
 737		arr[7] = sdebug_store_sectors & 0xff;
 738	}
 739
 740	/* Optimal Transfer Length */
 741	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
 742
 743	if (scsi_debug_lbpu) {
 744		/* Maximum Unmap LBA Count */
 745		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
 746
 747		/* Maximum Unmap Block Descriptor Count */
 748		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
 749	}
 750
 751	/* Unmap Granularity Alignment */
 752	if (scsi_debug_unmap_alignment) {
 753		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
 754		arr[28] |= 0x80; /* UGAVALID */
 755	}
 756
 757	/* Optimal Unmap Granularity */
 758	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
 759
 760	/* Maximum WRITE SAME Length */
 761	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
 762
 763	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
 764
 765	return sizeof(vpdb0_data);
 766}
 767
 768/* Block device characteristics VPD page (SBC-3) */
 769static int inquiry_evpd_b1(unsigned char *arr)
 770{
 771	memset(arr, 0, 0x3c);
 772	arr[0] = 0;
 773	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
 774	arr[2] = 0;
 775	arr[3] = 5;	/* less than 1.8" */
 776
 777	return 0x3c;
 778}
 779
 780/* Logical block provisioning VPD page (SBC-3) */
 781static int inquiry_evpd_b2(unsigned char *arr)
 782{
 783	memset(arr, 0, 0x4);
 784	arr[0] = 0;			/* threshold exponent */
 785
 786	if (scsi_debug_lbpu)
 787		arr[1] = 1 << 7;
 788
 789	if (scsi_debug_lbpws)
 790		arr[1] |= 1 << 6;
 791
 792	if (scsi_debug_lbpws10)
 793		arr[1] |= 1 << 5;
 794
 795	if (scsi_debug_lbprz)
 796		arr[1] |= 1 << 2;
 797
 798	return 0x4;
 799}
 800
 801#define SDEBUG_LONG_INQ_SZ 96
 802#define SDEBUG_MAX_INQ_ARR_SZ 584
 803
 804static int resp_inquiry(struct scsi_cmnd * scp, int target,
 805			struct sdebug_dev_info * devip)
 806{
 807	unsigned char pq_pdt;
 808	unsigned char * arr;
 809	unsigned char *cmd = (unsigned char *)scp->cmnd;
 810	int alloc_len, n, ret;
 811
 812	alloc_len = (cmd[3] << 8) + cmd[4];
 813	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
 814	if (! arr)
 815		return DID_REQUEUE << 16;
 816	if (devip->wlun)
 817		pq_pdt = 0x1e;	/* present, wlun */
 818	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
 819		pq_pdt = 0x7f;	/* not present, no device type */
 820	else
 821		pq_pdt = (scsi_debug_ptype & 0x1f);
 822	arr[0] = pq_pdt;
 823	if (0x2 & cmd[1]) {  /* CMDDT bit set */
 824		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
 825			       	0);
 826		kfree(arr);
 827		return check_condition_result;
 828	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
 829		int lu_id_num, port_group_id, target_dev_id, len;
 830		char lu_id_str[6];
 831		int host_no = devip->sdbg_host->shost->host_no;
 832		
 833		port_group_id = (((host_no + 1) & 0x7f) << 8) +
 834		    (devip->channel & 0x7f);
 835		if (0 == scsi_debug_vpd_use_hostno)
 836			host_no = 0;
 837		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
 838			    (devip->target * 1000) + devip->lun);
 839		target_dev_id = ((host_no + 1) * 2000) +
 840				 (devip->target * 1000) - 3;
 841		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
 842		if (0 == cmd[2]) { /* supported vital product data pages */
 843			arr[1] = cmd[2];	/*sanity */
 844			n = 4;
 845			arr[n++] = 0x0;   /* this page */
 846			arr[n++] = 0x80;  /* unit serial number */
 847			arr[n++] = 0x83;  /* device identification */
 848			arr[n++] = 0x84;  /* software interface ident. */
 849			arr[n++] = 0x85;  /* management network addresses */
 850			arr[n++] = 0x86;  /* extended inquiry */
 851			arr[n++] = 0x87;  /* mode page policy */
 852			arr[n++] = 0x88;  /* SCSI ports */
 853			arr[n++] = 0x89;  /* ATA information */
 854			arr[n++] = 0xb0;  /* Block limits (SBC) */
 855			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
 856			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
 857				arr[n++] = 0xb2;
 858			arr[3] = n - 4;	  /* number of supported VPD pages */
 859		} else if (0x80 == cmd[2]) { /* unit serial number */
 860			arr[1] = cmd[2];	/*sanity */
 861			arr[3] = len;
 862			memcpy(&arr[4], lu_id_str, len);
 863		} else if (0x83 == cmd[2]) { /* device identification */
 864			arr[1] = cmd[2];	/*sanity */
 865			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
 866						 target_dev_id, lu_id_num,
 867						 lu_id_str, len);
 868		} else if (0x84 == cmd[2]) { /* Software interface ident. */
 869			arr[1] = cmd[2];	/*sanity */
 870			arr[3] = inquiry_evpd_84(&arr[4]);
 871		} else if (0x85 == cmd[2]) { /* Management network addresses */
 872			arr[1] = cmd[2];	/*sanity */
 873			arr[3] = inquiry_evpd_85(&arr[4]);
 874		} else if (0x86 == cmd[2]) { /* extended inquiry */
 875			arr[1] = cmd[2];	/*sanity */
 876			arr[3] = 0x3c;	/* number of following entries */
 877			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
 878				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
 879			else if (scsi_debug_dif)
 880				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
 881			else
 882				arr[4] = 0x0;   /* no protection stuff */
 883			arr[5] = 0x7;   /* head of q, ordered + simple q's */
 884		} else if (0x87 == cmd[2]) { /* mode page policy */
 885			arr[1] = cmd[2];	/*sanity */
 886			arr[3] = 0x8;	/* number of following entries */
 887			arr[4] = 0x2;	/* disconnect-reconnect mp */
 888			arr[6] = 0x80;	/* mlus, shared */
 889			arr[8] = 0x18;	 /* protocol specific lu */
 890			arr[10] = 0x82;	 /* mlus, per initiator port */
 891		} else if (0x88 == cmd[2]) { /* SCSI Ports */
 892			arr[1] = cmd[2];	/*sanity */
 893			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
 894		} else if (0x89 == cmd[2]) { /* ATA information */
 895			arr[1] = cmd[2];        /*sanity */
 896			n = inquiry_evpd_89(&arr[4]);
 897			arr[2] = (n >> 8);
 898			arr[3] = (n & 0xff);
 899		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
 900			arr[1] = cmd[2];        /*sanity */
 901			arr[3] = inquiry_evpd_b0(&arr[4]);
 902		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
 903			arr[1] = cmd[2];        /*sanity */
 904			arr[3] = inquiry_evpd_b1(&arr[4]);
 905		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
 906			arr[1] = cmd[2];        /*sanity */
 907			arr[3] = inquiry_evpd_b2(&arr[4]);
 908		} else {
 909			/* Illegal request, invalid field in cdb */
 910			mk_sense_buffer(devip, ILLEGAL_REQUEST,
 911					INVALID_FIELD_IN_CDB, 0);
 912			kfree(arr);
 913			return check_condition_result;
 914		}
 915		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
 916		ret = fill_from_dev_buffer(scp, arr,
 917			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
 918		kfree(arr);
 919		return ret;
 920	}
 921	/* drops through here for a standard inquiry */
 922	arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0;	/* Removable disk */
 923	arr[2] = scsi_debug_scsi_level;
 924	arr[3] = 2;    /* response_data_format==2 */
 925	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
 926	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
 927	if (0 == scsi_debug_vpd_use_hostno)
 928		arr[5] = 0x10; /* claim: implicit TGPS */
 929	arr[6] = 0x10; /* claim: MultiP */
 930	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
 931	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
 932	memcpy(&arr[8], inq_vendor_id, 8);
 933	memcpy(&arr[16], inq_product_id, 16);
 934	memcpy(&arr[32], inq_product_rev, 4);
 935	/* version descriptors (2 bytes each) follow */
 936	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
 937	arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
 938	n = 62;
 939	if (scsi_debug_ptype == 0) {
 940		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
 941	} else if (scsi_debug_ptype == 1) {
 942		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
 943	}
 944	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
 945	ret = fill_from_dev_buffer(scp, arr,
 946			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
 947	kfree(arr);
 948	return ret;
 949}
 950
 951static int resp_requests(struct scsi_cmnd * scp,
 952			 struct sdebug_dev_info * devip)
 953{
 954	unsigned char * sbuff;
 955	unsigned char *cmd = (unsigned char *)scp->cmnd;
 956	unsigned char arr[SDEBUG_SENSE_LEN];
 957	int want_dsense;
 958	int len = 18;
 959
 960	memset(arr, 0, sizeof(arr));
 961	if (devip->reset == 1)
 962		mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
 963	want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
 964	sbuff = devip->sense_buff;
 965	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
 966		if (want_dsense) {
 967			arr[0] = 0x72;
 968			arr[1] = 0x0;		/* NO_SENSE in sense_key */
 969			arr[2] = THRESHOLD_EXCEEDED;
 970			arr[3] = 0xff;		/* TEST set and MRIE==6 */
 971		} else {
 972			arr[0] = 0x70;
 973			arr[2] = 0x0;		/* NO_SENSE in sense_key */
 974			arr[7] = 0xa;   	/* 18 byte sense buffer */
 975			arr[12] = THRESHOLD_EXCEEDED;
 976			arr[13] = 0xff;		/* TEST set and MRIE==6 */
 977		}
 978	} else {
 979		memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
 980		if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
 981			/* DESC bit set and sense_buff in fixed format */
 982			memset(arr, 0, sizeof(arr));
 983			arr[0] = 0x72;
 984			arr[1] = sbuff[2];     /* sense key */
 985			arr[2] = sbuff[12];    /* asc */
 986			arr[3] = sbuff[13];    /* ascq */
 987			len = 8;
 988		}
 989	}
 990	mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
 991	return fill_from_dev_buffer(scp, arr, len);
 992}
 993
 994static int resp_start_stop(struct scsi_cmnd * scp,
 995			   struct sdebug_dev_info * devip)
 996{
 997	unsigned char *cmd = (unsigned char *)scp->cmnd;
 998	int power_cond, errsts, start;
 999
1000	if ((errsts = check_readiness(scp, 1, devip)))
1001		return errsts;
1002	power_cond = (cmd[4] & 0xf0) >> 4;
1003	if (power_cond) {
1004		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1005			       	0);
1006		return check_condition_result;
1007	}
1008	start = cmd[4] & 1;
1009	if (start == devip->stopped)
1010		devip->stopped = !start;
1011	return 0;
1012}
1013
1014static sector_t get_sdebug_capacity(void)
1015{
1016	if (scsi_debug_virtual_gb > 0)
1017		return (sector_t)scsi_debug_virtual_gb *
1018			(1073741824 / scsi_debug_sector_size);
1019	else
1020		return sdebug_store_sectors;
1021}
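/* Worked example for the constant above: 1073741824 is 2^30 bytes (1 GiB),
 * so with the default 512-byte sectors each "virtual gigabyte" contributes
 * 1073741824 / 512 = 2097152 sectors to the reported capacity. */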
1022
1023#define SDEBUG_READCAP_ARR_SZ 8
1024static int resp_readcap(struct scsi_cmnd * scp,
1025			struct sdebug_dev_info * devip)
1026{
1027	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1028	unsigned int capac;
1029	int errsts;
1030
1031	if ((errsts = check_readiness(scp, 1, devip)))
1032		return errsts;
1033	/* following just in case virtual_gb changed */
1034	sdebug_capacity = get_sdebug_capacity();
1035	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1036	if (sdebug_capacity < 0xffffffff) {
1037		capac = (unsigned int)sdebug_capacity - 1;
1038		arr[0] = (capac >> 24);
1039		arr[1] = (capac >> 16) & 0xff;
1040		arr[2] = (capac >> 8) & 0xff;
1041		arr[3] = capac & 0xff;
1042	} else {
1043		arr[0] = 0xff;
1044		arr[1] = 0xff;
1045		arr[2] = 0xff;
1046		arr[3] = 0xff;
1047	}
1048	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1049	arr[7] = scsi_debug_sector_size & 0xff;
1050	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1051}
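/* Worked example: READ CAPACITY(10) returns the address of the last
 * logical block, hence the "- 1" above; with the default 8 MiB ramdisk and
 * 512-byte sectors that is 16384 sectors, so 16383 (0x3fff) is returned.
 * A capacity too large for the 4-byte field is reported as 0xffffffff,
 * telling the initiator to issue READ CAPACITY(16) instead. */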
1052
1053#define SDEBUG_READCAP16_ARR_SZ 32
1054static int resp_readcap16(struct scsi_cmnd * scp,
1055			  struct sdebug_dev_info * devip)
1056{
1057	unsigned char *cmd = (unsigned char *)scp->cmnd;
1058	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1059	unsigned long long capac;
1060	int errsts, k, alloc_len;
1061
1062	if ((errsts = check_readiness(scp, 1, devip)))
1063		return errsts;
1064	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1065		     + cmd[13]);
1066	/* following just in case virtual_gb changed */
1067	sdebug_capacity = get_sdebug_capacity();
1068	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1069	capac = sdebug_capacity - 1;
1070	for (k = 0; k < 8; ++k, capac >>= 8)
1071		arr[7 - k] = capac & 0xff;
1072	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1073	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1074	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1075	arr[11] = scsi_debug_sector_size & 0xff;
1076	arr[13] = scsi_debug_physblk_exp & 0xf;
1077	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1078
1079	if (scsi_debug_lbp()) {
1080		arr[14] |= 0x80; /* LBPME */
1081		if (scsi_debug_lbprz)
1082			arr[14] |= 0x40; /* LBPRZ */
1083	}
1084
1085	arr[15] = scsi_debug_lowest_aligned & 0xff;
1086
1087	if (scsi_debug_dif) {
1088		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1089		arr[12] |= 1; /* PROT_EN */
1090	}
1091
1092	return fill_from_dev_buffer(scp, arr,
1093				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1094}
1095
1096#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1097
1098static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1099			      struct sdebug_dev_info * devip)
1100{
1101	unsigned char *cmd = (unsigned char *)scp->cmnd;
1102	unsigned char * arr;
1103	int host_no = devip->sdbg_host->shost->host_no;
1104	int n, ret, alen, rlen;
1105	int port_group_a, port_group_b, port_a, port_b;
1106
1107	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1108		+ cmd[9]);
1109
1110	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1111	if (! arr)
1112		return DID_REQUEUE << 16;
1113	/*
1114	 * EVPD page 0x88 states we have two ports, one
1115	 * real and a fake port with no device connected.
1116	 * So we create two port groups with one port each
1117	 * and set the group with port B to unavailable.
1118	 */
1119	port_a = 0x1; /* relative port A */
1120	port_b = 0x2; /* relative port B */
1121	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1122	    (devip->channel & 0x7f);
1123	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1124	    (devip->channel & 0x7f) + 0x80;
1125
1126	/*
1127	 * The asymmetric access state is cycled according to the host_id.
1128	 */
1129	n = 4;
1130	if (0 == scsi_debug_vpd_use_hostno) {
1131	    arr[n++] = host_no % 3; /* Asymm access state */
1132	    arr[n++] = 0x0F; /* claim: all states are supported */
1133	} else {
1134	    arr[n++] = 0x0; /* Active/Optimized path */
1135	    arr[n++] = 0x01; /* claim: only support active/optimized paths */
1136	}
1137	arr[n++] = (port_group_a >> 8) & 0xff;
1138	arr[n++] = port_group_a & 0xff;
1139	arr[n++] = 0;    /* Reserved */
1140	arr[n++] = 0;    /* Status code */
1141	arr[n++] = 0;    /* Vendor unique */
1142	arr[n++] = 0x1;  /* One port per group */
1143	arr[n++] = 0;    /* Reserved */
1144	arr[n++] = 0;    /* Reserved */
1145	arr[n++] = (port_a >> 8) & 0xff;
1146	arr[n++] = port_a & 0xff;
1147	arr[n++] = 3;    /* Port unavailable */
 1148	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1149	arr[n++] = (port_group_b >> 8) & 0xff;
1150	arr[n++] = port_group_b & 0xff;
1151	arr[n++] = 0;    /* Reserved */
1152	arr[n++] = 0;    /* Status code */
1153	arr[n++] = 0;    /* Vendor unique */
1154	arr[n++] = 0x1;  /* One port per group */
1155	arr[n++] = 0;    /* Reserved */
1156	arr[n++] = 0;    /* Reserved */
1157	arr[n++] = (port_b >> 8) & 0xff;
1158	arr[n++] = port_b & 0xff;
1159
1160	rlen = n - 4;
1161	arr[0] = (rlen >> 24) & 0xff;
1162	arr[1] = (rlen >> 16) & 0xff;
1163	arr[2] = (rlen >> 8) & 0xff;
1164	arr[3] = rlen & 0xff;
1165
1166	/*
1167	 * Return the smallest value of either
1168	 * - The allocated length
1169	 * - The constructed command length
1170	 * - The maximum array size
1171	 */
1172	rlen = min(alen,n);
1173	ret = fill_from_dev_buffer(scp, arr,
1174				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1175	kfree(arr);
1176	return ret;
1177}
1178
1179/* <<Following mode page info copied from ST318451LW>> */
1180
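/* For reference: the pcontrol argument of the resp_*_pg() helpers below is
 * the PC field of the MODE SENSE CDB: 0 = current values, 1 = changeable
 * mask, 2 = default values, 3 = saved values (which resp_mode_sense()
 * rejects with SAVING_PARAMS_UNSUP). */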
1181static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1182{	/* Read-Write Error Recovery page for mode_sense */
1183	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1184					5, 0, 0xff, 0xff};
1185
1186	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1187	if (1 == pcontrol)
1188		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1189	return sizeof(err_recov_pg);
1190}
1191
1192static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1193{ 	/* Disconnect-Reconnect page for mode_sense */
1194	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1195					 0, 0, 0, 0, 0, 0, 0, 0};
1196
1197	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1198	if (1 == pcontrol)
1199		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1200	return sizeof(disconnect_pg);
1201}
1202
1203static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1204{       /* Format device page for mode_sense */
1205	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1206				     0, 0, 0, 0, 0, 0, 0, 0,
1207				     0, 0, 0, 0, 0x40, 0, 0, 0};
1208
1209	memcpy(p, format_pg, sizeof(format_pg));
1210	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1211	p[11] = sdebug_sectors_per & 0xff;
1212	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1213	p[13] = scsi_debug_sector_size & 0xff;
1214	if (DEV_REMOVEABLE(target))
1215		p[20] |= 0x20; /* should agree with INQUIRY */
1216	if (1 == pcontrol)
1217		memset(p + 2, 0, sizeof(format_pg) - 2);
1218	return sizeof(format_pg);
1219}
1220
1221static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1222{ 	/* Caching page for mode_sense */
1223	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1224		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1225
1226	memcpy(p, caching_pg, sizeof(caching_pg));
1227	if (1 == pcontrol)
1228		memset(p + 2, 0, sizeof(caching_pg) - 2);
1229	return sizeof(caching_pg);
1230}
1231
1232static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1233{ 	/* Control mode page for mode_sense */
1234	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1235				        0, 0, 0, 0};
1236	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1237				     0, 0, 0x2, 0x4b};
1238
1239	if (scsi_debug_dsense)
1240		ctrl_m_pg[2] |= 0x4;
1241	else
1242		ctrl_m_pg[2] &= ~0x4;
1243
1244	if (scsi_debug_ato)
1245		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1246
1247	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1248	if (1 == pcontrol)
1249		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1250	else if (2 == pcontrol)
1251		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1252	return sizeof(ctrl_m_pg);
1253}
1254
1255
1256static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1257{	/* Informational Exceptions control mode page for mode_sense */
1258	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1259				       0, 0, 0x0, 0x0};
1260	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1261				      0, 0, 0x0, 0x0};
1262
1263	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1264	if (1 == pcontrol)
1265		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1266	else if (2 == pcontrol)
1267		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1268	return sizeof(iec_m_pg);
1269}
1270
1271static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1272{	/* SAS SSP mode page - short format for mode_sense */
1273	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1274		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1275
1276	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1277	if (1 == pcontrol)
1278		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1279	return sizeof(sas_sf_m_pg);
1280}
1281
1282
1283static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1284			      int target_dev_id)
1285{	/* SAS phy control and discover mode page for mode_sense */
1286	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1287		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1288		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1289		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1290		    0x2, 0, 0, 0, 0, 0, 0, 0,
1291		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1292		    0, 0, 0, 0, 0, 0, 0, 0,
1293		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1294		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1295		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1296		    0x3, 0, 0, 0, 0, 0, 0, 0,
1297		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1298		    0, 0, 0, 0, 0, 0, 0, 0,
1299		};
1300	int port_a, port_b;
1301
1302	port_a = target_dev_id + 1;
1303	port_b = port_a + 1;
1304	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1305	p[20] = (port_a >> 24);
1306	p[21] = (port_a >> 16) & 0xff;
1307	p[22] = (port_a >> 8) & 0xff;
1308	p[23] = port_a & 0xff;
1309	p[48 + 20] = (port_b >> 24);
1310	p[48 + 21] = (port_b >> 16) & 0xff;
1311	p[48 + 22] = (port_b >> 8) & 0xff;
1312	p[48 + 23] = port_b & 0xff;
1313	if (1 == pcontrol)
1314		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1315	return sizeof(sas_pcd_m_pg);
1316}
1317
1318static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1319{	/* SAS SSP shared protocol specific port mode subpage */
1320	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1321		    0, 0, 0, 0, 0, 0, 0, 0,
1322		};
1323
1324	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1325	if (1 == pcontrol)
1326		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1327	return sizeof(sas_sha_m_pg);
1328}
1329
1330#define SDEBUG_MAX_MSENSE_SZ 256
1331
1332static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1333			   struct sdebug_dev_info * devip)
1334{
1335	unsigned char dbd, llbaa;
1336	int pcontrol, pcode, subpcode, bd_len;
1337	unsigned char dev_spec;
1338	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1339	unsigned char * ap;
1340	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1341	unsigned char *cmd = (unsigned char *)scp->cmnd;
1342
1343	if ((errsts = check_readiness(scp, 1, devip)))
1344		return errsts;
1345	dbd = !!(cmd[1] & 0x8);
1346	pcontrol = (cmd[2] & 0xc0) >> 6;
1347	pcode = cmd[2] & 0x3f;
1348	subpcode = cmd[3];
1349	msense_6 = (MODE_SENSE == cmd[0]);
1350	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1351	if ((0 == scsi_debug_ptype) && (0 == dbd))
1352		bd_len = llbaa ? 16 : 8;
1353	else
1354		bd_len = 0;
1355	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1356	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1357	if (0x3 == pcontrol) {  /* Saving values not supported */
1358		mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1359			       	0);
1360		return check_condition_result;
1361	}
1362	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1363			(devip->target * 1000) - 3;
1364	/* set DPOFUA bit for disks */
1365	if (0 == scsi_debug_ptype)
1366		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1367	else
1368		dev_spec = 0x0;
1369	if (msense_6) {
1370		arr[2] = dev_spec;
1371		arr[3] = bd_len;
1372		offset = 4;
1373	} else {
1374		arr[3] = dev_spec;
1375		if (16 == bd_len)
1376			arr[4] = 0x1;	/* set LONGLBA bit */
1377		arr[7] = bd_len;	/* assume 255 or less */
1378		offset = 8;
1379	}
1380	ap = arr + offset;
1381	if ((bd_len > 0) && (!sdebug_capacity))
1382		sdebug_capacity = get_sdebug_capacity();
1383
1384	if (8 == bd_len) {
1385		if (sdebug_capacity > 0xfffffffe) {
1386			ap[0] = 0xff;
1387			ap[1] = 0xff;
1388			ap[2] = 0xff;
1389			ap[3] = 0xff;
1390		} else {
1391			ap[0] = (sdebug_capacity >> 24) & 0xff;
1392			ap[1] = (sdebug_capacity >> 16) & 0xff;
1393			ap[2] = (sdebug_capacity >> 8) & 0xff;
1394			ap[3] = sdebug_capacity & 0xff;
1395		}
1396		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1397		ap[7] = scsi_debug_sector_size & 0xff;
1398		offset += bd_len;
1399		ap = arr + offset;
1400	} else if (16 == bd_len) {
1401		unsigned long long capac = sdebug_capacity;
1402
1403        	for (k = 0; k < 8; ++k, capac >>= 8)
1404                	ap[7 - k] = capac & 0xff;
1405		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1406		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1407		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1408		ap[15] = scsi_debug_sector_size & 0xff;
1409		offset += bd_len;
1410		ap = arr + offset;
1411	}
1412
1413	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1414		/* TODO: Control Extension page */
1415		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1416			       	0);
1417		return check_condition_result;
1418	}
1419	switch (pcode) {
1420	case 0x1:	/* Read-Write error recovery page, direct access */
1421		len = resp_err_recov_pg(ap, pcontrol, target);
1422		offset += len;
1423		break;
1424	case 0x2:	/* Disconnect-Reconnect page, all devices */
1425		len = resp_disconnect_pg(ap, pcontrol, target);
1426		offset += len;
1427		break;
1428        case 0x3:       /* Format device page, direct access */
1429                len = resp_format_pg(ap, pcontrol, target);
1430                offset += len;
1431                break;
1432	case 0x8:	/* Caching page, direct access */
1433		len = resp_caching_pg(ap, pcontrol, target);
1434		offset += len;
1435		break;
1436	case 0xa:	/* Control Mode page, all devices */
1437		len = resp_ctrl_m_pg(ap, pcontrol, target);
1438		offset += len;
1439		break;
1440	case 0x19:	/* if spc==1 then sas phy, control+discover */
1441		if ((subpcode > 0x2) && (subpcode < 0xff)) {
1442		        mk_sense_buffer(devip, ILLEGAL_REQUEST,
1443					INVALID_FIELD_IN_CDB, 0);
1444			return check_condition_result;
1445	        }
1446		len = 0;
1447		if ((0x0 == subpcode) || (0xff == subpcode))
1448			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1449		if ((0x1 == subpcode) || (0xff == subpcode))
1450			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1451						  target_dev_id);
1452		if ((0x2 == subpcode) || (0xff == subpcode))
1453			len += resp_sas_sha_m_spg(ap + len, pcontrol);
1454		offset += len;
1455		break;
1456	case 0x1c:	/* Informational Exceptions Mode page, all devices */
1457		len = resp_iec_m_pg(ap, pcontrol, target);
1458		offset += len;
1459		break;
1460	case 0x3f:	/* Read all Mode pages */
1461		if ((0 == subpcode) || (0xff == subpcode)) {
1462			len = resp_err_recov_pg(ap, pcontrol, target);
1463			len += resp_disconnect_pg(ap + len, pcontrol, target);
1464			len += resp_format_pg(ap + len, pcontrol, target);
1465			len += resp_caching_pg(ap + len, pcontrol, target);
1466			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1467			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1468			if (0xff == subpcode) {
1469				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1470						  target, target_dev_id);
1471				len += resp_sas_sha_m_spg(ap + len, pcontrol);
1472			}
1473			len += resp_iec_m_pg(ap + len, pcontrol, target);
1474		} else {
1475			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1476					INVALID_FIELD_IN_CDB, 0);
1477			return check_condition_result;
1478                }
1479		offset += len;
1480		break;
1481	default:
1482		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1483			       	0);
1484		return check_condition_result;
1485	}
1486	if (msense_6)
1487		arr[0] = offset - 1;
1488	else {
1489		arr[0] = ((offset - 2) >> 8) & 0xff;
1490		arr[1] = (offset - 2) & 0xff;
1491	}
1492	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1493}
1494
1495#define SDEBUG_MAX_MSELECT_SZ 512
1496
1497static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1498			    struct sdebug_dev_info * devip)
1499{
1500	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1501	int param_len, res, errsts, mpage;
1502	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1503	unsigned char *cmd = (unsigned char *)scp->cmnd;
1504
1505	if ((errsts = check_readiness(scp, 1, devip)))
1506		return errsts;
1507	memset(arr, 0, sizeof(arr));
1508	pf = cmd[1] & 0x10;
1509	sp = cmd[1] & 0x1;
1510	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1511	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1512		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1513				INVALID_FIELD_IN_CDB, 0);
1514		return check_condition_result;
1515	}
1516        res = fetch_to_dev_buffer(scp, arr, param_len);
1517        if (-1 == res)
1518                return (DID_ERROR << 16);
1519        else if ((res < param_len) &&
1520                 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1521                printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1522                       " IO sent=%d bytes\n", param_len, res);
1523	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1524	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1525	if (md_len > 2) {
1526		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1527				INVALID_FIELD_IN_PARAM_LIST, 0);
1528		return check_condition_result;
1529	}
1530	off = bd_len + (mselect6 ? 4 : 8);
1531	mpage = arr[off] & 0x3f;
1532	ps = !!(arr[off] & 0x80);
1533	if (ps) {
1534		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1535				INVALID_FIELD_IN_PARAM_LIST, 0);
1536		return check_condition_result;
1537	}
1538	spf = !!(arr[off] & 0x40);
1539	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1540		       (arr[off + 1] + 2);
1541	if ((pg_len + off) > param_len) {
1542		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1543				PARAMETER_LIST_LENGTH_ERR, 0);
1544		return check_condition_result;
1545	}
1546	switch (mpage) {
1547	case 0xa:      /* Control Mode page */
1548		if (ctrl_m_pg[1] == arr[off + 1]) {
1549			memcpy(ctrl_m_pg + 2, arr + off + 2,
1550			       sizeof(ctrl_m_pg) - 2);
1551			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1552			return 0;
1553		}
1554		break;
1555	case 0x1c:      /* Informational Exceptions Mode page */
1556		if (iec_m_pg[1] == arr[off + 1]) {
1557			memcpy(iec_m_pg + 2, arr + off + 2,
1558			       sizeof(iec_m_pg) - 2);
1559			return 0;
1560		}
1561		break;
1562	default:
1563		break;
1564	}
1565	mk_sense_buffer(devip, ILLEGAL_REQUEST,
1566			INVALID_FIELD_IN_PARAM_LIST, 0);
1567	return check_condition_result;
1568}
1569
1570static int resp_temp_l_pg(unsigned char * arr)
1571{
1572	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1573				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
1574		};
1575
1576        memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1577        return sizeof(temp_l_pg);
1578}
1579
1580static int resp_ie_l_pg(unsigned char * arr)
1581{
1582	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1583		};
1584
1585        memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1586	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
1587		arr[4] = THRESHOLD_EXCEEDED;
1588		arr[5] = 0xff;
1589	}
1590        return sizeof(ie_l_pg);
1591}
1592
1593#define SDEBUG_MAX_LSENSE_SZ 512
1594
1595static int resp_log_sense(struct scsi_cmnd * scp,
1596                          struct sdebug_dev_info * devip)
1597{
1598	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1599	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1600	unsigned char *cmd = (unsigned char *)scp->cmnd;
1601
1602	if ((errsts = check_readiness(scp, 1, devip)))
1603		return errsts;
1604	memset(arr, 0, sizeof(arr));
1605	ppc = cmd[1] & 0x2;
1606	sp = cmd[1] & 0x1;
1607	if (ppc || sp) {
1608		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1609				INVALID_FIELD_IN_CDB, 0);
1610		return check_condition_result;
1611	}
1612	pcontrol = (cmd[2] & 0xc0) >> 6;
1613	pcode = cmd[2] & 0x3f;
1614	subpcode = cmd[3] & 0xff;
1615	alloc_len = (cmd[7] << 8) + cmd[8];
1616	arr[0] = pcode;
1617	if (0 == subpcode) {
1618		switch (pcode) {
1619		case 0x0:	/* Supported log pages log page */
1620			n = 4;
1621			arr[n++] = 0x0;		/* this page */
1622			arr[n++] = 0xd;		/* Temperature */
1623			arr[n++] = 0x2f;	/* Informational exceptions */
1624			arr[3] = n - 4;
1625			break;
1626		case 0xd:	/* Temperature log page */
1627			arr[3] = resp_temp_l_pg(arr + 4);
1628			break;
1629		case 0x2f:	/* Informational exceptions log page */
1630			arr[3] = resp_ie_l_pg(arr + 4);
1631			break;
1632		default:
1633			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1634					INVALID_FIELD_IN_CDB, 0);
1635			return check_condition_result;
1636		}
1637	} else if (0xff == subpcode) {
1638		arr[0] |= 0x40;
1639		arr[1] = subpcode;
1640		switch (pcode) {
1641		case 0x0:	/* Supported log pages and subpages log page */
1642			n = 4;
1643			arr[n++] = 0x0;
1644			arr[n++] = 0x0;		/* 0,0 page */
1645			arr[n++] = 0x0;
1646			arr[n++] = 0xff;	/* this page */
1647			arr[n++] = 0xd;
1648			arr[n++] = 0x0;		/* Temperature */
1649			arr[n++] = 0x2f;
1650			arr[n++] = 0x0;	/* Informational exceptions */
1651			arr[3] = n - 4;
1652			break;
1653		case 0xd:	/* Temperature subpages */
1654			n = 4;
1655			arr[n++] = 0xd;
1656			arr[n++] = 0x0;		/* Temperature */
1657			arr[3] = n - 4;
1658			break;
1659		case 0x2f:	/* Informational exceptions subpages */
1660			n = 4;
1661			arr[n++] = 0x2f;
1662			arr[n++] = 0x0;		/* Informational exceptions */
1663			arr[3] = n - 4;
1664			break;
1665		default:
1666			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1667					INVALID_FIELD_IN_CDB, 0);
1668			return check_condition_result;
1669		}
1670	} else {
1671		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1672				INVALID_FIELD_IN_CDB, 0);
1673		return check_condition_result;
1674	}
1675	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1676	return fill_from_dev_buffer(scp, arr,
1677		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1678}
1679
1680static int check_device_access_params(struct sdebug_dev_info *devi,
1681				      unsigned long long lba, unsigned int num)
1682{
1683	if (lba + num > sdebug_capacity) {
1684		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1685		return check_condition_result;
1686	}
1687	/* transfer length excessive (tie in to block limits VPD page) */
1688	if (num > sdebug_store_sectors) {
1689		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1690		return check_condition_result;
1691	}
1692	return 0;
1693}
1694
1695static int do_device_access(struct scsi_cmnd *scmd,
1696			    struct sdebug_dev_info *devi,
1697			    unsigned long long lba, unsigned int num, int write)
1698{
1699	int ret;
1700	unsigned long long block, rest = 0;
1701	int (*func)(struct scsi_cmnd *, unsigned char *, int);
1702
1703	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1704
1705	block = do_div(lba, sdebug_store_sectors);
1706	if (block + num > sdebug_store_sectors)
1707		rest = block + num - sdebug_store_sectors;
1708
1709	ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1710		   (num - rest) * scsi_debug_sector_size);
1711	if (!ret && rest)
1712		ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1713
1714	return ret;
1715}
1716
1717static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1718			    unsigned int sectors, u32 ei_lba)
1719{
1720	unsigned int i, resid;
1721	struct scatterlist *psgl;
1722	struct sd_dif_tuple *sdt;
1723	sector_t sector;
1724	sector_t tmp_sec = start_sec;
1725	void *paddr;
1726
1727	start_sec = do_div(tmp_sec, sdebug_store_sectors);
1728
1729	sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1730
1731	for (i = 0 ; i < sectors ; i++) {
1732		u16 csum;
1733
1734		if (sdt[i].app_tag == 0xffff)
1735			continue;
1736
1737		sector = start_sec + i;
1738
1739		switch (scsi_debug_guard) {
1740		case 1:
1741			csum = ip_compute_csum(fake_storep +
1742					       sector * scsi_debug_sector_size,
1743					       scsi_debug_sector_size);
1744			break;
1745		case 0:
1746			csum = crc_t10dif(fake_storep +
1747					  sector * scsi_debug_sector_size,
1748					  scsi_debug_sector_size);
1749			csum = cpu_to_be16(csum);
1750			break;
1751		default:
1752			BUG();
1753		}
1754
1755		if (sdt[i].guard_tag != csum) {
1756			printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1757			       " rcvd 0x%04x, data 0x%04x\n", __func__,
1758			       (unsigned long)sector,
1759			       be16_to_cpu(sdt[i].guard_tag),
1760			       be16_to_cpu(csum));
1761			dif_errors++;
1762			return 0x01;
1763		}
1764
1765		if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1766		    be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1767			printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1768			       __func__, (unsigned long)sector);
1769			dif_errors++;
1770			return 0x03;
1771		}
1772
1773		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1774		    be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1775			printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1776			       __func__, (unsigned long)sector);
1777			dif_errors++;
1778			return 0x03;
1779		}
1780
1781		ei_lba++;
1782	}
1783
1784	resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1785	sector = start_sec;
1786
1787	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1788		int len = min(psgl->length, resid);
1789
1790		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1791		memcpy(paddr, dif_storep + dif_offset(sector), len);
1792
1793		sector += len >> 3;
1794		if (sector >= sdebug_store_sectors) {
1795			/* Force wrap */
1796			tmp_sec = sector;
1797			sector = do_div(tmp_sec, sdebug_store_sectors);
1798		}
1799		resid -= len;
1800		kunmap_atomic(paddr);
1801	}
1802
1803	dix_reads++;
1804
1805	return 0;
1806}
1807
1808static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1809		     unsigned int num, struct sdebug_dev_info *devip,
1810		     u32 ei_lba)
1811{
1812	unsigned long iflags;
1813	int ret;
1814
1815	ret = check_device_access_params(devip, lba, num);
1816	if (ret)
1817		return ret;
1818
1819	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1820	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1821	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1822		/* claim unrecoverable read error */
1823		mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1824		/* set info field and valid bit for fixed format sense data */
1825		if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1826			devip->sense_buff[0] |= 0x80;	/* Valid bit */
1827			ret = (lba < OPT_MEDIUM_ERR_ADDR)
1828			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1829			devip->sense_buff[3] = (ret >> 24) & 0xff;
1830			devip->sense_buff[4] = (ret >> 16) & 0xff;
1831			devip->sense_buff[5] = (ret >> 8) & 0xff;
1832			devip->sense_buff[6] = ret & 0xff;
1833		}
1834	        scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1835		return check_condition_result;
1836	}
1837
1838	/* DIX + T10 DIF */
1839	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1840		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1841
1842		if (prot_ret) {
1843			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1844			return illegal_condition_result;
1845		}
1846	}
1847
1848	read_lock_irqsave(&atomic_rw, iflags);
1849	ret = do_device_access(SCpnt, devip, lba, num, 0);
1850	read_unlock_irqrestore(&atomic_rw, iflags);
1851	return ret;
1852}
1853
1854static void dump_sector(unsigned char *buf, int len)
1855{
1856	int i, j;
1857
1858	printk(KERN_ERR ">>> Sector Dump <<<\n");
1859
1860	for (i = 0 ; i < len ; i += 16) {
1861		printk(KERN_ERR "%04d: ", i);
1862
1863		for (j = 0 ; j < 16 ; j++) {
1864			unsigned char c = buf[i+j];
1865			if (c >= 0x20 && c < 0x7e)
1866				printk(" %c ", buf[i+j]);
1867			else
1868				printk("%02x ", buf[i+j]);
1869		}
1870
1871		printk("\n");
1872	}
1873}
1874
1875static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1876			     unsigned int sectors, u32 ei_lba)
1877{
1878	int i, j, ret;
1879	struct sd_dif_tuple *sdt;
1880	struct scatterlist *dsgl = scsi_sglist(SCpnt);
1881	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1882	void *daddr, *paddr;
1883	sector_t tmp_sec = start_sec;
1884	sector_t sector;
1885	int ppage_offset;
1886	unsigned short csum;
1887
1888	sector = do_div(tmp_sec, sdebug_store_sectors);
1889
1890	BUG_ON(scsi_sg_count(SCpnt) == 0);
1891	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1892
1893	paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1894	ppage_offset = 0;
1895
1896	/* For each data page */
1897	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1898		daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1899
1900		/* For each sector-sized chunk in data page */
1901		for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1902
1903			/* If we're at the end of the current
1904			 * protection page advance to the next one
1905			 */
1906			if (ppage_offset >= psgl->length) {
1907				kunmap_atomic(paddr);
1908				psgl = sg_next(psgl);
1909				BUG_ON(psgl == NULL);
1910				paddr = kmap_atomic(sg_page(psgl))
1911					+ psgl->offset;
1912				ppage_offset = 0;
1913			}
1914
1915			sdt = paddr + ppage_offset;
1916
1917			switch (scsi_debug_guard) {
1918			case 1:
1919				csum = ip_compute_csum(daddr,
1920						       scsi_debug_sector_size);
1921				break;
1922			case 0:
1923				csum = cpu_to_be16(crc_t10dif(daddr,
1924						      scsi_debug_sector_size));
1925				break;
1926			default:
1927				BUG();
1928				ret = 0;
1929				goto out;
1930			}
1931
1932			if (sdt->guard_tag != csum) {
1933				printk(KERN_ERR
1934				       "%s: GUARD check failed on sector %lu " \
1935				       "rcvd 0x%04x, calculated 0x%04x\n",
1936				       __func__, (unsigned long)sector,
1937				       be16_to_cpu(sdt->guard_tag),
1938				       be16_to_cpu(csum));
1939				ret = 0x01;
1940				dump_sector(daddr, scsi_debug_sector_size);
1941				goto out;
1942			}
1943
1944			if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1945			    be32_to_cpu(sdt->ref_tag)
1946			    != (start_sec & 0xffffffff)) {
1947				printk(KERN_ERR
1948				       "%s: REF check failed on sector %lu\n",
1949				       __func__, (unsigned long)sector);
1950				ret = 0x03;
1951				dump_sector(daddr, scsi_debug_sector_size);
1952				goto out;
1953			}
1954
1955			if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1956			    be32_to_cpu(sdt->ref_tag) != ei_lba) {
1957				printk(KERN_ERR
1958				       "%s: REF check failed on sector %lu\n",
1959				       __func__, (unsigned long)sector);
1960				ret = 0x03;
1961				dump_sector(daddr, scsi_debug_sector_size);
1962				goto out;
1963			}
1964
1965			/* Would be great to copy this in bigger
1966			 * chunks.  However, for the sake of
1967			 * correctness we need to verify each sector
1968			 * before writing it to "stable" storage
1969			 */
1970			memcpy(dif_storep + dif_offset(sector), sdt, 8);
1971
1972			sector++;
1973
1974			if (sector == sdebug_store_sectors)
1975				sector = 0;	/* Force wrap */
1976
1977			start_sec++;
1978			ei_lba++;
1979			daddr += scsi_debug_sector_size;
1980			ppage_offset += sizeof(struct sd_dif_tuple);
1981		}
1982
1983		kunmap_atomic(daddr);
1984	}
1985
1986	kunmap_atomic(paddr);
1987
1988	dix_writes++;
1989
1990	return 0;
1991
1992out:
1993	dif_errors++;
1994	kunmap_atomic(daddr);
1995	kunmap_atomic(paddr);
1996	return ret;
1997}
1998
1999static unsigned int map_state(sector_t lba, unsigned int *num)
2000{
2001	unsigned int granularity, alignment, mapped;
2002	sector_t block, next, end;
2003
2004	granularity = scsi_debug_unmap_granularity;
2005	alignment = granularity - scsi_debug_unmap_alignment;
2006	block = lba + alignment;
2007	do_div(block, granularity);
2008
2009	mapped = test_bit(block, map_storep);
2010
2011	if (mapped)
2012		next = find_next_zero_bit(map_storep, map_size, block);
2013	else
2014		next = find_next_bit(map_storep, map_size, block);
2015
2016	end = next * granularity - scsi_debug_unmap_alignment;
2017	*num = end - lba;
2018
2019	return mapped;
2020}
2021
2022static void map_region(sector_t lba, unsigned int len)
2023{
2024	unsigned int granularity, alignment;
2025	sector_t end = lba + len;
2026
2027	granularity = scsi_debug_unmap_granularity;
2028	alignment = granularity - scsi_debug_unmap_alignment;
2029
2030	while (lba < end) {
2031		sector_t block, rem;
2032
2033		block = lba + alignment;
2034		rem = do_div(block, granularity);
2035
2036		if (block < map_size)
2037			set_bit(block, map_storep);
2038
2039		lba += granularity - rem;
2040	}
2041}
2042
2043static void unmap_region(sector_t lba, unsigned int len)
2044{
2045	unsigned int granularity, alignment;
2046	sector_t end = lba + len;
2047
2048	granularity = scsi_debug_unmap_granularity;
2049	alignment = granularity - scsi_debug_unmap_alignment;
2050
2051	while (lba < end) {
2052		sector_t block, rem;
2053
2054		block = lba + alignment;
2055		rem = do_div(block, granularity);
2056
2057		if (rem == 0 && lba + granularity <= end && block < map_size) {
2058			clear_bit(block, map_storep);
2059			if (scsi_debug_lbprz)
2060				memset(fake_storep +
2061				       block * scsi_debug_sector_size, 0,
2062				       scsi_debug_sector_size);
2063		}
2064		lba += granularity - rem;
2065	}
2066}
2067
2068static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2069		      unsigned int num, struct sdebug_dev_info *devip,
2070		      u32 ei_lba)
2071{
2072	unsigned long iflags;
2073	int ret;
2074
2075	ret = check_device_access_params(devip, lba, num);
2076	if (ret)
2077		return ret;
2078
2079	/* DIX + T10 DIF */
2080	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2081		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2082
2083		if (prot_ret) {
2084			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2085			return illegal_condition_result;
2086		}
2087	}
2088
2089	write_lock_irqsave(&atomic_rw, iflags);
2090	ret = do_device_access(SCpnt, devip, lba, num, 1);
2091	if (scsi_debug_unmap_granularity)
2092		map_region(lba, num);
2093	write_unlock_irqrestore(&atomic_rw, iflags);
2094	if (-1 == ret)
2095		return (DID_ERROR << 16);
2096	else if ((ret < (num * scsi_debug_sector_size)) &&
2097		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2098		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2099		       "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2100
2101	return 0;
2102}
2103
2104static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2105		      unsigned int num, struct sdebug_dev_info *devip,
2106			   u32 ei_lba, unsigned int unmap)
2107{
2108	unsigned long iflags;
2109	unsigned long long i;
2110	int ret;
2111
2112	ret = check_device_access_params(devip, lba, num);
2113	if (ret)
2114		return ret;
2115
2116	if (num > scsi_debug_write_same_length) {
2117		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2118				0);
2119		return check_condition_result;
2120	}
2121
2122	write_lock_irqsave(&atomic_rw, iflags);
2123
2124	if (unmap && scsi_debug_unmap_granularity) {
2125		unmap_region(lba, num);
2126		goto out;
2127	}
2128
2129	/* Else fetch one logical block */
2130	ret = fetch_to_dev_buffer(scmd,
2131				  fake_storep + (lba * scsi_debug_sector_size),
2132				  scsi_debug_sector_size);
2133
2134	if (-1 == ret) {
2135		write_unlock_irqrestore(&atomic_rw, iflags);
2136		return (DID_ERROR << 16);
2137	} else if ((ret < (num * scsi_debug_sector_size)) &&
2138		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2139		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2140		       "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2141
2142	/* Copy first sector to remaining blocks */
2143	for (i = 1 ; i < num ; i++)
2144		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2145		       fake_storep + (lba * scsi_debug_sector_size),
2146		       scsi_debug_sector_size);
2147
2148	if (scsi_debug_unmap_granularity)
2149		map_region(lba, num);
2150out:
2151	write_unlock_irqrestore(&atomic_rw, iflags);
2152
2153	return 0;
2154}
2155
2156struct unmap_block_desc {
2157	__be64	lba;
2158	__be32	blocks;
2159	__be32	__reserved;
2160};
2161
2162static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2163{
2164	unsigned char *buf;
2165	struct unmap_block_desc *desc;
2166	unsigned int i, payload_len, descriptors;
2167	int ret;
2168
2169	ret = check_readiness(scmd, 1, devip);
2170	if (ret)
2171		return ret;
2172
2173	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2174	BUG_ON(scsi_bufflen(scmd) != payload_len);
2175
2176	descriptors = (payload_len - 8) / 16;
2177
2178	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2179	if (!buf)
2180		return check_condition_result;
2181
2182	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2183
2184	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2185	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2186
2187	desc = (void *)&buf[8];
2188
2189	for (i = 0 ; i < descriptors ; i++) {
2190		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2191		unsigned int num = get_unaligned_be32(&desc[i].blocks);
2192
2193		ret = check_device_access_params(devip, lba, num);
2194		if (ret)
2195			goto out;
2196
2197		unmap_region(lba, num);
2198	}
2199
2200	ret = 0;
2201
2202out:
2203	kfree(buf);
2204
2205	return ret;
2206}
2207
2208#define SDEBUG_GET_LBA_STATUS_LEN 32
2209
2210static int resp_get_lba_status(struct scsi_cmnd * scmd,
2211			       struct sdebug_dev_info * devip)
2212{
2213	unsigned long long lba;
2214	unsigned int alloc_len, mapped, num;
2215	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2216	int ret;
2217
2218	ret = check_readiness(scmd, 1, devip);
2219	if (ret)
2220		return ret;
2221
2222	lba = get_unaligned_be64(&scmd->cmnd[2]);
2223	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2224
2225	if (alloc_len < 24)
2226		return 0;
2227
2228	ret = check_device_access_params(devip, lba, 1);
2229	if (ret)
2230		return ret;
2231
2232	mapped = map_state(lba, &num);
2233
2234	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2235	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
2236	put_unaligned_be64(lba, &arr[8]);	/* LBA */
2237	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
2238	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
2239
2240	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2241}
2242
2243#define SDEBUG_RLUN_ARR_SZ 256
2244
2245static int resp_report_luns(struct scsi_cmnd * scp,
2246			    struct sdebug_dev_info * devip)
2247{
2248	unsigned int alloc_len;
2249	int lun_cnt, i, upper, num, n, wlun, lun;
2250	unsigned char *cmd = (unsigned char *)scp->cmnd;
2251	int select_report = (int)cmd[2];
2252	struct scsi_lun *one_lun;
2253	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2254	unsigned char * max_addr;
2255
2256	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2257	if ((alloc_len < 4) || (select_report > 2)) {
2258		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2259			       	0);
2260		return check_condition_result;
2261	}
2262	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
2263	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2264	lun_cnt = scsi_debug_max_luns;
2265	if (1 == select_report)
2266		lun_cnt = 0;
2267	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2268		--lun_cnt;
2269	wlun = (select_report > 0) ? 1 : 0;
2270	num = lun_cnt + wlun;
2271	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2272	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2273	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2274			    sizeof(struct scsi_lun)), num);
2275	if (n < num) {
2276		wlun = 0;
2277		lun_cnt = n;
2278	}
2279	one_lun = (struct scsi_lun *) &arr[8];
2280	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2281	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2282             ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2283	     i++, lun++) {
2284		upper = (lun >> 8) & 0x3f;
2285		if (upper)
2286			one_lun[i].scsi_lun[0] =
2287			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2288		one_lun[i].scsi_lun[1] = lun & 0xff;
2289	}
2290	if (wlun) {
2291		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2292		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2293		i++;
2294	}
2295	alloc_len = (unsigned char *)(one_lun + i) - arr;
2296	return fill_from_dev_buffer(scp, arr,
2297				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2298}
2299
2300static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2301			    unsigned int num, struct sdebug_dev_info *devip)
2302{
2303	int i, j, ret = -1;
2304	unsigned char *kaddr, *buf;
2305	unsigned int offset;
2306	struct scatterlist *sg;
2307	struct scsi_data_buffer *sdb = scsi_in(scp);
2308
2309	/* A temporary buffer is used here; avoiding it would be better. */
2310	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2311	if (!buf)
2312		return ret;
2313
2314	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2315
2316	offset = 0;
2317	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2318		kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2319		if (!kaddr)
2320			goto out;
2321
2322		for (j = 0; j < sg->length; j++)
2323			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
2324
2325		offset += sg->length;
2326		kunmap_atomic(kaddr);
2327	}
2328	ret = 0;
2329out:
2330	kfree(buf);
2331
2332	return ret;
2333}
2334
2335/* When timer goes off this function is called. */
2336static void timer_intr_handler(unsigned long indx)
2337{
2338	struct sdebug_queued_cmd * sqcp;
2339	unsigned long iflags;
2340
2341	if (indx >= scsi_debug_max_queue) {
2342		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2343		       "large\n");
2344		return;
2345	}
2346	spin_lock_irqsave(&queued_arr_lock, iflags);
2347	sqcp = &queued_arr[(int)indx];
2348	if (! sqcp->in_use) {
2349		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2350		       "interrupt\n");
2351		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2352		return;
2353	}
2354	sqcp->in_use = 0;
2355	if (sqcp->done_funct) {
2356		sqcp->a_cmnd->result = sqcp->scsi_result;
2357		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2358	}
2359	sqcp->done_funct = NULL;
2360	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2361}
2362
2363
2364static struct sdebug_dev_info *
2365sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2366{
2367	struct sdebug_dev_info *devip;
2368
2369	devip = kzalloc(sizeof(*devip), flags);
2370	if (devip) {
2371		devip->sdbg_host = sdbg_host;
2372		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2373	}
2374	return devip;
2375}
2376
2377static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2378{
2379	struct sdebug_host_info * sdbg_host;
2380	struct sdebug_dev_info * open_devip = NULL;
2381	struct sdebug_dev_info * devip =
2382			(struct sdebug_dev_info *)sdev->hostdata;
2383
2384	if (devip)
2385		return devip;
2386	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2387	if (!sdbg_host) {
2388                printk(KERN_ERR "Host info NULL\n");
2389		return NULL;
2390        }
2391	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2392		if ((devip->used) && (devip->channel == sdev->channel) &&
2393                    (devip->target == sdev->id) &&
2394                    (devip->lun == sdev->lun))
2395                        return devip;
2396		else {
2397			if ((!devip->used) && (!open_devip))
2398				open_devip = devip;
2399		}
2400	}
2401	if (!open_devip) { /* try and make a new one */
2402		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2403		if (!open_devip) {
2404			printk(KERN_ERR "%s: out of memory at line %d\n",
2405				__func__, __LINE__);
2406			return NULL;
2407		}
2408	}
2409
2410	open_devip->channel = sdev->channel;
2411	open_devip->target = sdev->id;
2412	open_devip->lun = sdev->lun;
2413	open_devip->sdbg_host = sdbg_host;
2414	open_devip->reset = 1;
2415	open_devip->used = 1;
2416	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2417	if (scsi_debug_dsense)
2418		open_devip->sense_buff[0] = 0x72;
2419	else {
2420		open_devip->sense_buff[0] = 0x70;
2421		open_devip->sense_buff[7] = 0xa;
2422	}
2423	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2424		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2425
2426	return open_devip;
2427}
2428
2429static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2430{
2431	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2432		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2433		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2434	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2435	return 0;
2436}
2437
2438static int scsi_debug_slave_configure(struct scsi_device *sdp)
2439{
2440	struct sdebug_dev_info *devip;
2441
2442	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2443		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2444		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2445	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2446		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2447	devip = devInfoReg(sdp);
2448	if (NULL == devip)
2449		return 1;	/* no resources, will be marked offline */
2450	sdp->hostdata = devip;
2451	if (sdp->host->cmd_per_lun)
2452		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2453					sdp->host->cmd_per_lun);
2454	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2455	if (scsi_debug_no_uld)
2456		sdp->no_uld_attach = 1;
2457	return 0;
2458}
2459
2460static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2461{
2462	struct sdebug_dev_info *devip =
2463		(struct sdebug_dev_info *)sdp->hostdata;
2464
2465	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2466		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2467		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2468	if (devip) {
2469		/* make this slot available for re-use */
2470		devip->used = 0;
2471		sdp->hostdata = NULL;
2472	}
2473}
2474
2475/* Returns 1 if 'cmnd' was found and its timer deleted; otherwise returns 0 */
2476static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2477{
2478	unsigned long iflags;
2479	int k;
2480	struct sdebug_queued_cmd *sqcp;
2481
2482	spin_lock_irqsave(&queued_arr_lock, iflags);
2483	for (k = 0; k < scsi_debug_max_queue; ++k) {
2484		sqcp = &queued_arr[k];
2485		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2486			del_timer_sync(&sqcp->cmnd_timer);
2487			sqcp->in_use = 0;
2488			sqcp->a_cmnd = NULL;
2489			break;
2490		}
2491	}
2492	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2493	return (k < scsi_debug_max_queue) ? 1 : 0;
2494}
2495
2496/* Deletes (stops) timers of all queued commands */
2497static void stop_all_queued(void)
2498{
2499	unsigned long iflags;
2500	int k;
2501	struct sdebug_queued_cmd *sqcp;
2502
2503	spin_lock_irqsave(&queued_arr_lock, iflags);
2504	for (k = 0; k < scsi_debug_max_queue; ++k) {
2505		sqcp = &queued_arr[k];
2506		if (sqcp->in_use && sqcp->a_cmnd) {
2507			del_timer_sync(&sqcp->cmnd_timer);
2508			sqcp->in_use = 0;
2509			sqcp->a_cmnd = NULL;
2510		}
2511	}
2512	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2513}
2514
2515static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2516{
2517	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2518		printk(KERN_INFO "scsi_debug: abort\n");
2519	++num_aborts;
2520	stop_queued_cmnd(SCpnt);
2521	return SUCCESS;
2522}
2523
2524static int scsi_debug_biosparam(struct scsi_device *sdev,
2525		struct block_device * bdev, sector_t capacity, int *info)
2526{
2527	int res;
2528	unsigned char *buf;
2529
2530	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2531		printk(KERN_INFO "scsi_debug: biosparam\n");
2532	buf = scsi_bios_ptable(bdev);
2533	if (buf) {
2534		res = scsi_partsize(buf, capacity,
2535				    &info[2], &info[0], &info[1]);
2536		kfree(buf);
2537		if (! res)
2538			return res;
2539	}
2540	info[0] = sdebug_heads;
2541	info[1] = sdebug_sectors_per;
2542	info[2] = sdebug_cylinders_per;
2543	return 0;
2544}
2545
2546static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2547{
2548	struct sdebug_dev_info * devip;
2549
2550	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2551		printk(KERN_INFO "scsi_debug: device_reset\n");
2552	++num_dev_resets;
2553	if (SCpnt) {
2554		devip = devInfoReg(SCpnt->device);
2555		if (devip)
2556			devip->reset = 1;
2557	}
2558	return SUCCESS;
2559}
2560
2561static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2562{
2563	struct sdebug_host_info *sdbg_host;
2564        struct sdebug_dev_info * dev_info;
2565        struct scsi_device * sdp;
2566        struct Scsi_Host * hp;
2567
2568	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2569		printk(KERN_INFO "scsi_debug: bus_reset\n");
2570	++num_bus_resets;
2571	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2572		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2573		if (sdbg_host) {
2574			list_for_each_entry(dev_info,
2575                                            &sdbg_host->dev_info_list,
2576                                            dev_list)
2577				dev_info->reset = 1;
2578		}
2579	}
2580	return SUCCESS;
2581}
2582
2583static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2584{
2585	struct sdebug_host_info * sdbg_host;
2586        struct sdebug_dev_info * dev_info;
2587
2588	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2589		printk(KERN_INFO "scsi_debug: host_reset\n");
2590	++num_host_resets;
2591        spin_lock(&sdebug_host_list_lock);
2592        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2593                list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2594                                    dev_list)
2595                        dev_info->reset = 1;
2596        }
2597        spin_unlock(&sdebug_host_list_lock);
2598	stop_all_queued();
2599	return SUCCESS;
2600}
2601
2602/* Initializes timers in queued array */
2603static void __init init_all_queued(void)
2604{
2605	unsigned long iflags;
2606	int k;
2607	struct sdebug_queued_cmd * sqcp;
2608
2609	spin_lock_irqsave(&queued_arr_lock, iflags);
2610	for (k = 0; k < scsi_debug_max_queue; ++k) {
2611		sqcp = &queued_arr[k];
2612		init_timer(&sqcp->cmnd_timer);
2613		sqcp->in_use = 0;
2614		sqcp->a_cmnd = NULL;
2615	}
2616	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2617}
2618
2619static void __init sdebug_build_parts(unsigned char *ramp,
2620				      unsigned long store_size)
2621{
2622	struct partition * pp;
2623	int starts[SDEBUG_MAX_PARTS + 2];
2624	int sectors_per_part, num_sectors, k;
2625	int heads_by_sects, start_sec, end_sec;
2626
2627	/* assume partition table already zeroed */
2628	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2629		return;
2630	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2631		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2632		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2633				    "partitions to %d\n", SDEBUG_MAX_PARTS);
2634	}
2635	num_sectors = (int)sdebug_store_sectors;
2636	sectors_per_part = (num_sectors - sdebug_sectors_per)
2637			   / scsi_debug_num_parts;
2638	heads_by_sects = sdebug_heads * sdebug_sectors_per;
2639        starts[0] = sdebug_sectors_per;
2640	for (k = 1; k < scsi_debug_num_parts; ++k)
2641		starts[k] = ((k * sectors_per_part) / heads_by_sects)
2642			    * heads_by_sects;
2643	starts[scsi_debug_num_parts] = num_sectors;
2644	starts[scsi_debug_num_parts + 1] = 0;
2645
2646	ramp[510] = 0x55;	/* magic partition markings */
2647	ramp[511] = 0xAA;
2648	pp = (struct partition *)(ramp + 0x1be);
2649	for (k = 0; starts[k + 1]; ++k, ++pp) {
2650		start_sec = starts[k];
2651		end_sec = starts[k + 1] - 1;
2652		pp->boot_ind = 0;
2653
2654		pp->cyl = start_sec / heads_by_sects;
2655		pp->head = (start_sec - (pp->cyl * heads_by_sects))
2656			   / sdebug_sectors_per;
2657		pp->sector = (start_sec % sdebug_sectors_per) + 1;
2658
2659		pp->end_cyl = end_sec / heads_by_sects;
2660		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2661			       / sdebug_sectors_per;
2662		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2663
2664		pp->start_sect = start_sec;
2665		pp->nr_sects = end_sec - start_sec + 1;
2666		pp->sys_ind = 0x83;	/* plain Linux partition */
2667	}
2668}
2669
2670static int schedule_resp(struct scsi_cmnd * cmnd,
2671			 struct sdebug_dev_info * devip,
2672			 done_funct_t done, int scsi_result, int delta_jiff)
2673{
2674	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2675		if (scsi_result) {
2676			struct scsi_device * sdp = cmnd->device;
2677
2678			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
2679			       "non-zero result=0x%x\n", sdp->host->host_no,
2680			       sdp->channel, sdp->id, sdp->lun, scsi_result);
2681		}
2682	}
2683	if (cmnd && devip) {
2684		/* simulate autosense by this driver */
2685		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2686			memcpy(cmnd->sense_buffer, devip->sense_buff,
2687			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2688			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2689	}
2690	if (delta_jiff <= 0) {
2691		if (cmnd)
2692			cmnd->result = scsi_result;
2693		if (done)
2694			done(cmnd);
2695		return 0;
2696	} else {
2697		unsigned long iflags;
2698		int k;
2699		struct sdebug_queued_cmd * sqcp = NULL;
2700
2701		spin_lock_irqsave(&queued_arr_lock, iflags);
2702		for (k = 0; k < scsi_debug_max_queue; ++k) {
2703			sqcp = &queued_arr[k];
2704			if (! sqcp->in_use)
2705				break;
2706		}
2707		if (k >= scsi_debug_max_queue) {
2708			spin_unlock_irqrestore(&queued_arr_lock, iflags);
2709			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2710			return 1;	/* report busy to mid level */
2711		}
2712		sqcp->in_use = 1;
2713		sqcp->a_cmnd = cmnd;
2714		sqcp->scsi_result = scsi_result;
2715		sqcp->done_funct = done;
2716		sqcp->cmnd_timer.function = timer_intr_handler;
2717		sqcp->cmnd_timer.data = k;
2718		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2719		add_timer(&sqcp->cmnd_timer);
2720		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2721		if (cmnd)
2722			cmnd->result = 0;
2723		return 0;
2724	}
2725}
2726/* Note: The following macros create attribute files in the
2727   /sys/module/scsi_debug/parameters directory. Unfortunately this
2728   driver is not notified when one of them is changed, so it cannot
2729   trigger auxiliary actions as it can when the corresponding attribute
2730   in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2731 */
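/* Illustrative usage only (the values below are arbitrary examples, not
 * recommendations): the parameters declared below can be set when the module
 * is loaded, e.g.
 *     modprobe scsi_debug dev_size_mb=16 num_tgts=2 max_luns=4
 * or, with the driver built in, on the kernel command line as
 *     scsi_debug.dev_size_mb=16 scsi_debug.num_tgts=2 scsi_debug.max_luns=4
 */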
2732module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2733module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2734module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2735module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2736module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2737module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2738module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2739module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2740module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2741module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2742module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2743module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2744module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2745module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2746module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2747module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2748module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2749module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2750module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2751module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2752module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2753module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2754module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2755module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2756module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2757module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2758module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2759module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2760module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2761module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2762module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2763module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2764module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2765		   S_IRUGO | S_IWUSR);
2766module_param_named(write_same_length, scsi_debug_write_same_length, int,
2767		   S_IRUGO | S_IWUSR);
2768
2769MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2770MODULE_DESCRIPTION("SCSI debug adapter driver");
2771MODULE_LICENSE("GPL");
2772MODULE_VERSION(SCSI_DEBUG_VERSION);
2773
2774MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2775MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2776MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2777MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2778MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2779MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2780MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2781MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2782MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2783MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2784MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2785MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2786MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2787MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2788MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2789MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2790MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2791MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2792MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
2793MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2794MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2795MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2796MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2797MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2798MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2799MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2800MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2801MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2802MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2803MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2804MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2805MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2806MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2807MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2808
2809static char sdebug_info[256];
2810
2811static const char * scsi_debug_info(struct Scsi_Host * shp)
2812{
2813	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2814		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2815		scsi_debug_version_date, scsi_debug_dev_size_mb,
2816		scsi_debug_opts);
2817	return sdebug_info;
2818}
2819
2820/* scsi_debug_proc_info
2821 * Used if the driver currently has no support of its own for /proc/scsi
2822 */
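/* Sketch of typical use, assuming the usual legacy proc_info path of the form
 * /proc/scsi/<driver>/<host_no> (the host number may differ on a given system):
 *     cat /proc/scsi/scsi_debug/0       # read the summary formatted below
 *     echo 2 > /proc/scsi/scsi_debug/0  # set scsi_debug_opts to 2
 * The write path requires both CAP_SYS_ADMIN and CAP_SYS_RAWIO, as checked below.
 */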
2823static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2824				int length, int inout)
2825{
2826	int len, pos, begin;
2827	int orig_length;
2828
2829	orig_length = length;
2830
2831	if (inout == 1) {
2832		char arr[16];
2833		int minLen = length > 15 ? 15 : length;
2834
2835		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2836			return -EACCES;
2837		memcpy(arr, buffer, minLen);
2838		arr[minLen] = '\0';
2839		if (1 != sscanf(arr, "%d", &pos))
2840			return -EINVAL;
2841		scsi_debug_opts = pos;
2842		if (scsi_debug_every_nth != 0)
2843                        scsi_debug_cmnd_count = 0;
2844		return length;
2845	}
2846	begin = 0;
2847	pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2848	    "%s [%s]\n"
2849	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2850	    "every_nth=%d(curr:%d)\n"
2851	    "delay=%d, max_luns=%d, scsi_level=%d\n"
2852	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2853	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2854	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2855	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2856	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2857	    scsi_debug_cmnd_count, scsi_debug_delay,
2858	    scsi_debug_max_luns, scsi_debug_scsi_level,
2859	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2860	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2861	    num_host_resets, dix_reads, dix_writes, dif_errors);
2862	if (pos < offset) {
2863		len = 0;
2864		begin = pos;
2865	}
2866	*start = buffer + (offset - begin);	/* Start of wanted data */
2867	len -= (offset - begin);
2868	if (len > length)
2869		len = length;
2870	return len;
2871}
2872
2873static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2874{
2875        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2876}
2877
2878static ssize_t sdebug_delay_store(struct device_driver * ddp,
2879				  const char * buf, size_t count)
2880{
2881        int delay;
2882	char work[20];
2883
2884        if (1 == sscanf(buf, "%10s", work)) {
2885		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2886			scsi_debug_delay = delay;
2887			return count;
2888		}
2889	}
2890	return -EINVAL;
2891}
2892DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2893	    sdebug_delay_store);
2894
2895static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2896{
2897        return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2898}
2899
2900static ssize_t sdebug_opts_store(struct device_driver * ddp,
2901				 const char * buf, size_t count)
2902{
2903        int opts;
2904	char work[20];
2905
2906        if (1 == sscanf(buf, "%10s", work)) {
2907		if (0 == strnicmp(work, "0x", 2)) {
2908			if (1 == sscanf(&work[2], "%x", &opts))
2909				goto opts_done;
2910		} else {
2911			if (1 == sscanf(work, "%d", &opts))
2912				goto opts_done;
2913		}
2914	}
2915	return -EINVAL;
2916opts_done:
2917	scsi_debug_opts = opts;
2918	scsi_debug_cmnd_count = 0;
2919	return count;
2920}
2921DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2922	    sdebug_opts_store);
2923
2924static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2925{
2926        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2927}
2928static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2929				  const char * buf, size_t count)
2930{
2931        int n;
2932
2933	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2934		scsi_debug_ptype = n;
2935		return count;
2936	}
2937	return -EINVAL;
2938}
2939DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2940
2941static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2942{
2943        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2944}
2945static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2946				  const char * buf, size_t count)
2947{
2948        int n;
2949
2950	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2951		scsi_debug_dsense = n;
2952		return count;
2953	}
2954	return -EINVAL;
2955}
2956DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2957	    sdebug_dsense_store);
2958
2959static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2960{
2961        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2962}
2963static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2964				    const char * buf, size_t count)
2965{
2966        int n;
2967
2968	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2969		scsi_debug_fake_rw = n;
2970		return count;
2971	}
2972	return -EINVAL;
2973}
2974DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2975	    sdebug_fake_rw_store);
2976
2977static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2978{
2979        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2980}
2981static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2982				     const char * buf, size_t count)
2983{
2984        int n;
2985
2986	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2987		scsi_debug_no_lun_0 = n;
2988		return count;
2989	}
2990	return -EINVAL;
2991}
2992DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2993	    sdebug_no_lun_0_store);
2994
2995static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2996{
2997        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2998}
2999static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
3000				     const char * buf, size_t count)
3001{
3002        int n;
3003
3004	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3005		scsi_debug_num_tgts = n;
3006		sdebug_max_tgts_luns();
3007		return count;
3008	}
3009	return -EINVAL;
3010}
3011DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
3012	    sdebug_num_tgts_store);
3013
3014static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3015{
3016        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3017}
3018DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3019
3020static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3021{
3022        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3023}
3024DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3025
3026static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3027{
3028        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3029}
3030static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3031				      const char * buf, size_t count)
3032{
3033        int nth;
3034
3035	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3036		scsi_debug_every_nth = nth;
3037		scsi_debug_cmnd_count = 0;
3038		return count;
3039	}
3040	return -EINVAL;
3041}
3042DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3043	    sdebug_every_nth_store);
3044
3045static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3046{
3047        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3048}
3049static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3050				     const char * buf, size_t count)
3051{
3052        int n;
3053
3054	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3055		scsi_debug_max_luns = n;
3056		sdebug_max_tgts_luns();
3057		return count;
3058	}
3059	return -EINVAL;
3060}
3061DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3062	    sdebug_max_luns_store);
3063
3064static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3065{
3066        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3067}
3068static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3069				      const char * buf, size_t count)
3070{
3071        int n;
3072
3073	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3074	    (n <= SCSI_DEBUG_CANQUEUE)) {
3075		scsi_debug_max_queue = n;
3076		return count;
3077	}
3078	return -EINVAL;
3079}
3080DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3081	    sdebug_max_queue_store);
3082
3083static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3084{
3085        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3086}
3087DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3088
3089static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3090{
3091        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3092}
3093DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3094
3095static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3096{
3097        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3098}
3099static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3100				       const char * buf, size_t count)
3101{
3102        int n;
3103
3104	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3105		scsi_debug_virtual_gb = n;
3106
3107		sdebug_capacity = get_sdebug_capacity();
3108
3109		return count;
3110	}
3111	return -EINVAL;
3112}
3113DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3114	    sdebug_virtual_gb_store);
3115
3116static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3117{
3118        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3119}
3120
3121static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3122				     const char * buf, size_t count)
3123{
3124	int delta_hosts;
3125
3126	if (sscanf(buf, "%d", &delta_hosts) != 1)
3127		return -EINVAL;
3128	if (delta_hosts > 0) {
3129		do {
3130			sdebug_add_adapter();
3131		} while (--delta_hosts);
3132	} else if (delta_hosts < 0) {
3133		do {
3134			sdebug_remove_adapter();
3135		} while (++delta_hosts);
3136	}
3137	return count;
3138}
3139DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3140	    sdebug_add_host_store);
3141
3142static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3143					  char * buf)
3144{
3145	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3146}
3147static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3148					   const char * buf, size_t count)
3149{
3150	int n;
3151
3152	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3153		scsi_debug_vpd_use_hostno = n;
3154		return count;
3155	}
3156	return -EINVAL;
3157}
3158DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3159	    sdebug_vpd_use_hostno_store);
3160
3161static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3162{
3163	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3164}
3165DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3166
3167static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3168{
3169	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3170}
3171DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3172
3173static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3174{
3175	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3176}
3177DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3178
3179static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3180{
3181	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3182}
3183DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3184
3185static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3186{
3187	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3188}
3189DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3190
3191static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3192{
3193	ssize_t count;
3194
3195	if (!scsi_debug_lbp())
3196		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3197				 sdebug_store_sectors);
3198
3199	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3200
3201	buf[count++] = '\n';
3202	buf[count++] = 0;
3203
3204	return count;
3205}
3206DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3207
3208
3209/* Note: The following function creates attribute files in the
3210   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3211   files (over those found in the /sys/module/scsi_debug/parameters
3212   directory) is that auxiliary actions can be triggered when an attribute
3213   is changed. For example see: sdebug_add_host_store() above.
3214 */
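/* A small illustration of that advantage (the value 2 is an arbitrary example):
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * goes through sdebug_add_host_store() and therefore adds two simulated hosts,
 * whereas writing to /sys/module/scsi_debug/parameters/add_host would only
 * change the variable without adding any adapters.
 */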
3215static int do_create_driverfs_files(void)
3216{
3217	int ret;
3218
3219	ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3220	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3221	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3222	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3223	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3224	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3225	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3226	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3227	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3228	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3229	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3230	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3231	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3232	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3233	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3234	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3235	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3236	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3237	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3238	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3239	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3240	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3241	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3242	return ret;
3243}
 
3244
3245static void do_remove_driverfs_files(void)
3246{
3247	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3248	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3249	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3250	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3251	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3252	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3253	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3254	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3255	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3256	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3257	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3258	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3259	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3260	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3261	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3262	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3263	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3264	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3265	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3266	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3267	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3268	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3269	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3270}
 
 
 
 
3271
3272struct device *pseudo_primary;
3273
3274static int __init scsi_debug_init(void)
3275{
3276	unsigned long sz;
3277	int host_to_add;
3278	int k;
3279	int ret;
3280
3281	switch (scsi_debug_sector_size) {
3282	case  512:
3283	case 1024:
3284	case 2048:
3285	case 4096:
3286		break;
3287	default:
3288		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3289		       scsi_debug_sector_size);
3290		return -EINVAL;
3291	}
3292
3293	switch (scsi_debug_dif) {
3294
3295	case SD_DIF_TYPE0_PROTECTION:
3296	case SD_DIF_TYPE1_PROTECTION:
3297	case SD_DIF_TYPE2_PROTECTION:
3298	case SD_DIF_TYPE3_PROTECTION:
 
3299		break;
3300
3301	default:
3302		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3303		return -EINVAL;
3304	}
3305
3306	if (scsi_debug_guard > 1) {
3307		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3308		return -EINVAL;
3309	}
3310
3311	if (scsi_debug_ato > 1) {
3312		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3313		return -EINVAL;
3314	}
3315
3316	if (scsi_debug_physblk_exp > 15) {
3317		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3318		       scsi_debug_physblk_exp);
3319		return -EINVAL;
3320	}
3321
3322	if (scsi_debug_lowest_aligned > 0x3fff) {
3323		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3324		       scsi_debug_lowest_aligned);
3325		return -EINVAL;
3326	}
3327
3328	if (scsi_debug_dev_size_mb < 1)
3329		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3330	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3331	sdebug_store_sectors = sz / scsi_debug_sector_size;
3332	sdebug_capacity = get_sdebug_capacity();
3333
3334	/* play around with geometry, don't waste too much on track 0 */
3335	sdebug_heads = 8;
3336	sdebug_sectors_per = 32;
3337	if (scsi_debug_dev_size_mb >= 256)
3338		sdebug_heads = 64;
3339	else if (scsi_debug_dev_size_mb >= 16)
3340		sdebug_heads = 32;
 
 
3341	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3342			       (sdebug_sectors_per * sdebug_heads);
3343	if (sdebug_cylinders_per >= 1024) {
3344		/* other LLDs do this; implies >= 1GB ram disk ... */
3345		sdebug_heads = 255;
3346		sdebug_sectors_per = 63;
3347		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3348			       (sdebug_sectors_per * sdebug_heads);
3349	}
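
	/*
	 * Editor's note, not in the original source: with the defaults
	 * (dev_size_mb=8, sector_size=512) sz is 8 * 1048576 = 8388608 bytes,
	 * so sdebug_store_sectors is 16384.  With heads=8 and sectors_per=32
	 * that is 16384 / (8 * 32) = 64 cylinders, well under 1024, so the
	 * 255-head/63-sector fallback above is not taken.
	 */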
3350
3351	fake_storep = vmalloc(sz);
3352	if (NULL == fake_storep) {
3353		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3354		return -ENOMEM;
3355	}
3356	memset(fake_storep, 0, sz);
3357	if (scsi_debug_num_parts > 0)
3358		sdebug_build_parts(fake_storep, sz);
3359
3360	if (scsi_debug_dif) {
3361		int dif_size;
3362
3363		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3364		dif_storep = vmalloc(dif_size);
3365
3366		printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3367		       dif_size, dif_storep);
3368
3369		if (dif_storep == NULL) {
3370			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3371			ret = -ENOMEM;
3372			goto free_vm;
3373		}
3374
3375		memset(dif_storep, 0xff, dif_size);
3376	}
3377
3378	/* Logical Block Provisioning */
3379	if (scsi_debug_lbp()) {
3380		unsigned int map_bytes;
 
3381
3382		scsi_debug_unmap_max_blocks =
3383			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3384
3385		scsi_debug_unmap_max_desc =
3386			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3387
3388		scsi_debug_unmap_granularity =
3389			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3390
3391		if (scsi_debug_unmap_alignment &&
3392		    scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3393			printk(KERN_ERR
3394			       "%s: ERR: unmap_granularity < unmap_alignment\n",
3395			       __func__);
3396			return -EINVAL;
3397		}
3398
3399		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3400		map_bytes = map_size >> 3;
3401		map_storep = vmalloc(map_bytes);
3402
3403		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3404		       map_size);
3405
3406		if (map_storep == NULL) {
3407			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3408			ret = -ENOMEM;
3409			goto free_vm;
3410		}
3411
3412		memset(map_storep, 0x0, map_bytes);
3413
3414		/* Map first 1KB for partition table */
3415		if (scsi_debug_num_parts)
3416			map_region(0, 2);
3417	}
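
	/*
	 * Editor's note, not in the original source: with the default
	 * unmap_granularity of 1 and the 8 MB ramdisk above, map_size is
	 * 16384 provisioning blocks and map_bytes is 16384 / 8 = 2048 bytes
	 * of bitmap; map_region(0, 2) then marks the two 512-byte blocks
	 * (1KB) reserved for the partition table as already mapped.
	 */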
3418
3419	pseudo_primary = root_device_register("pseudo_0");
3420	if (IS_ERR(pseudo_primary)) {
3421		printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3422		ret = PTR_ERR(pseudo_primary);
3423		goto free_vm;
3424	}
3425	ret = bus_register(&pseudo_lld_bus);
3426	if (ret < 0) {
3427		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3428			ret);
3429		goto dev_unreg;
3430	}
3431	ret = driver_register(&sdebug_driverfs_driver);
3432	if (ret < 0) {
3433		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3434			ret);
3435		goto bus_unreg;
3436	}
3437	ret = do_create_driverfs_files();
3438	if (ret < 0) {
3439		printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3440			ret);
3441		goto del_files;
3442	}
3443
3444	init_all_queued();
3445
3446	host_to_add = scsi_debug_add_host;
3447        scsi_debug_add_host = 0;
3448
3449        for (k = 0; k < host_to_add; k++) {
3450                if (sdebug_add_adapter()) {
3451                        printk(KERN_ERR "scsi_debug_init: "
3452                               "sdebug_add_adapter failed k=%d\n", k);
3453                        break;
3454                }
3455        }
3456
3457	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3458		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3459		       scsi_debug_add_host);
3460	}
3461	return 0;
3462
3463del_files:
3464	do_remove_driverfs_files();
3465	driver_unregister(&sdebug_driverfs_driver);
3466bus_unreg:
3467	bus_unregister(&pseudo_lld_bus);
3468dev_unreg:
3469	root_device_unregister(pseudo_primary);
3470free_vm:
3471	if (map_storep)
3472		vfree(map_storep);
3473	if (dif_storep)
3474		vfree(dif_storep);
3475	vfree(fake_storep);
3476
 
3477	return ret;
3478}
3479
3480static void __exit scsi_debug_exit(void)
3481{
3482	int k = scsi_debug_add_host;
3483
3484	stop_all_queued();
 
3485	for (; k; k--)
3486		sdebug_remove_adapter();
3487	do_remove_driverfs_files();
3488	driver_unregister(&sdebug_driverfs_driver);
3489	bus_unregister(&pseudo_lld_bus);
3490	root_device_unregister(pseudo_primary);
3491
3492	if (dif_storep)
3493		vfree(dif_storep);
3494
3495	vfree(fake_storep);
 
3496}
3497
3498device_initcall(scsi_debug_init);
3499module_exit(scsi_debug_exit);
3500
3501static void sdebug_release_adapter(struct device * dev)
3502{
3503        struct sdebug_host_info *sdbg_host;
3504
3505	sdbg_host = to_sdebug_host(dev);
3506        kfree(sdbg_host);
3507}
3508
3509static int sdebug_add_adapter(void)
3510{
3511	int k, devs_per_host;
3512        int error = 0;
3513        struct sdebug_host_info *sdbg_host;
3514	struct sdebug_dev_info *sdbg_devinfo, *tmp;
3515
3516        sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3517        if (NULL == sdbg_host) {
3518                printk(KERN_ERR "%s: out of memory at line %d\n",
3519                       __func__, __LINE__);
3520                return -ENOMEM;
3521        }
3522
3523        INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3524
3525	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3526        for (k = 0; k < devs_per_host; k++) {
3527		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3528		if (!sdbg_devinfo) {
3529                        printk(KERN_ERR "%s: out of memory at line %d\n",
3530                               __func__, __LINE__);
3531                        error = -ENOMEM;
3532			goto clean;
3533                }
3534        }
3535
3536        spin_lock(&sdebug_host_list_lock);
3537        list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3538        spin_unlock(&sdebug_host_list_lock);
3539
3540        sdbg_host->dev.bus = &pseudo_lld_bus;
3541        sdbg_host->dev.parent = pseudo_primary;
3542        sdbg_host->dev.release = &sdebug_release_adapter;
3543        dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3544
3545        error = device_register(&sdbg_host->dev);
3546
3547        if (error)
3548		goto clean;
3549
3550	++scsi_debug_add_host;
3551        return error;
3552
3553clean:
3554	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3555				 dev_list) {
3556		list_del(&sdbg_devinfo->dev_list);
3557		kfree(sdbg_devinfo);
3558	}
3559
3560	kfree(sdbg_host);
3561        return error;
3562}
3563
3564static void sdebug_remove_adapter(void)
3565{
3566        struct sdebug_host_info * sdbg_host = NULL;
3567
3568        spin_lock(&sdebug_host_list_lock);
3569        if (!list_empty(&sdebug_host_list)) {
3570                sdbg_host = list_entry(sdebug_host_list.prev,
3571                                       struct sdebug_host_info, host_list);
3572		list_del(&sdbg_host->host_list);
3573	}
3574        spin_unlock(&sdebug_host_list_lock);
3575
3576	if (!sdbg_host)
3577		return;
3578
3579        device_unregister(&sdbg_host->dev);
3580        --scsi_debug_add_host;
3581}
3582
3583static
3584int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3585{
3586	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3587	int len, k;
3588	unsigned int num;
3589	unsigned long long lba;
3590	u32 ei_lba;
3591	int errsts = 0;
3592	int target = SCpnt->device->id;
3593	struct sdebug_dev_info *devip = NULL;
3594	int inj_recovered = 0;
3595	int inj_transport = 0;
3596	int inj_dif = 0;
3597	int inj_dix = 0;
3598	int delay_override = 0;
3599	int unmap = 0;
3600
3601	scsi_set_resid(SCpnt, 0);
3602	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3603		printk(KERN_INFO "scsi_debug: cmd ");
3604		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3605			printk("%02x ", (int)cmd[k]);
3606		printk("\n");
3607	}
3608
3609	if (target == SCpnt->device->host->hostt->this_id) {
3610		printk(KERN_INFO "scsi_debug: initiator's id used as "
3611		       "target!\n");
3612		return schedule_resp(SCpnt, NULL, done,
3613				     DID_NO_CONNECT << 16, 0);
3614	}
3615
3616	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3617	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3618		return schedule_resp(SCpnt, NULL, done,
3619				     DID_NO_CONNECT << 16, 0);
3620	devip = devInfoReg(SCpnt->device);
3621	if (NULL == devip)
3622		return schedule_resp(SCpnt, NULL, done,
3623				     DID_NO_CONNECT << 16, 0);
3624
3625	if ((scsi_debug_every_nth != 0) &&
3626	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3627		scsi_debug_cmnd_count = 0;
3628		if (scsi_debug_every_nth < -1)
3629			scsi_debug_every_nth = -1;
3630		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3631			return 0; /* ignore command causing timeout */
3632		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3633			 scsi_medium_access_command(SCpnt))
3634			return 0; /* time out reads and writes */
3635		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3636			inj_recovered = 1; /* to reads and writes below */
3637		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3638			inj_transport = 1; /* to reads and writes below */
3639		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3640			inj_dif = 1; /* to reads and writes below */
3641		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3642			inj_dix = 1; /* to reads and writes below */
3643	}
3644
3645	if (devip->wlun) {
3646		switch (*cmd) {
3647		case INQUIRY:
3648		case REQUEST_SENSE:
3649		case TEST_UNIT_READY:
3650		case REPORT_LUNS:
3651			break;  /* only allowable wlun commands */
3652		default:
3653			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3654				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3655				       "not supported for wlun\n", *cmd);
3656			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3657					INVALID_OPCODE, 0);
3658			errsts = check_condition_result;
3659			return schedule_resp(SCpnt, devip, done, errsts,
3660					     0);
3661		}
3662	}
3663
3664	switch (*cmd) {
3665	case INQUIRY:     /* mandatory, ignore unit attention */
3666		delay_override = 1;
3667		errsts = resp_inquiry(SCpnt, target, devip);
3668		break;
3669	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
3670		delay_override = 1;
3671		errsts = resp_requests(SCpnt, devip);
3672		break;
3673	case REZERO_UNIT:	/* actually this is REWIND for SSC */
3674	case START_STOP:
3675		errsts = resp_start_stop(SCpnt, devip);
3676		break;
3677	case ALLOW_MEDIUM_REMOVAL:
3678		errsts = check_readiness(SCpnt, 1, devip);
3679		if (errsts)
3680			break;
3681		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3682			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3683			       cmd[4] ? "inhibited" : "enabled");
3684		break;
3685	case SEND_DIAGNOSTIC:     /* mandatory */
3686		errsts = check_readiness(SCpnt, 1, devip);
3687		break;
3688	case TEST_UNIT_READY:     /* mandatory */
3689		delay_override = 1;
3690		errsts = check_readiness(SCpnt, 0, devip);
3691		break;
3692	case RESERVE:
3693		errsts = check_readiness(SCpnt, 1, devip);
3694		break;
3695	case RESERVE_10:
3696		errsts = check_readiness(SCpnt, 1, devip);
3697		break;
3698	case RELEASE:
3699		errsts = check_readiness(SCpnt, 1, devip);
3700		break;
3701	case RELEASE_10:
3702		errsts = check_readiness(SCpnt, 1, devip);
3703		break;
3704	case READ_CAPACITY:
3705		errsts = resp_readcap(SCpnt, devip);
3706		break;
3707	case SERVICE_ACTION_IN:
3708		if (cmd[1] == SAI_READ_CAPACITY_16)
3709			errsts = resp_readcap16(SCpnt, devip);
3710		else if (cmd[1] == SAI_GET_LBA_STATUS) {
3711
3712			if (scsi_debug_lbp() == 0) {
3713				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3714						INVALID_COMMAND_OPCODE, 0);
3715				errsts = check_condition_result;
3716			} else
3717				errsts = resp_get_lba_status(SCpnt, devip);
3718		} else {
3719			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3720					INVALID_OPCODE, 0);
3721			errsts = check_condition_result;
3722		}
3723		break;
3724	case MAINTENANCE_IN:
3725		if (MI_REPORT_TARGET_PGS != cmd[1]) {
3726			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3727					INVALID_OPCODE, 0);
3728			errsts = check_condition_result;
3729			break;
3730		}
3731		errsts = resp_report_tgtpgs(SCpnt, devip);
3732		break;
3733	case READ_16:
3734	case READ_12:
3735	case READ_10:
3736		/* READ{10,12,16} and DIF Type 2 are natural enemies */
3737		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3738		    cmd[1] & 0xe0) {
3739			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3740					INVALID_COMMAND_OPCODE, 0);
3741			errsts = check_condition_result;
3742			break;
3743		}
3744
3745		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3746		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3747		    (cmd[1] & 0xe0) == 0)
3748			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3749
3750		/* fall through */
3751	case READ_6:
3752read:
3753		errsts = check_readiness(SCpnt, 0, devip);
3754		if (errsts)
3755			break;
3756		if (scsi_debug_fake_rw)
3757			break;
3758		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3759		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3760		if (inj_recovered && (0 == errsts)) {
3761			mk_sense_buffer(devip, RECOVERED_ERROR,
3762					THRESHOLD_EXCEEDED, 0);
3763			errsts = check_condition_result;
3764		} else if (inj_transport && (0 == errsts)) {
3765			mk_sense_buffer(devip, ABORTED_COMMAND,
3766					TRANSPORT_PROBLEM, ACK_NAK_TO);
3767			errsts = check_condition_result;
3768		} else if (inj_dif && (0 == errsts)) {
3769			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3770			errsts = illegal_condition_result;
3771		} else if (inj_dix && (0 == errsts)) {
3772			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3773			errsts = illegal_condition_result;
3774		}
3775		break;
3776	case REPORT_LUNS:	/* mandatory, ignore unit attention */
3777		delay_override = 1;
3778		errsts = resp_report_luns(SCpnt, devip);
3779		break;
3780	case VERIFY:		/* 10 byte SBC-2 command */
3781		errsts = check_readiness(SCpnt, 0, devip);
3782		break;
3783	case WRITE_16:
3784	case WRITE_12:
3785	case WRITE_10:
3786		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3787		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3788		    cmd[1] & 0xe0) {
3789			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3790					INVALID_COMMAND_OPCODE, 0);
3791			errsts = check_condition_result;
3792			break;
3793		}
3794
3795		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3796		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3797		    (cmd[1] & 0xe0) == 0)
3798			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3799
3800		/* fall through */
3801	case WRITE_6:
3802write:
3803		errsts = check_readiness(SCpnt, 0, devip);
3804		if (errsts)
3805			break;
3806		if (scsi_debug_fake_rw)
3807			break;
3808		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3809		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3810		if (inj_recovered && (0 == errsts)) {
3811			mk_sense_buffer(devip, RECOVERED_ERROR,
3812					THRESHOLD_EXCEEDED, 0);
3813			errsts = check_condition_result;
3814		} else if (inj_dif && (0 == errsts)) {
3815			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3816			errsts = illegal_condition_result;
3817		} else if (inj_dix && (0 == errsts)) {
3818			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3819			errsts = illegal_condition_result;
3820		}
3821		break;
3822	case WRITE_SAME_16:
3823	case WRITE_SAME:
3824		if (cmd[1] & 0x8) {
3825			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3826			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3827				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3828						INVALID_FIELD_IN_CDB, 0);
3829				errsts = check_condition_result;
3830			} else
3831				unmap = 1;
3832		}
3833		if (errsts)
3834			break;
3835		errsts = check_readiness(SCpnt, 0, devip);
3836		if (errsts)
3837			break;
3838		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3839		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3840		break;
3841	case UNMAP:
3842		errsts = check_readiness(SCpnt, 0, devip);
3843		if (errsts)
3844			break;
3845
3846		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3847			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3848					INVALID_COMMAND_OPCODE, 0);
3849			errsts = check_condition_result;
3850		} else
3851			errsts = resp_unmap(SCpnt, devip);
3852		break;
3853	case MODE_SENSE:
3854	case MODE_SENSE_10:
3855		errsts = resp_mode_sense(SCpnt, target, devip);
3856		break;
3857	case MODE_SELECT:
3858		errsts = resp_mode_select(SCpnt, 1, devip);
3859		break;
3860	case MODE_SELECT_10:
3861		errsts = resp_mode_select(SCpnt, 0, devip);
3862		break;
3863	case LOG_SENSE:
3864		errsts = resp_log_sense(SCpnt, devip);
3865		break;
3866	case SYNCHRONIZE_CACHE:
3867		delay_override = 1;
3868		errsts = check_readiness(SCpnt, 0, devip);
3869		break;
3870	case WRITE_BUFFER:
3871		errsts = check_readiness(SCpnt, 1, devip);
3872		break;
3873	case XDWRITEREAD_10:
3874		if (!scsi_bidi_cmnd(SCpnt)) {
3875			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3876					INVALID_FIELD_IN_CDB, 0);
3877			errsts = check_condition_result;
3878			break;
3879		}
3880
3881		errsts = check_readiness(SCpnt, 0, devip);
3882		if (errsts)
3883			break;
3884		if (scsi_debug_fake_rw)
3885			break;
3886		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3887		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3888		if (errsts)
3889			break;
3890		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3891		if (errsts)
3892			break;
3893		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3894		break;
3895	case VARIABLE_LENGTH_CMD:
3896		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3897
3898			if ((cmd[10] & 0xe0) == 0)
3899				printk(KERN_ERR
3900				       "Unprotected RD/WR to DIF device\n");
3901
3902			if (cmd[9] == READ_32) {
3903				BUG_ON(SCpnt->cmd_len < 32);
3904				goto read;
3905			}
3906
3907			if (cmd[9] == WRITE_32) {
3908				BUG_ON(SCpnt->cmd_len < 32);
3909				goto write;
3910			}
3911		}
3912
3913		mk_sense_buffer(devip, ILLEGAL_REQUEST,
3914				INVALID_FIELD_IN_CDB, 0);
3915		errsts = check_condition_result;
3916		break;
3917
3918	default:
3919		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3920			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3921			       "supported\n", *cmd);
3922		errsts = check_readiness(SCpnt, 1, devip);
3923		if (errsts)
3924			break;	/* Unit attention takes precedence */
3925		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3926		errsts = check_condition_result;
3927		break;
3928	}
3929	return schedule_resp(SCpnt, devip, done, errsts,
3930			     (delay_override ? 0 : scsi_debug_delay));
3931}
3932
3933static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3934
3935static struct scsi_host_template sdebug_driver_template = {
3936	.proc_info =		scsi_debug_proc_info,
 
3937	.proc_name =		sdebug_proc_name,
3938	.name =			"SCSI DEBUG",
3939	.info =			scsi_debug_info,
3940	.slave_alloc =		scsi_debug_slave_alloc,
3941	.slave_configure =	scsi_debug_slave_configure,
3942	.slave_destroy =	scsi_debug_slave_destroy,
3943	.ioctl =		scsi_debug_ioctl,
3944	.queuecommand =		scsi_debug_queuecommand,
 
3945	.eh_abort_handler =	scsi_debug_abort,
3946	.eh_bus_reset_handler = scsi_debug_bus_reset,
3947	.eh_device_reset_handler = scsi_debug_device_reset,
 
 
3948	.eh_host_reset_handler = scsi_debug_host_reset,
3949	.bios_param =		scsi_debug_biosparam,
3950	.can_queue =		SCSI_DEBUG_CANQUEUE,
3951	.this_id =		7,
3952	.sg_tablesize =		256,
3953	.cmd_per_lun =		16,
3954	.max_sectors =		0xffff,
3955	.use_clustering = 	DISABLE_CLUSTERING,
3956	.module =		THIS_MODULE,
 
3957};
3958
3959static int sdebug_driver_probe(struct device * dev)
3960{
3961        int error = 0;
3962        struct sdebug_host_info *sdbg_host;
3963        struct Scsi_Host *hpnt;
3964	int host_prot;
3965
3966	sdbg_host = to_sdebug_host(dev);
3967
3968	sdebug_driver_template.can_queue = scsi_debug_max_queue;
 
 
3969	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3970	if (NULL == hpnt) {
3971		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3972		error = -ENODEV;
3973		return error;
3974	}
3975
3976        sdbg_host->shost = hpnt;
3977	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3978	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3979		hpnt->max_id = scsi_debug_num_tgts + 1;
3980	else
3981		hpnt->max_id = scsi_debug_num_tgts;
3982	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
 
3983
3984	host_prot = 0;
3985
3986	switch (scsi_debug_dif) {
3987
3988	case SD_DIF_TYPE1_PROTECTION:
3989		host_prot = SHOST_DIF_TYPE1_PROTECTION;
3990		if (scsi_debug_dix)
3991			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3992		break;
3993
3994	case SD_DIF_TYPE2_PROTECTION:
3995		host_prot = SHOST_DIF_TYPE2_PROTECTION;
3996		if (scsi_debug_dix)
3997			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3998		break;
3999
4000	case SD_DIF_TYPE3_PROTECTION:
4001		host_prot = SHOST_DIF_TYPE3_PROTECTION;
4002		if (scsi_debug_dix)
4003			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4004		break;
4005
4006	default:
4007		if (scsi_debug_dix)
4008			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4009		break;
4010	}
4011
4012	scsi_host_set_prot(hpnt, host_prot);
4013
4014	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4015	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4016	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4017	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4018	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4019	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4020	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4021	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
 
4022
4023	if (scsi_debug_guard == 1)
4024		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4025	else
4026		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4027
 
 
 
 
4028        error = scsi_add_host(hpnt, &sdbg_host->dev);
4029        if (error) {
4030                printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4031                error = -ENODEV;
4032		scsi_host_put(hpnt);
4033        } else
4034		scsi_scan_host(hpnt);
4035
4036
4037        return error;
4038}
4039
4040static int sdebug_driver_remove(struct device * dev)
4041{
4042        struct sdebug_host_info *sdbg_host;
4043	struct sdebug_dev_info *sdbg_devinfo, *tmp;
4044
4045	sdbg_host = to_sdebug_host(dev);
4046
4047	if (!sdbg_host) {
4048		printk(KERN_ERR "%s: Unable to locate host info\n",
4049		       __func__);
4050		return -ENODEV;
4051	}
4052
4053        scsi_remove_host(sdbg_host->shost);
4054
4055	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4056				 dev_list) {
4057                list_del(&sdbg_devinfo->dev_list);
4058                kfree(sdbg_devinfo);
4059        }
4060
4061        scsi_host_put(sdbg_host->shost);
4062        return 0;
4063}
4064
4065static int pseudo_lld_bus_match(struct device *dev,
4066				struct device_driver *dev_driver)
4067{
4068	return 1;
4069}
4070
4071static struct bus_type pseudo_lld_bus = {
4072	.name = "pseudo",
4073	.match = pseudo_lld_bus_match,
4074	.probe = sdebug_driver_probe,
4075	.remove = sdebug_driver_remove,
 
4076};
v4.10.11
   1/*
   2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
   3 *  Copyright (C) 1992  Eric Youngdale
   4 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
   5 *  to make sure that we are not getting blocks mixed up, and PANIC if
   6 *  anything out of the ordinary is seen.
   7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   8 *
   9 * Copyright (C) 2001 - 2016 Douglas Gilbert
 
 
 
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  17 *
  18 */
  19
  20
  21#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  22
  23#include <linux/module.h>
  24
  25#include <linux/kernel.h>
  26#include <linux/errno.h>
  27#include <linux/jiffies.h>
  28#include <linux/slab.h>
  29#include <linux/types.h>
  30#include <linux/string.h>
  31#include <linux/genhd.h>
  32#include <linux/fs.h>
  33#include <linux/init.h>
  34#include <linux/proc_fs.h>
  35#include <linux/vmalloc.h>
  36#include <linux/moduleparam.h>
  37#include <linux/scatterlist.h>
  38#include <linux/blkdev.h>
  39#include <linux/crc-t10dif.h>
  40#include <linux/spinlock.h>
  41#include <linux/interrupt.h>
  42#include <linux/atomic.h>
  43#include <linux/hrtimer.h>
  44#include <linux/uuid.h>
  45#include <linux/t10-pi.h>
  46
  47#include <net/checksum.h>
  48
  49#include <asm/unaligned.h>
  50
  51#include <scsi/scsi.h>
  52#include <scsi/scsi_cmnd.h>
  53#include <scsi/scsi_device.h>
  54#include <scsi/scsi_host.h>
  55#include <scsi/scsicam.h>
  56#include <scsi/scsi_eh.h>
  57#include <scsi/scsi_tcq.h>
  58#include <scsi/scsi_dbg.h>
  59
  60#include "sd.h"
  61#include "scsi_logging.h"
  62
  63/* make sure inq_product_rev string corresponds to this version */
  64#define SDEBUG_VERSION "1.86"
  65static const char *sdebug_version_date = "20160430";
  66
  67#define MY_NAME "scsi_debug"
  68
  69/* Additional Sense Code (ASC) */
  70#define NO_ADDITIONAL_SENSE 0x0
  71#define LOGICAL_UNIT_NOT_READY 0x4
  72#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
  73#define UNRECOVERED_READ_ERR 0x11
  74#define PARAMETER_LIST_LENGTH_ERR 0x1a
  75#define INVALID_OPCODE 0x20
  76#define LBA_OUT_OF_RANGE 0x21
 
  77#define INVALID_FIELD_IN_CDB 0x24
  78#define INVALID_FIELD_IN_PARAM_LIST 0x26
  79#define UA_RESET_ASC 0x29
  80#define UA_CHANGED_ASC 0x2a
  81#define TARGET_CHANGED_ASC 0x3f
  82#define LUNS_CHANGED_ASCQ 0x0e
  83#define INSUFF_RES_ASC 0x55
  84#define INSUFF_RES_ASCQ 0x3
  85#define POWER_ON_RESET_ASCQ 0x0
  86#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
  87#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
  88#define CAPACITY_CHANGED_ASCQ 0x9
  89#define SAVING_PARAMS_UNSUP 0x39
  90#define TRANSPORT_PROBLEM 0x4b
  91#define THRESHOLD_EXCEEDED 0x5d
  92#define LOW_POWER_COND_ON 0x5e
  93#define MISCOMPARE_VERIFY_ASC 0x1d
  94#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
  95#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
  96
  97/* Additional Sense Code Qualifier (ASCQ) */
  98#define ACK_NAK_TO 0x3
  99
 
 
 100/* Default values for driver parameters */
 101#define DEF_NUM_HOST   1
 102#define DEF_NUM_TGTS   1
 103#define DEF_MAX_LUNS   1
 104/* With these defaults, this driver will make 1 host with 1 target
 105 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 106 */
 107#define DEF_ATO 1
 108#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
 109#define DEF_DEV_SIZE_MB   8
 110#define DEF_DIF 0
 111#define DEF_DIX 0
 112#define DEF_D_SENSE   0
 113#define DEF_EVERY_NTH   0
 114#define DEF_FAKE_RW	0
 115#define DEF_GUARD 0
 116#define DEF_HOST_LOCK 0
 117#define DEF_LBPU 0
 118#define DEF_LBPWS 0
 119#define DEF_LBPWS10 0
 120#define DEF_LBPRZ 1
 121#define DEF_LOWEST_ALIGNED 0
 122#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
 123#define DEF_NO_LUN_0   0
 124#define DEF_NUM_PARTS   0
 125#define DEF_OPTS   0
 126#define DEF_OPT_BLKS 1024
 127#define DEF_PHYSBLK_EXP 0
 128#define DEF_PTYPE   TYPE_DISK
 129#define DEF_REMOVABLE false
 130#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
 131#define DEF_SECTOR_SIZE 512
 132#define DEF_UNMAP_ALIGNMENT 0
 133#define DEF_UNMAP_GRANULARITY 1
 134#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
 135#define DEF_UNMAP_MAX_DESC 256
 136#define DEF_VIRTUAL_GB   0
 137#define DEF_VPD_USE_HOSTNO 1
 138#define DEF_WRITESAME_LENGTH 0xFFFF
 139#define DEF_STRICT 0
 140#define DEF_STATISTICS false
 141#define DEF_SUBMIT_QUEUES 1
 142#define DEF_UUID_CTL 0
 143#define JDELAY_OVERRIDDEN -9999
 144
 145#define SDEBUG_LUN_0_VAL 0
 146
 147/* bit mask values for sdebug_opts */
 148#define SDEBUG_OPT_NOISE		1
 149#define SDEBUG_OPT_MEDIUM_ERR		2
 150#define SDEBUG_OPT_TIMEOUT		4
 151#define SDEBUG_OPT_RECOVERED_ERR	8
 152#define SDEBUG_OPT_TRANSPORT_ERR	16
 153#define SDEBUG_OPT_DIF_ERR		32
 154#define SDEBUG_OPT_DIX_ERR		64
 155#define SDEBUG_OPT_MAC_TIMEOUT		128
 156#define SDEBUG_OPT_SHORT_TRANSFER	0x100
 157#define SDEBUG_OPT_Q_NOISE		0x200
 158#define SDEBUG_OPT_ALL_TSF		0x400
 159#define SDEBUG_OPT_RARE_TSF		0x800
 160#define SDEBUG_OPT_N_WCE		0x1000
 161#define SDEBUG_OPT_RESET_NOISE		0x2000
 162#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
 163#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
 164			      SDEBUG_OPT_RESET_NOISE)
 165#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
 166				  SDEBUG_OPT_TRANSPORT_ERR | \
 167				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
 168				  SDEBUG_OPT_SHORT_TRANSFER)
 169/* When "every_nth" > 0 then modulo "every_nth" commands:
 170 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 171 *   - a RECOVERED_ERROR is simulated on successful read and write
 172 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 173 *   - a TRANSPORT_ERROR is simulated on successful read and write
 174 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 175 *
 176 * When "every_nth" < 0 then after "- every_nth" commands:
 177 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 178 *   - a RECOVERED_ERROR is simulated on successful read and write
 179 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 180 *   - a TRANSPORT_ERROR is simulated on successful read and write
  181 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 182 * This will continue on every subsequent command until some other action
  183 * occurs (e.g. the user writing a new value (other than -1 or 1) to
 184 * every_nth via sysfs).
 185 */
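
/* Editor's note, not in the original source: a hedged example of the
 * behaviour described above, using the SDEBUG_OPT_* values defined
 * earlier in this file:
 *
 *   modprobe scsi_debug every_nth=100 opts=4
 *
 * should make every 100th command appear to time out (opts=4 is
 * SDEBUG_OPT_TIMEOUT), while opts=8 would inject a RECOVERED_ERROR
 * instead; both values can also be changed later through the driver's
 * sysfs attributes. */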
 186
 187/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 188 * priority order. In the subset implemented here lower numbers have higher
 189 * priority. The UA numbers should be a sequence starting from 0 with
 190 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
 191#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
 192#define SDEBUG_UA_BUS_RESET 1
 193#define SDEBUG_UA_MODE_CHANGED 2
 194#define SDEBUG_UA_CAPACITY_CHANGED 3
 195#define SDEBUG_UA_LUNS_CHANGED 4
 196#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
 197#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
 198#define SDEBUG_NUM_UAS 7
 199
  200/* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium
  201 * error is simulated at this sector on read commands: */
 202#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 203#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
 204
 205/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 206 * or "peripheral device" addressing (value 0) */
 207#define SAM2_LUN_ADDRESS_METHOD 0
 208
 209/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 210 * (for response) per submit queue at one time. Can be reduced by max_queue
 211 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 212 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 213 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 214 * but cannot exceed SDEBUG_CANQUEUE .
 215 */
 216#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
 217#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
 218#define DEF_CMD_PER_LUN  255
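
/* Editor's note, not in the original source: on a 64-bit kernel
 * BITS_PER_LONG is 64, so SDEBUG_CANQUEUE works out to 3 * 64 = 192
 * queued commands per submit queue (96 on a 32-bit kernel). */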
 219
 220#define F_D_IN			1
 221#define F_D_OUT			2
 222#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
 223#define F_D_UNKN		8
 224#define F_RL_WLUN_OK		0x10
 225#define F_SKIP_UA		0x20
 226#define F_DELAY_OVERR		0x40
 227#define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
 228#define F_SA_HIGH		0x100	/* as used by variable length cdbs */
 229#define F_INV_OP		0x200
 230#define F_FAKE_RW		0x400
 231#define F_M_ACCESS		0x800	/* media access */
 232
 233#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
 234#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
 235#define FF_SA (F_SA_HIGH | F_SA_LOW)
 236
 237#define SDEBUG_MAX_PARTS 4
 238
 239#define SDEBUG_MAX_CMD_LEN 32
 240
 241
 242struct sdebug_dev_info {
 243	struct list_head dev_list;
 
 244	unsigned int channel;
 245	unsigned int target;
 246	u64 lun;
 247	uuid_be lu_name;
 248	struct sdebug_host_info *sdbg_host;
 249	unsigned long uas_bm[1];
 250	atomic_t num_in_q;
 251	atomic_t stopped;
 252	bool used;
 253};
 254
 255struct sdebug_host_info {
 256	struct list_head host_list;
 257	struct Scsi_Host *shost;
 258	struct device dev;
 259	struct list_head dev_info_list;
 260};
 261
 262#define to_sdebug_host(d)	\
 263	container_of(d, struct sdebug_host_info, dev)
 264
 265struct sdebug_defer {
 266	struct hrtimer hrt;
 267	struct execute_work ew;
 268	int sqa_idx;	/* index of sdebug_queue array */
 269	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
 270	int issuing_cpu;
 271};
 272
 273struct sdebug_queued_cmd {
 274	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
 275	 * instance indicates this slot is in use.
 276	 */
 277	struct sdebug_defer *sd_dp;
 278	struct scsi_cmnd *a_cmnd;
 279	unsigned int inj_recovered:1;
 280	unsigned int inj_transport:1;
 281	unsigned int inj_dif:1;
 282	unsigned int inj_dix:1;
 283	unsigned int inj_short:1;
 284};
 285
 286struct sdebug_queue {
 287	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
 288	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
 289	spinlock_t qc_lock;
 290	atomic_t blocked;	/* to temporarily stop more being queued */
 291};
 292
 293static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
 294static atomic_t sdebug_completions;  /* count of deferred completions */
 295static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 296static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
 297
 298struct opcode_info_t {
 299	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
 300				/* for terminating element */
 301	u8 opcode;		/* if num_attached > 0, preferred */
 302	u16 sa;			/* service action */
 303	u32 flags;		/* OR-ed set of SDEB_F_* */
 304	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
 305	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
 306	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
 307				/* ignore cdb bytes after position 15 */
 308};
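
/* Editor's note, not in the original source: as a worked example of the
 * len_mask convention above, the INQUIRY entry later in this file is
 * {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}: len_mask[0]=6 says it is a
 * 6-byte cdb, and 0xe3 is the set of bits that may legitimately be set
 * in cdb[1].  These masks appear to be consulted when the "strict"
 * parameter is set, to flag reserved cdb bits. */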
 309
 310/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
 311enum sdeb_opcode_index {
 312	SDEB_I_INVALID_OPCODE =	0,
 313	SDEB_I_INQUIRY = 1,
 314	SDEB_I_REPORT_LUNS = 2,
 315	SDEB_I_REQUEST_SENSE = 3,
 316	SDEB_I_TEST_UNIT_READY = 4,
 317	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
 318	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
 319	SDEB_I_LOG_SENSE = 7,
 320	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
 321	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
 322	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
 323	SDEB_I_START_STOP = 11,
 324	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
 325	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
 326	SDEB_I_MAINT_IN = 14,
 327	SDEB_I_MAINT_OUT = 15,
 328	SDEB_I_VERIFY = 16,		/* 10 only */
 329	SDEB_I_VARIABLE_LEN = 17,
 330	SDEB_I_RESERVE = 18,		/* 6, 10 */
 331	SDEB_I_RELEASE = 19,		/* 6, 10 */
 332	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
 333	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
 334	SDEB_I_ATA_PT = 22,		/* 12, 16 */
 335	SDEB_I_SEND_DIAG = 23,
 336	SDEB_I_UNMAP = 24,
 337	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
 338	SDEB_I_WRITE_BUFFER = 26,
 339	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
 340	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
 341	SDEB_I_COMP_WRITE = 29,
 342	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
 343};
 344
 345
 346static const unsigned char opcode_ind_arr[256] = {
 347/* 0x0; 0x0->0x1f: 6 byte cdbs */
 348	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
 349	    0, 0, 0, 0,
 350	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 351	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 352	    SDEB_I_RELEASE,
 353	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
 354	    SDEB_I_ALLOW_REMOVAL, 0,
 355/* 0x20; 0x20->0x3f: 10 byte cdbs */
 356	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
 357	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
 358	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
 359	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
 360/* 0x40; 0x40->0x5f: 10 byte cdbs */
 361	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
 362	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
 363	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 364	    SDEB_I_RELEASE,
 365	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
 366/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
 367	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 368	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 369	0, SDEB_I_VARIABLE_LEN,
 370/* 0x80; 0x80->0x9f: 16 byte cdbs */
 371	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
 372	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
 373	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
 374	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
 375/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
 376	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
 377	     SDEB_I_MAINT_OUT, 0, 0, 0,
 378	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
 379	     0, 0, 0, 0,
 380	0, 0, 0, 0, 0, 0, 0, 0,
 381	0, 0, 0, 0, 0, 0, 0, 0,
 382/* 0xc0; 0xc0->0xff: vendor specific */
 383	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 384	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 385	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 386	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 387};
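
/* Editor's note, not in the original source: example lookup in the table
 * above.  READ(10) has opcode 0x28, which falls in the "0x20->0x3f" row
 * and maps to SDEB_I_READ (9); opcode_info_arr[9] is the READ(16) entry
 * whose read_iarr variants (READ 10/6/12) all point at resp_read_dt0()
 * together with the appropriate cdb length and masks. */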
 388
 389static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
 390static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
 391static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
 392static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 393static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
 394static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
 395static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
 396static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 397static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
 398static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
 399static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
 400static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
 401static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
 402static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
 403static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
 404static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
 405static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 406static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
 407static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 408static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 409static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 410
 411static const struct opcode_info_t msense_iarr[1] = {
 412	{0, 0x1a, 0, F_D_IN, NULL, NULL,
 413	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 414};
 415
 416static const struct opcode_info_t mselect_iarr[1] = {
 417	{0, 0x15, 0, F_D_OUT, NULL, NULL,
 418	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 419};
 420
 421static const struct opcode_info_t read_iarr[3] = {
 422	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
 423	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
 424	     0, 0, 0, 0} },
 425	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
 426	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 427	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
 428	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
 429	     0xc7, 0, 0, 0, 0} },
 430};
 431
 432static const struct opcode_info_t write_iarr[3] = {
 433	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
 434	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
 435	     0, 0, 0, 0} },
 436	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
 437	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 438	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
 439	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
 440	     0xc7, 0, 0, 0, 0} },
 441};
 442
 443static const struct opcode_info_t sa_in_iarr[1] = {
 444	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
 445	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 446	     0xff, 0xff, 0xff, 0, 0xc7} },
 447};
 448
 449static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
 450	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
 451	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
 452		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
 453};
 454
 455static const struct opcode_info_t maint_in_iarr[2] = {
 456	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
 457	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
 458	     0xc7, 0, 0, 0, 0} },
 459	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
 460	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 461	     0, 0} },
 462};
 463
 464static const struct opcode_info_t write_same_iarr[1] = {
 465	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
 466	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 467	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
 468};
 469
 470static const struct opcode_info_t reserve_iarr[1] = {
 471	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
 472	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 473};
 474
 475static const struct opcode_info_t release_iarr[1] = {
 476	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
 477	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 478};
 
 479
 480
 481/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 482 * plus the terminating elements for logic that scans this table such as
 483 * REPORT SUPPORTED OPERATION CODES. */
 484static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 485/* 0 */
 486	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
 487	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 488	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
 489	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 490	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
 491	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
 492	     0, 0} },
 493	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
 494	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 495	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
 496	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 497	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
 498	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 499	     0} },
 500	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
 501	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 502	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
 503	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
 504	     0, 0, 0} },
 505	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
 506	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
 507	     0, 0} },
 508	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
 509	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 510	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
 511/* 10 */
 512	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
 513	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 514	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
 515	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
 516	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 517	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
 518	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 519	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
 520	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
 521	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 522	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
 523	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
 524	     0} },
 525	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 526	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 527	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
 528	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
 529	     0, 0, 0, 0, 0, 0} },
 530	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
 531	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
 532		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
 533	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
 534	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 535	     0} },
 536	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
 537	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 538	     0} },
 539/* 20 */
 540	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
 541	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 542	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 543	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 544	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
 545	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
  546	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
 547	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 548	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
 549	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 550	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
 551	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
 552		   0, 0, 0, 0, 0, 0} },
 553	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
 554	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
 555	     0, 0, 0, 0} },			/* WRITE_BUFFER */
 556	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
 557	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
 558			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 559	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
 560	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
 561	     0, 0, 0, 0} },
 562	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
 563	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
 564	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
 565
 566/* 30 */
 567	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
 568	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 569};
 570
 571static int sdebug_add_host = DEF_NUM_HOST;
 572static int sdebug_ato = DEF_ATO;
 573static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
 574static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
 575static int sdebug_dif = DEF_DIF;
 576static int sdebug_dix = DEF_DIX;
 577static int sdebug_dsense = DEF_D_SENSE;
 578static int sdebug_every_nth = DEF_EVERY_NTH;
 579static int sdebug_fake_rw = DEF_FAKE_RW;
 580static unsigned int sdebug_guard = DEF_GUARD;
 581static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
 582static int sdebug_max_luns = DEF_MAX_LUNS;
 583static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
 584static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
 585static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
 586static int sdebug_no_lun_0 = DEF_NO_LUN_0;
 587static int sdebug_no_uld;
 588static int sdebug_num_parts = DEF_NUM_PARTS;
 589static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 590static int sdebug_opt_blks = DEF_OPT_BLKS;
 591static int sdebug_opts = DEF_OPTS;
 592static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
 593static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
 594static int sdebug_scsi_level = DEF_SCSI_LEVEL;
 595static int sdebug_sector_size = DEF_SECTOR_SIZE;
 596static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
 597static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 598static unsigned int sdebug_lbpu = DEF_LBPU;
 599static unsigned int sdebug_lbpws = DEF_LBPWS;
 600static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
 601static unsigned int sdebug_lbprz = DEF_LBPRZ;
 602static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 603static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 604static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 605static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 606static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
 607static int sdebug_uuid_ctl = DEF_UUID_CTL;
 608static bool sdebug_removable = DEF_REMOVABLE;
 609static bool sdebug_clustering;
 610static bool sdebug_host_lock = DEF_HOST_LOCK;
 611static bool sdebug_strict = DEF_STRICT;
 612static bool sdebug_any_injecting_opt;
 613static bool sdebug_verbose;
 614static bool have_dif_prot;
 615static bool sdebug_statistics = DEF_STATISTICS;
 616static bool sdebug_mq_active;
 617
 618static unsigned int sdebug_store_sectors;
 619static sector_t sdebug_capacity;	/* in sectors */
 620
 621/* old BIOS stuff, kernel may get rid of them but some mode sense pages
 622   may still need them */
 623static int sdebug_heads;		/* heads per disk */
 624static int sdebug_cylinders_per;	/* cylinders per surface */
 625static int sdebug_sectors_per;		/* sectors per cylinder */
 626
 627static LIST_HEAD(sdebug_host_list);
 628static DEFINE_SPINLOCK(sdebug_host_list_lock);
 629
 630static unsigned char *fake_storep;	/* ramdisk storage */
 631static struct t10_pi_tuple *dif_storep;	/* protection info */
 632static void *map_storep;		/* provisioning map */
 633
 634static unsigned long map_size;
 635static int num_aborts;
 636static int num_dev_resets;
 637static int num_target_resets;
 638static int num_bus_resets;
 639static int num_host_resets;
 640static int dix_writes;
 641static int dix_reads;
 642static int dif_errors;
 643
 644static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
 645static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
 646
 647static DEFINE_RWLOCK(atomic_rw);
 648
 649static char sdebug_proc_name[] = MY_NAME;
 650static const char *my_name = MY_NAME;
 651
 652static struct bus_type pseudo_lld_bus;
 653
 654static struct device_driver sdebug_driverfs_driver = {
 655	.name 		= sdebug_proc_name,
 656	.bus		= &pseudo_lld_bus,
 657};
 658
 659static const int check_condition_result =
 660		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 661
 662static const int illegal_condition_result =
 663	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
 664
 665static const int device_qfull_result =
 666	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
 667
 668
 669/* Only do the extra work involved in logical block provisioning if one or
 670 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 671 * real reads and writes (i.e. not skipping them for speed).
 672 */
 673static inline bool scsi_debug_lbp(void)
 674{
 675	return 0 == sdebug_fake_rw &&
 676		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 677}
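
/*
 * For illustration (a sketch, not a requirement of this file): logical block
 * provisioning is typically exercised by loading the module with e.g.
 *     modprobe scsi_debug lbpu=1 lbpws=1 fake_rw=0
 * so that UNMAP and WRITE SAME(16) with UNMAP are advertised; with fake_rw=1
 * the provisioning machinery below is skipped entirely.
 */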
 678
 679static void *fake_store(unsigned long long lba)
 680{
 681	lba = do_div(lba, sdebug_store_sectors);
 682
 683	return fake_storep + lba * sdebug_sector_size;
 684}
 685
 686static struct t10_pi_tuple *dif_store(sector_t sector)
 687{
 688	sector = sector_div(sector, sdebug_store_sectors);
 689
 690	return dif_storep + sector;
 691}
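
/*
 * Worked example of the modulo mapping above (assuming an 8 MiB store and
 * 512 byte sectors, i.e. sdebug_store_sectors == 16384): an access to
 * LBA 16500 wraps to 16500 % 16384 == 116, so fake_store() returns
 * fake_storep + 116 * 512.  This lets a small shared RAM buffer back a much
 * larger advertised capacity (see get_sdebug_capacity() below).
 */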
 692
 693static void sdebug_max_tgts_luns(void)
 694{
 695	struct sdebug_host_info *sdbg_host;
 696	struct Scsi_Host *hpnt;
 697
 698	spin_lock(&sdebug_host_list_lock);
 699	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 700		hpnt = sdbg_host->shost;
 701		if ((hpnt->this_id >= 0) &&
 702		    (sdebug_num_tgts > hpnt->this_id))
 703			hpnt->max_id = sdebug_num_tgts + 1;
 704		else
 705			hpnt->max_id = sdebug_num_tgts;
 706		/* sdebug_max_luns; */
 707		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 708	}
 709	spin_unlock(&sdebug_host_list_lock);
 710}
 711
 712enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
 713
 714/* Set in_bit to -1 to indicate no bit position of invalid field */
 715static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
 716				 enum sdeb_cmd_data c_d,
 717				 int in_byte, int in_bit)
 718{
 719	unsigned char *sbuff;
 720	u8 sks[4];
 721	int sl, asc;
 722
 723	sbuff = scp->sense_buffer;
 724	if (!sbuff) {
 725		sdev_printk(KERN_ERR, scp->device,
 726			    "%s: sense_buffer is NULL\n", __func__);
 727		return;
 728	}
 729	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
 730	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 731	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
 732	memset(sks, 0, sizeof(sks));
 733	sks[0] = 0x80;
 734	if (c_d)
 735		sks[0] |= 0x40;
 736	if (in_bit >= 0) {
 737		sks[0] |= 0x8;
 738		sks[0] |= 0x7 & in_bit;
 739	}
 740	put_unaligned_be16(in_byte, sks + 1);
 741	if (sdebug_dsense) {
 742		sl = sbuff[7] + 8;
 743		sbuff[7] = sl;
 744		sbuff[sl] = 0x2;
 745		sbuff[sl + 1] = 0x6;
 746		memcpy(sbuff + sl + 4, sks, 3);
 747	} else
 748		memcpy(sbuff + 15, sks, 3);
 749	if (sdebug_verbose)
 750		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
 751			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
 752			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
 753}
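
/*
 * Example of the sense-key specific bytes built above: the call
 *     mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
 * reports ILLEGAL REQUEST / INVALID FIELD IN CDB with SKS bytes
 * 0xcd 0x00 0x02 (SKSV=1, C/D=1, BPV=1, bit 5 of CDB byte 2).
 */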
 754
 755static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
 756{
 757	unsigned char *sbuff;
 758
 759	sbuff = scp->sense_buffer;
 760	if (!sbuff) {
 761		sdev_printk(KERN_ERR, scp->device,
 762			    "%s: sense_buffer is NULL\n", __func__);
 763		return;
 764	}
 765	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 766
 767	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
 768
 769	if (sdebug_verbose)
 770		sdev_printk(KERN_INFO, scp->device,
 771			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
 772			    my_name, key, asc, asq);
 773}
 774
 775static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
 776{
 777	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
 778}
 779
 780static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 781{
 782	if (sdebug_verbose) {
 783		if (0x1261 == cmd)
 784			sdev_printk(KERN_INFO, dev,
 785				    "%s: BLKFLSBUF [0x1261]\n", __func__);
 786		else if (0x5331 == cmd)
 787			sdev_printk(KERN_INFO, dev,
 788				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
 789				    __func__);
 790		else
 791			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
 792				    __func__, cmd);
 793	}
 794	return -EINVAL;
 795	/* return -ENOTTY; // correct return but upsets fdisk */
 796}
 797
 798static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
 
 799{
 800	struct sdebug_host_info *sdhp;
 801	struct sdebug_dev_info *dp;
 802
 803	spin_lock(&sdebug_host_list_lock);
 804	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
 805		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
 806			if ((devip->sdbg_host == dp->sdbg_host) &&
 807			    (devip->target == dp->target))
 808				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
 809		}
 810	}
 811	spin_unlock(&sdebug_host_list_lock);
 812}
 813
 814static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 815{
 816	int k;
 817
 818	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 819	if (k != SDEBUG_NUM_UAS) {
 820		const char *cp = NULL;
 821
 822		switch (k) {
 823		case SDEBUG_UA_POR:
 824			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
 825					POWER_ON_RESET_ASCQ);
 826			if (sdebug_verbose)
 827				cp = "power on reset";
 828			break;
 829		case SDEBUG_UA_BUS_RESET:
 830			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
 831					BUS_RESET_ASCQ);
 832			if (sdebug_verbose)
 833				cp = "bus reset";
 834			break;
 835		case SDEBUG_UA_MODE_CHANGED:
 836			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
 837					MODE_CHANGED_ASCQ);
 838			if (sdebug_verbose)
 839				cp = "mode parameters changed";
 840			break;
 841		case SDEBUG_UA_CAPACITY_CHANGED:
 842			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
 843					CAPACITY_CHANGED_ASCQ);
 844			if (sdebug_verbose)
 845				cp = "capacity data changed";
 846			break;
 847		case SDEBUG_UA_MICROCODE_CHANGED:
 848			mk_sense_buffer(scp, UNIT_ATTENTION,
 849					TARGET_CHANGED_ASC,
 850					MICROCODE_CHANGED_ASCQ);
 851			if (sdebug_verbose)
 852				cp = "microcode has been changed";
 853			break;
 854		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
 855			mk_sense_buffer(scp, UNIT_ATTENTION,
 856					TARGET_CHANGED_ASC,
 857					MICROCODE_CHANGED_WO_RESET_ASCQ);
 858			if (sdebug_verbose)
 859				cp = "microcode has been changed without reset";
 860			break;
 861		case SDEBUG_UA_LUNS_CHANGED:
 862			/*
 863			 * SPC-3 behavior is to report a UNIT ATTENTION with
 864			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
 865			 * on the target, until a REPORT LUNS command is
 866			 * received.  SPC-4 behavior is to report it only once.
 867			 * NOTE:  sdebug_scsi_level does not use the same
 868			 * values as struct scsi_device->scsi_level.
 869			 */
 870			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
 871				clear_luns_changed_on_target(devip);
 872			mk_sense_buffer(scp, UNIT_ATTENTION,
 873					TARGET_CHANGED_ASC,
 874					LUNS_CHANGED_ASCQ);
 875			if (sdebug_verbose)
 876				cp = "reported luns data has changed";
 877			break;
 878		default:
 879			pr_warn("unexpected unit attention code=%d\n", k);
 880			if (sdebug_verbose)
 881				cp = "unknown";
 882			break;
 883		}
 884		clear_bit(k, devip->uas_bm);
 885		if (sdebug_verbose)
 886			sdev_printk(KERN_INFO, scp->device,
 887				   "%s reports: Unit attention: %s\n",
 888				   my_name, cp);
 889		return check_condition_result;
 890	}
 891	return 0;
 892}
 893
 894/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
 895static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 896				int arr_len)
 897{
 898	int act_len;
 899	struct scsi_data_buffer *sdb = scsi_in(scp);
 900
 901	if (!sdb->length)
 902		return 0;
 903	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
 904		return DID_ERROR << 16;
 905
 906	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
 907				      arr, arr_len);
 908	sdb->resid = scsi_bufflen(scp) - act_len;
 909
 910	return 0;
 911}
 912
 913/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 914 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 915 * calls, not required to write in ascending offset order. Assumes resid
 916 * set to scsi_bufflen() prior to any calls.
 917 */
 918static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
 919				  int arr_len, unsigned int off_dst)
 920{
 921	int act_len, n;
 922	struct scsi_data_buffer *sdb = scsi_in(scp);
 923	off_t skip = off_dst;
 924
 925	if (sdb->length <= off_dst)
 926		return 0;
 927	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
 928		return DID_ERROR << 16;
 929
 930	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
 931				       arr, arr_len, skip);
 932	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
 933		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
 934	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
 935	sdb->resid = min(sdb->resid, n);
 936	return 0;
 937}
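
/*
 * Example of the resid accounting above: with scsi_bufflen() == 512, a call
 * with off_dst == 64 that actually copies 32 bytes leaves at most
 * 512 - (64 + 32) = 416 bytes unfilled, so resid is clamped to 416.  Because
 * min() is used, a later call at a lower offset can only shrink resid
 * further, which is why callers need not write in ascending offset order.
 */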
 938
 939/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 940 * 'arr' or -1 if error.
 941 */
 942static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
 943			       int arr_len)
 944{
 945	if (!scsi_bufflen(scp))
 946		return 0;
 947	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
 948		return -1;
 949
 950	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
 951}
 952
 953
 954static const char * inq_vendor_id = "Linux   ";
 955static const char * inq_product_id = "scsi_debug      ";
 956static const char *inq_product_rev = "0186";	/* version less '.' */
 957/* Use some locally assigned NAAs for SAS addresses. */
 958static const u64 naa3_comp_a = 0x3222222000000000ULL;
 959static const u64 naa3_comp_b = 0x3333333000000000ULL;
 960static const u64 naa3_comp_c = 0x3111111000000000ULL;
 961
 962/* Device identification VPD page. Returns number of bytes placed in arr */
 963static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
 964			  int target_dev_id, int dev_id_num,
 965			  const char *dev_id_str, int dev_id_str_len,
 966			  const uuid_be *lu_name)
 967{
 968	int num, port_a;
 969	char b[32];
 970
 971	port_a = target_dev_id + 1;
 972	/* T10 vendor identifier field format (faked) */
 973	arr[0] = 0x2;	/* ASCII */
 974	arr[1] = 0x1;
 975	arr[2] = 0x0;
 976	memcpy(&arr[4], inq_vendor_id, 8);
 977	memcpy(&arr[12], inq_product_id, 16);
 978	memcpy(&arr[28], dev_id_str, dev_id_str_len);
 979	num = 8 + 16 + dev_id_str_len;
 980	arr[3] = num;
 981	num += 4;
 982	if (dev_id_num >= 0) {
 983		if (sdebug_uuid_ctl) {
 984			/* Locally assigned UUID */
 985			arr[num++] = 0x1;  /* binary (not necessarily sas) */
 986			arr[num++] = 0xa;  /* PIV=0, lu, naa */
 987			arr[num++] = 0x0;
 988			arr[num++] = 0x12;
 989			arr[num++] = 0x10; /* uuid type=1, locally assigned */
 990			arr[num++] = 0x0;
 991			memcpy(arr + num, lu_name, 16);
 992			num += 16;
 993		} else {
 994			/* NAA-3, Logical unit identifier (binary) */
 995			arr[num++] = 0x1;  /* binary (not necessarily sas) */
 996			arr[num++] = 0x3;  /* PIV=0, lu, naa */
 997			arr[num++] = 0x0;
 998			arr[num++] = 0x8;
 999			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1000			num += 8;
1001		}
1002		/* Target relative port number */
1003		arr[num++] = 0x61;	/* proto=sas, binary */
1004		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1005		arr[num++] = 0x0;	/* reserved */
1006		arr[num++] = 0x4;	/* length */
1007		arr[num++] = 0x0;	/* reserved */
1008		arr[num++] = 0x0;	/* reserved */
1009		arr[num++] = 0x0;
1010		arr[num++] = 0x1;	/* relative port A */
1011	}
1012	/* NAA-3, Target port identifier */
1013	arr[num++] = 0x61;	/* proto=sas, binary */
1014	arr[num++] = 0x93;	/* piv=1, target port, naa */
1015	arr[num++] = 0x0;
1016	arr[num++] = 0x8;
1017	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1018	num += 8;
1019	/* NAA-3, Target port group identifier */
1020	arr[num++] = 0x61;	/* proto=sas, binary */
1021	arr[num++] = 0x95;	/* piv=1, target port group id */
1022	arr[num++] = 0x0;
1023	arr[num++] = 0x4;
1024	arr[num++] = 0;
1025	arr[num++] = 0;
1026	put_unaligned_be16(port_group_id, arr + num);
1027	num += 2;
1028	/* NAA-3, Target device identifier */
1029	arr[num++] = 0x61;	/* proto=sas, binary */
1030	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1031	arr[num++] = 0x0;
1032	arr[num++] = 0x8;
1033	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1034	num += 8;
1035	/* SCSI name string: Target device identifier */
1036	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1037	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1038	arr[num++] = 0x0;
1039	arr[num++] = 24;
1040	memcpy(arr + num, "naa.32222220", 12);
1041	num += 12;
1042	snprintf(b, sizeof(b), "%08X", target_dev_id);
1043	memcpy(arr + num, b, 8);
1044	num += 8;
1045	memset(arr + num, 0, 4);
1046	num += 4;
1047	return num;
1048}
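
/*
 * Summary of the designation descriptors emitted above: a T10 vendor id
 * (logical unit), either a locally assigned UUID or an NAA-3 logical unit
 * identifier, a relative target port, an NAA-3 target port, a target port
 * group, an NAA-3 target device and a "naa." SCSI name string.  All values
 * are fabricated from host/target/lun numbers, not read from real hardware.
 */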
1049
 
1050static unsigned char vpd84_data[] = {
1051/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1052    0x22,0x22,0x22,0x0,0xbb,0x1,
1053    0x22,0x22,0x22,0x0,0xbb,0x2,
1054};
1055
1056/*  Software interface identification VPD page */
1057static int inquiry_vpd_84(unsigned char *arr)
1058{
1059	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1060	return sizeof(vpd84_data);
1061}
1062
1063/* Management network addresses VPD page */
1064static int inquiry_vpd_85(unsigned char *arr)
1065{
1066	int num = 0;
1067	const char * na1 = "https://www.kernel.org/config";
1068	const char * na2 = "http://www.kernel.org/log";
1069	int plen, olen;
1070
1071	arr[num++] = 0x1;	/* lu, storage config */
1072	arr[num++] = 0x0;	/* reserved */
1073	arr[num++] = 0x0;
1074	olen = strlen(na1);
1075	plen = olen + 1;
1076	if (plen % 4)
1077		plen = ((plen / 4) + 1) * 4;
1078	arr[num++] = plen;	/* length, null terminated, padded */
1079	memcpy(arr + num, na1, olen);
1080	memset(arr + num + olen, 0, plen - olen);
1081	num += plen;
1082
1083	arr[num++] = 0x4;	/* lu, logging */
1084	arr[num++] = 0x0;	/* reserved */
1085	arr[num++] = 0x0;
1086	olen = strlen(na2);
1087	plen = olen + 1;
1088	if (plen % 4)
1089		plen = ((plen / 4) + 1) * 4;
1090	arr[num++] = plen;	/* length, null terminated, padded */
1091	memcpy(arr + num, na2, olen);
1092	memset(arr + num + olen, 0, plen - olen);
1093	num += plen;
1094
1095	return num;
1096}
1097
1098/* SCSI ports VPD page */
1099static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1100{
1101	int num = 0;
1102	int port_a, port_b;
1103
1104	port_a = target_dev_id + 1;
1105	port_b = port_a + 1;
1106	arr[num++] = 0x0;	/* reserved */
1107	arr[num++] = 0x0;	/* reserved */
1108	arr[num++] = 0x0;
1109	arr[num++] = 0x1;	/* relative port 1 (primary) */
1110	memset(arr + num, 0, 6);
1111	num += 6;
1112	arr[num++] = 0x0;
1113	arr[num++] = 12;	/* length tp descriptor */
1114	/* naa-5 target port identifier (A) */
1115	arr[num++] = 0x61;	/* proto=sas, binary */
1116	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1117	arr[num++] = 0x0;	/* reserved */
1118	arr[num++] = 0x8;	/* length */
1119	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1120	num += 8;
1121	arr[num++] = 0x0;	/* reserved */
1122	arr[num++] = 0x0;	/* reserved */
1123	arr[num++] = 0x0;
1124	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1125	memset(arr + num, 0, 6);
1126	num += 6;
1127	arr[num++] = 0x0;
1128	arr[num++] = 12;	/* length tp descriptor */
1129	/* naa-5 target port identifier (B) */
1130	arr[num++] = 0x61;	/* proto=sas, binary */
1131	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1132	arr[num++] = 0x0;	/* reserved */
1133	arr[num++] = 0x8;	/* length */
1134	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1135	num += 8;
1136
1137	return num;
1138}
1139
1140
1141static unsigned char vpd89_data[] = {
1142/* from 4th byte */ 0,0,0,0,
1143'l','i','n','u','x',' ',' ',' ',
1144'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1145'1','2','3','4',
11460x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
11470xec,0,0,0,
11480x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
11490,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
11500x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
11510x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
11520x53,0x41,
11530x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
11540x20,0x20,
11550x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
11560x10,0x80,
11570,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
11580x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
11590x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
11600,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
11610x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
11620x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
11630,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
11640,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11650,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11660,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11670x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
11680,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
11690xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
11700,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
11710,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11720,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11730,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11740,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11750,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11760,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11770,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11780,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11790,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11800,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11810,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
11820,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1183};
1184
1185/* ATA Information VPD page */
1186static int inquiry_vpd_89(unsigned char *arr)
1187{
1188	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1189	return sizeof(vpd89_data);
1190}
1191
1192
 
1193static unsigned char vpdb0_data[] = {
1194	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1195	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1196	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1197	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1198};
1199
1200/* Block limits VPD page (SBC-3) */
1201static int inquiry_vpd_b0(unsigned char *arr)
1202{
1203	unsigned int gran;
1204
1205	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1206
1207	/* Optimal transfer length granularity */
1208	gran = 1 << sdebug_physblk_exp;
1209	put_unaligned_be16(gran, arr + 2);
 
1210
1211	/* Maximum Transfer Length */
1212	if (sdebug_store_sectors > 0x400)
1213		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1214
1215	/* Optimal Transfer Length */
1216	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1217
1218	if (sdebug_lbpu) {
1219		/* Maximum Unmap LBA Count */
1220		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1221
1222		/* Maximum Unmap Block Descriptor Count */
1223		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1224	}
1225
1226	/* Unmap Granularity Alignment */
1227	if (sdebug_unmap_alignment) {
1228		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1229		arr[28] |= 0x80; /* UGAVALID */
1230	}
1231
1232	/* Optimal Unmap Granularity */
1233	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1234
1235	/* Maximum WRITE SAME Length */
1236	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1237
1238	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1241}
1242
1243/* Block device characteristics VPD page (SBC-3) */
1244static int inquiry_vpd_b1(unsigned char *arr)
1245{
1246	memset(arr, 0, 0x3c);
1247	arr[0] = 0;
1248	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1249	arr[2] = 0;
1250	arr[3] = 5;	/* less than 1.8" */
1251
1252	return 0x3c;
1253}
1254
1255/* Logical block provisioning VPD page (SBC-4) */
1256static int inquiry_vpd_b2(unsigned char *arr)
1257{
1258	memset(arr, 0, 0x4);
1259	arr[0] = 0;			/* threshold exponent */
1260	if (sdebug_lbpu)
 
1261		arr[1] = 1 << 7;
1262	if (sdebug_lbpws)
 
1263		arr[1] |= 1 << 6;
1264	if (sdebug_lbpws10)
 
1265		arr[1] |= 1 << 5;
1266	if (sdebug_lbprz && scsi_debug_lbp())
1267		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1268	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1269	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1270	/* threshold_percentage=0 */
1271	return 0x4;
1272}
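
/*
 * Example: with lbpu=1 and lbprz=1 (and real reads/writes enabled) the
 * provisioning byte built above becomes 0x80 | (1 << 2) = 0x84, i.e. LBPU
 * set and LBPRZ=1 in the 3-bit sbc4r07 field, with LBPWS and LBPWS10 clear.
 */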
1273
1274#define SDEBUG_LONG_INQ_SZ 96
1275#define SDEBUG_MAX_INQ_ARR_SZ 584
1276
1277static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 
1278{
1279	unsigned char pq_pdt;
1280	unsigned char * arr;
1281	unsigned char *cmd = scp->cmnd;
1282	int alloc_len, n, ret;
1283	bool have_wlun, is_disk;
1284
1285	alloc_len = get_unaligned_be16(cmd + 3);
1286	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1287	if (! arr)
1288		return DID_REQUEUE << 16;
1289	is_disk = (sdebug_ptype == TYPE_DISK);
1290	have_wlun = scsi_is_wlun(scp->device->lun);
1291	if (have_wlun)
1292		pq_pdt = TYPE_WLUN;	/* present, wlun */
1293	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1294		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1295	else
1296		pq_pdt = (sdebug_ptype & 0x1f);
1297	arr[0] = pq_pdt;
1298	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1299		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
 
1300		kfree(arr);
1301		return check_condition_result;
1302	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1303		int lu_id_num, port_group_id, target_dev_id, len;
1304		char lu_id_str[6];
1305		int host_no = devip->sdbg_host->shost->host_no;
1306		
1307		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1308		    (devip->channel & 0x7f);
1309		if (sdebug_vpd_use_hostno == 0)
1310			host_no = 0;
1311		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1312			    (devip->target * 1000) + devip->lun);
1313		target_dev_id = ((host_no + 1) * 2000) +
1314				 (devip->target * 1000) - 3;
1315		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1316		if (0 == cmd[2]) { /* supported vital product data pages */
1317			arr[1] = cmd[2];	/*sanity */
1318			n = 4;
1319			arr[n++] = 0x0;   /* this page */
1320			arr[n++] = 0x80;  /* unit serial number */
1321			arr[n++] = 0x83;  /* device identification */
1322			arr[n++] = 0x84;  /* software interface ident. */
1323			arr[n++] = 0x85;  /* management network addresses */
1324			arr[n++] = 0x86;  /* extended inquiry */
1325			arr[n++] = 0x87;  /* mode page policy */
1326			arr[n++] = 0x88;  /* SCSI ports */
1327			if (is_disk) {	  /* SBC only */
1328				arr[n++] = 0x89;  /* ATA information */
1329				arr[n++] = 0xb0;  /* Block limits */
1330				arr[n++] = 0xb1;  /* Block characteristics */
1331				arr[n++] = 0xb2;  /* Logical Block Prov */
1332			}
1333			arr[3] = n - 4;	  /* number of supported VPD pages */
1334		} else if (0x80 == cmd[2]) { /* unit serial number */
1335			arr[1] = cmd[2];	/*sanity */
1336			arr[3] = len;
1337			memcpy(&arr[4], lu_id_str, len);
1338		} else if (0x83 == cmd[2]) { /* device identification */
1339			arr[1] = cmd[2];	/*sanity */
1340			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1341						target_dev_id, lu_id_num,
1342						lu_id_str, len,
1343						&devip->lu_name);
1344		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1345			arr[1] = cmd[2];	/*sanity */
1346			arr[3] = inquiry_vpd_84(&arr[4]);
1347		} else if (0x85 == cmd[2]) { /* Management network addresses */
1348			arr[1] = cmd[2];	/*sanity */
1349			arr[3] = inquiry_vpd_85(&arr[4]);
1350		} else if (0x86 == cmd[2]) { /* extended inquiry */
1351			arr[1] = cmd[2];	/*sanity */
1352			arr[3] = 0x3c;	/* number of following entries */
1353			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1354				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1355			else if (have_dif_prot)
1356				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1357			else
1358				arr[4] = 0x0;   /* no protection stuff */
1359			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1360		} else if (0x87 == cmd[2]) { /* mode page policy */
1361			arr[1] = cmd[2];	/*sanity */
1362			arr[3] = 0x8;	/* number of following entries */
1363			arr[4] = 0x2;	/* disconnect-reconnect mp */
1364			arr[6] = 0x80;	/* mlus, shared */
1365			arr[8] = 0x18;	 /* protocol specific lu */
1366			arr[10] = 0x82;	 /* mlus, per initiator port */
1367		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1368			arr[1] = cmd[2];	/*sanity */
1369			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1370		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1371			arr[1] = cmd[2];        /*sanity */
1372			n = inquiry_vpd_89(&arr[4]);
1373			put_unaligned_be16(n, arr + 2);
1374		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
 
1375			arr[1] = cmd[2];        /*sanity */
1376			arr[3] = inquiry_vpd_b0(&arr[4]);
1377		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1378			arr[1] = cmd[2];        /*sanity */
1379			arr[3] = inquiry_vpd_b1(&arr[4]);
1380		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1381			arr[1] = cmd[2];        /*sanity */
1382			arr[3] = inquiry_vpd_b2(&arr[4]);
1383		} else {
1384			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1385			kfree(arr);
1386			return check_condition_result;
1387		}
1388		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1389		ret = fill_from_dev_buffer(scp, arr,
1390			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1391		kfree(arr);
1392		return ret;
1393	}
1394	/* drops through here for a standard inquiry */
1395	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1396	arr[2] = sdebug_scsi_level;
1397	arr[3] = 2;    /* response_data_format==2 */
1398	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1399	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1400	if (sdebug_vpd_use_hostno == 0)
1401		arr[5] = 0x10; /* claim: implicit TGPS */
1402	arr[6] = 0x10; /* claim: MultiP */
1403	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1404	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1405	memcpy(&arr[8], inq_vendor_id, 8);
1406	memcpy(&arr[16], inq_product_id, 16);
1407	memcpy(&arr[32], inq_product_rev, 4);
1408	/* version descriptors (2 bytes each) follow */
1409	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1410	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1411	n = 62;
1412	if (is_disk) {		/* SBC-4 no version claimed */
1413		put_unaligned_be16(0x600, arr + n);
1414		n += 2;
1415	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1416		put_unaligned_be16(0x525, arr + n);
1417		n += 2;
1418	}
1419	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1420	ret = fill_from_dev_buffer(scp, arr,
1421			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
1422	kfree(arr);
1423	return ret;
1424}
1425
1426static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1427				   0, 0, 0x0, 0x0};
1428
1429static int resp_requests(struct scsi_cmnd * scp,
1430			 struct sdebug_dev_info * devip)
1431{
1432	unsigned char * sbuff;
1433	unsigned char *cmd = scp->cmnd;
1434	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1435	bool dsense;
1436	int len = 18;
1437
1438	memset(arr, 0, sizeof(arr));
1439	dsense = !!(cmd[1] & 1);
1440	sbuff = scp->sense_buffer;
1441	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1442		if (dsense) {
1443			arr[0] = 0x72;
1444			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1445			arr[2] = THRESHOLD_EXCEEDED;
1446			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1447			len = 8;
1448		} else {
1449			arr[0] = 0x70;
1450			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1451			arr[7] = 0xa;   	/* 18 byte sense buffer */
1452			arr[12] = THRESHOLD_EXCEEDED;
1453			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1454		}
1455	} else {
1456		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1457		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1458			;	/* have sense and formats match */
1459		else if (arr[0] <= 0x70) {
1460			if (dsense) {
1461				memset(arr, 0, 8);
1462				arr[0] = 0x72;
1463				len = 8;
1464			} else {
1465				memset(arr, 0, 18);
1466				arr[0] = 0x70;
1467				arr[7] = 0xa;
1468			}
1469		} else if (dsense) {
1470			memset(arr, 0, 8);
1471			arr[0] = 0x72;
1472			arr[1] = sbuff[2];     /* sense key */
1473			arr[2] = sbuff[12];    /* asc */
1474			arr[3] = sbuff[13];    /* ascq */
1475			len = 8;
1476		} else {
1477			memset(arr, 0, 18);
1478			arr[0] = 0x70;
1479			arr[2] = sbuff[1];     /* sense key */
1480			arr[7] = 0xa;
1481			arr[12] = sbuff[2];    /* asc */
1482			arr[13] = sbuff[3];    /* ascq */
1483		}
1484
1485	}
1486	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1487	return fill_from_dev_buffer(scp, arr, len);
1488}
1489
1490static int resp_start_stop(struct scsi_cmnd * scp,
1491			   struct sdebug_dev_info * devip)
1492{
1493	unsigned char *cmd = scp->cmnd;
1494	int power_cond, stop;
1495
1496	power_cond = (cmd[4] & 0xf0) >> 4;
1497	if (power_cond) {
1498		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
 
1499		return check_condition_result;
1500	}
1501	stop = !(cmd[4] & 1);
1502	atomic_xchg(&devip->stopped, stop);
 
1503	return 0;
1504}
1505
1506static sector_t get_sdebug_capacity(void)
1507{
1508	static const unsigned int gibibyte = 1073741824;
1509
1510	if (sdebug_virtual_gb > 0)
1511		return (sector_t)sdebug_virtual_gb *
1512			(gibibyte / sdebug_sector_size);
1513	else
1514		return sdebug_store_sectors;
1515}
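
/*
 * Example: virtual_gb=4 with a 512 byte sector size reports
 * 4 * (1073741824 / 512) = 8388608 sectors (4 GiB) regardless of how small
 * the RAM store (dev_size_mb) is; accesses beyond the store wrap as shown
 * in fake_store() above.
 */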
1516
1517#define SDEBUG_READCAP_ARR_SZ 8
1518static int resp_readcap(struct scsi_cmnd * scp,
1519			struct sdebug_dev_info * devip)
1520{
1521	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1522	unsigned int capac;
 
1523
1524	/* following just in case virtual_gb changed */
1525	sdebug_capacity = get_sdebug_capacity();
1526	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1527	if (sdebug_capacity < 0xffffffff) {
1528		capac = (unsigned int)sdebug_capacity - 1;
1529		put_unaligned_be32(capac, arr + 0);
1530	} else
1531		put_unaligned_be32(0xffffffff, arr + 0);
1532	put_unaligned_be16(sdebug_sector_size, arr + 6);
1533	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1534}
1535
1536#define SDEBUG_READCAP16_ARR_SZ 32
1537static int resp_readcap16(struct scsi_cmnd * scp,
1538			  struct sdebug_dev_info * devip)
1539{
1540	unsigned char *cmd = scp->cmnd;
1541	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1542	int alloc_len;
 
1543
1544	alloc_len = get_unaligned_be32(cmd + 10);
1545	/* following just in case virtual_gb changed */
1546	sdebug_capacity = get_sdebug_capacity();
1547	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1548	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1549	put_unaligned_be32(sdebug_sector_size, arr + 8);
1550	arr[13] = sdebug_physblk_exp & 0xf;
1551	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1552
1553	if (scsi_debug_lbp()) {
1554		arr[14] |= 0x80; /* LBPME */
1555		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1556		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1557		 * in the wider field maps to 0 in this field.
1558		 */
1559		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1560			arr[14] |= 0x40;
1561	}
1562
1563	arr[15] = sdebug_lowest_aligned & 0xff;
1564
1565	if (have_dif_prot) {
1566		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1567		arr[12] |= 1; /* PROT_EN */
1568	}
1569
1570	return fill_from_dev_buffer(scp, arr,
1571				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1572}
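
/*
 * Example of the protection encoding above: dif=1 yields arr[12] == 0x01
 * (PROT_EN set, P_TYPE 0, i.e. Type 1 protection) while dif=3 yields 0x05
 * (P_TYPE 2, i.e. Type 3 protection).
 */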
1573
1574#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1575
1576static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1577			      struct sdebug_dev_info * devip)
1578{
1579	unsigned char *cmd = scp->cmnd;
1580	unsigned char * arr;
1581	int host_no = devip->sdbg_host->shost->host_no;
1582	int n, ret, alen, rlen;
1583	int port_group_a, port_group_b, port_a, port_b;
1584
1585	alen = get_unaligned_be32(cmd + 6);
1586	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1587	if (! arr)
1588		return DID_REQUEUE << 16;
1589	/*
1590	 * EVPD page 0x88 states we have two ports, one
1591	 * real and a fake port with no device connected.
1592	 * So we create two port groups with one port each
1593	 * and set the group with port B to unavailable.
1594	 */
1595	port_a = 0x1; /* relative port A */
1596	port_b = 0x2; /* relative port B */
1597	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1598			(devip->channel & 0x7f);
1599	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1600			(devip->channel & 0x7f) + 0x80;
1601
1602	/*
1603	 * The asymmetric access state is cycled according to the host_id.
1604	 */
1605	n = 4;
1606	if (sdebug_vpd_use_hostno == 0) {
1607		arr[n++] = host_no % 3; /* Asymm access state */
1608		arr[n++] = 0x0F; /* claim: all states are supported */
1609	} else {
1610		arr[n++] = 0x0; /* Active/Optimized path */
1611		arr[n++] = 0x01; /* only support active/optimized paths */
1612	}
1613	put_unaligned_be16(port_group_a, arr + n);
1614	n += 2;
1615	arr[n++] = 0;    /* Reserved */
1616	arr[n++] = 0;    /* Status code */
1617	arr[n++] = 0;    /* Vendor unique */
1618	arr[n++] = 0x1;  /* One port per group */
1619	arr[n++] = 0;    /* Reserved */
1620	arr[n++] = 0;    /* Reserved */
1621	put_unaligned_be16(port_a, arr + n);
1622	n += 2;
1623	arr[n++] = 3;    /* Port unavailable */
1624	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1625	put_unaligned_be16(port_group_b, arr + n);
1626	n += 2;
1627	arr[n++] = 0;    /* Reserved */
1628	arr[n++] = 0;    /* Status code */
1629	arr[n++] = 0;    /* Vendor unique */
1630	arr[n++] = 0x1;  /* One port per group */
1631	arr[n++] = 0;    /* Reserved */
1632	arr[n++] = 0;    /* Reserved */
1633	put_unaligned_be16(port_b, arr + n);
1634	n += 2;
1635
1636	rlen = n - 4;
1637	put_unaligned_be32(rlen, arr + 0);
1638
1639	/*
1640	 * Return the smallest of:
1641	 * - the allocated length from the CDB
1642	 * - the constructed response length
1643	 * - the maximum array size
1644	 */
1645	rlen = min(alen, n);
1646	ret = fill_from_dev_buffer(scp, arr,
1647				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1648	kfree(arr);
1649	return ret;
1650}
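
/*
 * Example: for host_no=0 and channel=0 the two groups above are reported as
 * 0x0100 (relative port 1) and 0x0180 (relative port 2, always in the
 * "unavailable" state).  With vpd_use_hostno=0 the first group's asymmetric
 * access state cycles with host_no % 3, which is useful for exercising ALUA
 * path checkers against several emulated hosts.
 */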
1651
1652static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1653			     struct sdebug_dev_info *devip)
1654{
1655	bool rctd;
1656	u8 reporting_opts, req_opcode, sdeb_i, supp;
1657	u16 req_sa, u;
1658	u32 alloc_len, a_len;
1659	int k, offset, len, errsts, count, bump, na;
1660	const struct opcode_info_t *oip;
1661	const struct opcode_info_t *r_oip;
1662	u8 *arr;
1663	u8 *cmd = scp->cmnd;
1664
1665	rctd = !!(cmd[2] & 0x80);
1666	reporting_opts = cmd[2] & 0x7;
1667	req_opcode = cmd[3];
1668	req_sa = get_unaligned_be16(cmd + 4);
1669	alloc_len = get_unaligned_be32(cmd + 6);
1670	if (alloc_len < 4 || alloc_len > 0xffff) {
1671		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1672		return check_condition_result;
1673	}
1674	if (alloc_len > 8192)
1675		a_len = 8192;
1676	else
1677		a_len = alloc_len;
1678	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1679	if (NULL == arr) {
1680		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1681				INSUFF_RES_ASCQ);
1682		return check_condition_result;
1683	}
1684	switch (reporting_opts) {
1685	case 0:	/* all commands */
1686		/* count number of commands */
1687		for (count = 0, oip = opcode_info_arr;
1688		     oip->num_attached != 0xff; ++oip) {
1689			if (F_INV_OP & oip->flags)
1690				continue;
1691			count += (oip->num_attached + 1);
1692		}
1693		bump = rctd ? 20 : 8;
1694		put_unaligned_be32(count * bump, arr);
1695		for (offset = 4, oip = opcode_info_arr;
1696		     oip->num_attached != 0xff && offset < a_len; ++oip) {
1697			if (F_INV_OP & oip->flags)
1698				continue;
1699			na = oip->num_attached;
1700			arr[offset] = oip->opcode;
1701			put_unaligned_be16(oip->sa, arr + offset + 2);
1702			if (rctd)
1703				arr[offset + 5] |= 0x2;
1704			if (FF_SA & oip->flags)
1705				arr[offset + 5] |= 0x1;
1706			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1707			if (rctd)
1708				put_unaligned_be16(0xa, arr + offset + 8);
1709			r_oip = oip;
1710			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1711				if (F_INV_OP & oip->flags)
1712					continue;
1713				offset += bump;
1714				arr[offset] = oip->opcode;
1715				put_unaligned_be16(oip->sa, arr + offset + 2);
1716				if (rctd)
1717					arr[offset + 5] |= 0x2;
1718				if (FF_SA & oip->flags)
1719					arr[offset + 5] |= 0x1;
1720				put_unaligned_be16(oip->len_mask[0],
1721						   arr + offset + 6);
1722				if (rctd)
1723					put_unaligned_be16(0xa,
1724							   arr + offset + 8);
1725			}
1726			oip = r_oip;
1727			offset += bump;
1728		}
1729		break;
1730	case 1:	/* one command: opcode only */
1731	case 2:	/* one command: opcode plus service action */
1732	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
1733		sdeb_i = opcode_ind_arr[req_opcode];
1734		oip = &opcode_info_arr[sdeb_i];
1735		if (F_INV_OP & oip->flags) {
1736			supp = 1;
1737			offset = 4;
1738		} else {
1739			if (1 == reporting_opts) {
1740				if (FF_SA & oip->flags) {
1741					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1742							     2, 2);
1743					kfree(arr);
1744					return check_condition_result;
1745				}
1746				req_sa = 0;
1747			} else if (2 == reporting_opts &&
1748				   0 == (FF_SA & oip->flags)) {
1749				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1750				kfree(arr);	/* point at requested sa */
1751				return check_condition_result;
1752			}
1753			if (0 == (FF_SA & oip->flags) &&
1754			    req_opcode == oip->opcode)
1755				supp = 3;
1756			else if (0 == (FF_SA & oip->flags)) {
1757				na = oip->num_attached;
1758				for (k = 0, oip = oip->arrp; k < na;
1759				     ++k, ++oip) {
1760					if (req_opcode == oip->opcode)
1761						break;
1762				}
1763				supp = (k >= na) ? 1 : 3;
1764			} else if (req_sa != oip->sa) {
1765				na = oip->num_attached;
1766				for (k = 0, oip = oip->arrp; k < na;
1767				     ++k, ++oip) {
1768					if (req_sa == oip->sa)
1769						break;
1770				}
1771				supp = (k >= na) ? 1 : 3;
1772			} else
1773				supp = 3;
1774			if (3 == supp) {
1775				u = oip->len_mask[0];
1776				put_unaligned_be16(u, arr + 2);
1777				arr[4] = oip->opcode;
1778				for (k = 1; k < u; ++k)
1779					arr[4 + k] = (k < 16) ?
1780						 oip->len_mask[k] : 0xff;
1781				offset = 4 + u;
1782			} else
1783				offset = 4;
1784		}
1785		arr[1] = (rctd ? 0x80 : 0) | supp;
1786		if (rctd) {
1787			put_unaligned_be16(0xa, arr + offset);
1788			offset += 12;
1789		}
1790		break;
1791	default:
1792		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1793		kfree(arr);
1794		return check_condition_result;
1795	}
1796	offset = (offset < a_len) ? offset : a_len;
1797	len = (offset < alloc_len) ? offset : alloc_len;
1798	errsts = fill_from_dev_buffer(scp, arr, len);
1799	kfree(arr);
1800	return errsts;
1801}
1802
1803static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1804			  struct sdebug_dev_info *devip)
1805{
1806	bool repd;
1807	u32 alloc_len, len;
1808	u8 arr[16];
1809	u8 *cmd = scp->cmnd;
1810
1811	memset(arr, 0, sizeof(arr));
1812	repd = !!(cmd[2] & 0x80);
1813	alloc_len = get_unaligned_be32(cmd + 6);
1814	if (alloc_len < 4) {
1815		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1816		return check_condition_result;
1817	}
1818	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1819	arr[1] = 0x1;		/* ITNRS */
1820	if (repd) {
1821		arr[3] = 0xc;
1822		len = 16;
1823	} else
1824		len = 4;
1825
1826	len = (len < alloc_len) ? len : alloc_len;
1827	return fill_from_dev_buffer(scp, arr, len);
1828}
1829
1830/* <<Following mode page info copied from ST318451LW>> */
1831
1832static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1833{	/* Read-Write Error Recovery page for mode_sense */
1834	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1835					5, 0, 0xff, 0xff};
1836
1837	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1838	if (1 == pcontrol)
1839		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1840	return sizeof(err_recov_pg);
1841}
1842
1843static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1844{ 	/* Disconnect-Reconnect page for mode_sense */
1845	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1846					 0, 0, 0, 0, 0, 0, 0, 0};
1847
1848	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1849	if (1 == pcontrol)
1850		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1851	return sizeof(disconnect_pg);
1852}
1853
1854static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1855{       /* Format device page for mode_sense */
1856	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1857				     0, 0, 0, 0, 0, 0, 0, 0,
1858				     0, 0, 0, 0, 0x40, 0, 0, 0};
1859
1860	memcpy(p, format_pg, sizeof(format_pg));
1861	put_unaligned_be16(sdebug_sectors_per, p + 10);
1862	put_unaligned_be16(sdebug_sector_size, p + 12);
1863	if (sdebug_removable)
1864		p[20] |= 0x20; /* should agree with INQUIRY */
1865	if (1 == pcontrol)
1866		memset(p + 2, 0, sizeof(format_pg) - 2);
1867	return sizeof(format_pg);
1868}
1869
1870static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1871				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1872				     0, 0, 0, 0};
1873
1874static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1875{ 	/* Caching page for mode_sense */
1876	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1877		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1878	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1879		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1880
1881	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1882		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1883	memcpy(p, caching_pg, sizeof(caching_pg));
1884	if (1 == pcontrol)
1885		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1886	else if (2 == pcontrol)
1887		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1888	return sizeof(caching_pg);
1889}
1890
1891static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1892				    0, 0, 0x2, 0x4b};
1893
1894static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1895{ 	/* Control mode page for mode_sense */
1896	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1897				        0, 0, 0, 0};
1898	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1899				     0, 0, 0x2, 0x4b};
1900
1901	if (sdebug_dsense)
1902		ctrl_m_pg[2] |= 0x4;
1903	else
1904		ctrl_m_pg[2] &= ~0x4;
1905
1906	if (sdebug_ato)
1907		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1908
1909	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1910	if (1 == pcontrol)
1911		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1912	else if (2 == pcontrol)
1913		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1914	return sizeof(ctrl_m_pg);
1915}
1916
1917
1918static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1919{	/* Informational Exceptions control mode page for mode_sense */
1920	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1921				       0, 0, 0x0, 0x0};
1922	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1923				      0, 0, 0x0, 0x0};
1924
1925	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1926	if (1 == pcontrol)
1927		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1928	else if (2 == pcontrol)
1929		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1930	return sizeof(iec_m_pg);
1931}
1932
1933static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1934{	/* SAS SSP mode page - short format for mode_sense */
1935	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1936		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1937
1938	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1939	if (1 == pcontrol)
1940		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1941	return sizeof(sas_sf_m_pg);
1942}
1943
1944
1945static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1946			      int target_dev_id)
1947{	/* SAS phy control and discover mode page for mode_sense */
1948	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1949		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1950		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1951		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1952		    0x2, 0, 0, 0, 0, 0, 0, 0,
1953		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1954		    0, 0, 0, 0, 0, 0, 0, 0,
1955		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1956		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1957		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1958		    0x3, 0, 0, 0, 0, 0, 0, 0,
1959		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1960		    0, 0, 0, 0, 0, 0, 0, 0,
1961		};
1962	int port_a, port_b;
1963
1964	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
1965	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
1966	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
1967	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
1968	port_a = target_dev_id + 1;
1969	port_b = port_a + 1;
1970	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1971	put_unaligned_be32(port_a, p + 20);
1972	put_unaligned_be32(port_b, p + 48 + 20);
1973	if (1 == pcontrol)
1974		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1975	return sizeof(sas_pcd_m_pg);
1976}
1977
1978static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1979{	/* SAS SSP shared protocol specific port mode subpage */
1980	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1981		    0, 0, 0, 0, 0, 0, 0, 0,
1982		};
1983
1984	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1985	if (1 == pcontrol)
1986		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1987	return sizeof(sas_sha_m_pg);
1988}
1989
1990#define SDEBUG_MAX_MSENSE_SZ 256
1991
1992static int resp_mode_sense(struct scsi_cmnd *scp,
1993			   struct sdebug_dev_info *devip)
1994{
 
1995	int pcontrol, pcode, subpcode, bd_len;
1996	unsigned char dev_spec;
1997	int alloc_len, offset, len, target_dev_id;
1998	int target = scp->device->id;
1999	unsigned char * ap;
2000	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2001	unsigned char *cmd = scp->cmnd;
2002	bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2003
2004	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2005	pcontrol = (cmd[2] & 0xc0) >> 6;
2006	pcode = cmd[2] & 0x3f;
2007	subpcode = cmd[3];
2008	msense_6 = (MODE_SENSE == cmd[0]);
2009	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2010	is_disk = (sdebug_ptype == TYPE_DISK);
2011	if (is_disk && !dbd)
2012		bd_len = llbaa ? 16 : 8;
2013	else
2014		bd_len = 0;
2015	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2016	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2017	if (0x3 == pcontrol) {  /* Saving values not supported */
2018		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
 
2019		return check_condition_result;
2020	}
2021	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2022			(devip->target * 1000) - 3;
2023	/* for disks set DPOFUA bit and clear write protect (WP) bit */
2024	if (is_disk)
2025		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2026	else
2027		dev_spec = 0x0;
2028	if (msense_6) {
2029		arr[2] = dev_spec;
2030		arr[3] = bd_len;
2031		offset = 4;
2032	} else {
2033		arr[3] = dev_spec;
2034		if (16 == bd_len)
2035			arr[4] = 0x1;	/* set LONGLBA bit */
2036		arr[7] = bd_len;	/* assume 255 or less */
2037		offset = 8;
2038	}
2039	ap = arr + offset;
2040	if ((bd_len > 0) && (!sdebug_capacity))
2041		sdebug_capacity = get_sdebug_capacity();
2042
2043	if (8 == bd_len) {
2044		if (sdebug_capacity > 0xfffffffe)
2045			put_unaligned_be32(0xffffffff, ap + 0);
2046		else
2047			put_unaligned_be32(sdebug_capacity, ap + 0);
2048		put_unaligned_be16(sdebug_sector_size, ap + 6);
2049		offset += bd_len;
2050		ap = arr + offset;
2051	} else if (16 == bd_len) {
2052		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2053		put_unaligned_be32(sdebug_sector_size, ap + 12);
2054		offset += bd_len;
2055		ap = arr + offset;
2056	}
2057
2058	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2059		/* TODO: Control Extension page */
2060		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
 
2061		return check_condition_result;
2062	}
2063	bad_pcode = false;
2064
2065	switch (pcode) {
2066	case 0x1:	/* Read-Write error recovery page, direct access */
2067		len = resp_err_recov_pg(ap, pcontrol, target);
2068		offset += len;
2069		break;
2070	case 0x2:	/* Disconnect-Reconnect page, all devices */
2071		len = resp_disconnect_pg(ap, pcontrol, target);
2072		offset += len;
2073		break;
2074	case 0x3:	/* Format device page, direct access */
2075		if (is_disk) {
2076			len = resp_format_pg(ap, pcontrol, target);
2077			offset += len;
2078		} else
2079			bad_pcode = true;
2080		break;
2081	case 0x8:	/* Caching page, direct access */
2082		if (is_disk) {
2083			len = resp_caching_pg(ap, pcontrol, target);
2084			offset += len;
2085		} else
2086			bad_pcode = true;
2087		break;
2088	case 0xa:	/* Control Mode page, all devices */
2089		len = resp_ctrl_m_pg(ap, pcontrol, target);
2090		offset += len;
2091		break;
2092	case 0x19:	/* if spc==1 then sas phy, control+discover */
2093		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2094			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
 
2095			return check_condition_result;
2096		}
2097		len = 0;
2098		if ((0x0 == subpcode) || (0xff == subpcode))
2099			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2100		if ((0x1 == subpcode) || (0xff == subpcode))
2101			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2102						  target_dev_id);
2103		if ((0x2 == subpcode) || (0xff == subpcode))
2104			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2105		offset += len;
2106		break;
2107	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2108		len = resp_iec_m_pg(ap, pcontrol, target);
2109		offset += len;
2110		break;
2111	case 0x3f:	/* Read all Mode pages */
2112		if ((0 == subpcode) || (0xff == subpcode)) {
2113			len = resp_err_recov_pg(ap, pcontrol, target);
2114			len += resp_disconnect_pg(ap + len, pcontrol, target);
2115			if (is_disk) {
2116				len += resp_format_pg(ap + len, pcontrol,
2117						      target);
2118				len += resp_caching_pg(ap + len, pcontrol,
2119						       target);
2120			}
2121			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2122			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2123			if (0xff == subpcode) {
2124				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2125						  target, target_dev_id);
2126				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2127			}
2128			len += resp_iec_m_pg(ap + len, pcontrol, target);
2129			offset += len;
2130		} else {
2131			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
 
2132			return check_condition_result;
2133		}
 
2134		break;
2135	default:
2136		bad_pcode = true;
2137		break;
2138	}
2139	if (bad_pcode) {
2140		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2141		return check_condition_result;
2142	}
2143	if (msense_6)
2144		arr[0] = offset - 1;
2145	else
2146		put_unaligned_be16((offset - 2), arr + 0);
2147	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2148}
2149
2150#define SDEBUG_MAX_MSELECT_SZ 512
2151
2152static int resp_mode_select(struct scsi_cmnd *scp,
2153			    struct sdebug_dev_info *devip)
2154{
2155	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2156	int param_len, res, mpage;
2157	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2158	unsigned char *cmd = scp->cmnd;
2159	int mselect6 = (MODE_SELECT == cmd[0]);
2160
2161	memset(arr, 0, sizeof(arr));
2162	pf = cmd[1] & 0x10;
2163	sp = cmd[1] & 0x1;
2164	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2165	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2166		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
 
2167		return check_condition_result;
2168	}
2169	res = fetch_to_dev_buffer(scp, arr, param_len);
2170	if (-1 == res)
2171		return DID_ERROR << 16;
2172	else if (sdebug_verbose && (res < param_len))
2173		sdev_printk(KERN_INFO, scp->device,
2174			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2175			    __func__, param_len, res);
2176	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2177	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2178	if (md_len > 2) {
2179		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
 
2180		return check_condition_result;
2181	}
2182	off = bd_len + (mselect6 ? 4 : 8);
2183	mpage = arr[off] & 0x3f;
2184	ps = !!(arr[off] & 0x80);
2185	if (ps) {
2186		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
 
2187		return check_condition_result;
2188	}
2189	spf = !!(arr[off] & 0x40);
2190	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2191		       (arr[off + 1] + 2);
2192	if ((pg_len + off) > param_len) {
2193		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2194				PARAMETER_LIST_LENGTH_ERR, 0);
2195		return check_condition_result;
2196	}
2197	switch (mpage) {
2198	case 0x8:      /* Caching Mode page */
2199		if (caching_pg[1] == arr[off + 1]) {
2200			memcpy(caching_pg + 2, arr + off + 2,
2201			       sizeof(caching_pg) - 2);
2202			goto set_mode_changed_ua;
2203		}
2204		break;
2205	case 0xa:      /* Control Mode page */
2206		if (ctrl_m_pg[1] == arr[off + 1]) {
2207			memcpy(ctrl_m_pg + 2, arr + off + 2,
2208			       sizeof(ctrl_m_pg) - 2);
2209			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2210			goto set_mode_changed_ua;
2211		}
2212		break;
2213	case 0x1c:      /* Informational Exceptions Mode page */
2214		if (iec_m_pg[1] == arr[off + 1]) {
2215			memcpy(iec_m_pg + 2, arr + off + 2,
2216			       sizeof(iec_m_pg) - 2);
2217			goto set_mode_changed_ua;
2218		}
2219		break;
2220	default:
2221		break;
2222	}
2223	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
 
2224	return check_condition_result;
2225set_mode_changed_ua:
2226	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2227	return 0;
2228}
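
/*
 * Example: a MODE SELECT that rewrites the control mode page with the
 * D_SENSE bit (0x04 in byte 2 of the page) set flips sdebug_dsense at run
 * time, so later CHECK CONDITIONs use descriptor format sense; any accepted
 * page change also queues a MODE PARAMETERS CHANGED unit attention via
 * set_mode_changed_ua above.
 */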
2229
2230static int resp_temp_l_pg(unsigned char * arr)
2231{
2232	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2233				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2234		};
2235
2236	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2237	return sizeof(temp_l_pg);
2238}
2239
2240static int resp_ie_l_pg(unsigned char * arr)
2241{
2242	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2243		};
2244
2245	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2246	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2247		arr[4] = THRESHOLD_EXCEEDED;
2248		arr[5] = 0xff;
2249	}
2250	return sizeof(ie_l_pg);
2251}
2252
2253#define SDEBUG_MAX_LSENSE_SZ 512
2254
2255static int resp_log_sense(struct scsi_cmnd * scp,
2256                          struct sdebug_dev_info * devip)
2257{
2258	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2259	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2260	unsigned char *cmd = scp->cmnd;
2261
2262	memset(arr, 0, sizeof(arr));
2263	ppc = cmd[1] & 0x2;
2264	sp = cmd[1] & 0x1;
2265	if (ppc || sp) {
2266		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
 
2267		return check_condition_result;
2268	}
2269	pcontrol = (cmd[2] & 0xc0) >> 6;
2270	pcode = cmd[2] & 0x3f;
2271	subpcode = cmd[3] & 0xff;
2272	alloc_len = get_unaligned_be16(cmd + 7);
2273	arr[0] = pcode;
2274	if (0 == subpcode) {
2275		switch (pcode) {
2276		case 0x0:	/* Supported log pages log page */
2277			n = 4;
2278			arr[n++] = 0x0;		/* this page */
2279			arr[n++] = 0xd;		/* Temperature */
2280			arr[n++] = 0x2f;	/* Informational exceptions */
2281			arr[3] = n - 4;
2282			break;
2283		case 0xd:	/* Temperature log page */
2284			arr[3] = resp_temp_l_pg(arr + 4);
2285			break;
2286		case 0x2f:	/* Informational exceptions log page */
2287			arr[3] = resp_ie_l_pg(arr + 4);
2288			break;
2289		default:
2290			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
 
2291			return check_condition_result;
2292		}
2293	} else if (0xff == subpcode) {
2294		arr[0] |= 0x40;
2295		arr[1] = subpcode;
2296		switch (pcode) {
2297		case 0x0:	/* Supported log pages and subpages log page */
2298			n = 4;
2299			arr[n++] = 0x0;
2300			arr[n++] = 0x0;		/* 0,0 page */
2301			arr[n++] = 0x0;
2302			arr[n++] = 0xff;	/* this page */
2303			arr[n++] = 0xd;
2304			arr[n++] = 0x0;		/* Temperature */
2305			arr[n++] = 0x2f;
2306			arr[n++] = 0x0;	/* Informational exceptions */
2307			arr[3] = n - 4;
2308			break;
2309		case 0xd:	/* Temperature subpages */
2310			n = 4;
2311			arr[n++] = 0xd;
2312			arr[n++] = 0x0;		/* Temperature */
2313			arr[3] = n - 4;
2314			break;
2315		case 0x2f:	/* Informational exceptions subpages */
2316			n = 4;
2317			arr[n++] = 0x2f;
2318			arr[n++] = 0x0;		/* Informational exceptions */
2319			arr[3] = n - 4;
2320			break;
2321		default:
2322			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2323			return check_condition_result;
2324		}
2325	} else {
2326		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2327		return check_condition_result;
2328	}
2329	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2330	return fill_from_dev_buffer(scp, arr,
2331		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2332}
2333
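/* Common sanity checks for media access commands: reject LBAs beyond the
 * capacity and transfer lengths larger than the shared ram store. */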
2334static int check_device_access_params(struct scsi_cmnd *scp,
2335				      unsigned long long lba, unsigned int num)
2336{
2337	if (lba + num > sdebug_capacity) {
2338		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2339		return check_condition_result;
2340	}
2341	/* transfer length excessive (tie in to block limits VPD page) */
2342	if (num > sdebug_store_sectors) {
2343		/* needs work to find which cdb byte 'num' comes from */
2344		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2345		return check_condition_result;
2346	}
2347	return 0;
2348}
2349
2350/* Returns number of bytes copied or -1 if error. */
2351static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
2352			    bool do_write)
2353{
2354	int ret;
2355	u64 block, rest = 0;
2356	struct scsi_data_buffer *sdb;
2357	enum dma_data_direction dir;
2358
2359	if (do_write) {
2360		sdb = scsi_out(scmd);
2361		dir = DMA_TO_DEVICE;
2362	} else {
2363		sdb = scsi_in(scmd);
2364		dir = DMA_FROM_DEVICE;
2365	}
2366
2367	if (!sdb->length)
2368		return 0;
2369	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2370		return -1;
2371
2372	block = do_div(lba, sdebug_store_sectors);
2373	if (block + num > sdebug_store_sectors)
2374		rest = block + num - sdebug_store_sectors;
2375
2376	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2377		   fake_storep + (block * sdebug_sector_size),
2378		   (num - rest) * sdebug_sector_size, 0, do_write);
2379	if (ret != (num - rest) * sdebug_sector_size)
2380		return ret;
2381
2382	if (rest) {
2383		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2384			    fake_storep, rest * sdebug_sector_size,
2385			    (num - rest) * sdebug_sector_size, do_write);
2386	}
2387
2388	return ret;
2389}
2390
2391/* If the first num blocks of arr compare equal to fake_store(lba, num), then
2392 * copy the second half of arr into fake_store(lba, num) and return true. If
2393 * the comparison fails then return false. */
2394static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2395{
2396	bool res;
2397	u64 block, rest = 0;
2398	u32 store_blks = sdebug_store_sectors;
2399	u32 lb_size = sdebug_sector_size;
2400
2401	block = do_div(lba, store_blks);
2402	if (block + num > store_blks)
2403		rest = block + num - store_blks;
2404
2405	res = !memcmp(fake_storep + (block * lb_size), arr,
2406		      (num - rest) * lb_size);
2407	if (!res)
2408		return res;
2409	if (rest)
2410		res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2411			      rest * lb_size);
2412	if (!res)
2413		return res;
2414	arr += num * lb_size;
2415	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2416	if (rest)
2417		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2418		       rest * lb_size);
2419	return res;
2420}
2421
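/* Guard tag for one logical block: an IP checksum when the "guard" module
 * parameter is set, otherwise the T10-DIF CRC16. */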
2422static __be16 dif_compute_csum(const void *buf, int len)
2423{
2424	__be16 csum;
2425
2426	if (sdebug_guard)
2427		csum = (__force __be16)ip_compute_csum(buf, len);
2428	else
2429		csum = cpu_to_be16(crc_t10dif(buf, len));
2430
2431	return csum;
2432}
2433
2434static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2435		      sector_t sector, u32 ei_lba)
2436{
2437	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2438
2439	if (sdt->guard_tag != csum) {
2440		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2441			(unsigned long)sector,
2442			be16_to_cpu(sdt->guard_tag),
2443			be16_to_cpu(csum));
2444		return 0x01;
2445	}
2446	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2447	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2448		pr_err("REF check failed on sector %lu\n",
2449			(unsigned long)sector);
2450		return 0x03;
2451	}
2452	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2453	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2454		pr_err("REF check failed on sector %lu\n",
2455			(unsigned long)sector);
2456		return 0x03;
2457	}
2458	return 0;
2459}
2460
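/* Copy protection tuples between dif_storep and the command's protection
 * scatter-gather list, wrapping at the end of the store; on reads the tuples
 * flow to the sgl, on writes from it. */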
2461static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2462			  unsigned int sectors, bool read)
2463{
2464	size_t resid;
2465	void *paddr;
2466	const void *dif_store_end = dif_storep + sdebug_store_sectors;
2467	struct sg_mapping_iter miter;
2468
2469	/* Bytes of protection data to copy into sgl */
2470	resid = sectors * sizeof(*dif_storep);
2471
2472	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2473			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2474			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2475
2476	while (sg_miter_next(&miter) && resid > 0) {
2477		size_t len = min(miter.length, resid);
2478		void *start = dif_store(sector);
2479		size_t rest = 0;
2480
2481		if (dif_store_end < start + len)
2482			rest = start + len - dif_store_end;
2483
2484		paddr = miter.addr;
2485
2486		if (read)
2487			memcpy(paddr, start, len - rest);
2488		else
2489			memcpy(start, paddr, len - rest);
2490
2491		if (rest) {
2492			if (read)
2493				memcpy(paddr + len - rest, dif_storep, rest);
2494			else
2495				memcpy(dif_storep, paddr + len - rest, rest);
2496		}
2497
2498		sector += len / sizeof(*dif_storep);
2499		resid -= len;
2500	}
2501	sg_miter_stop(&miter);
2502}
2503
2504static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2505			    unsigned int sectors, u32 ei_lba)
2506{
2507	unsigned int i;
2508	struct t10_pi_tuple *sdt;
2509	sector_t sector;
2510
2511	for (i = 0; i < sectors; i++, ei_lba++) {
2512		int ret;
2513
2514		sector = start_sec + i;
2515		sdt = dif_store(sector);
2516
2517		if (sdt->app_tag == cpu_to_be16(0xffff))
2518			continue;
2519
2520		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2521		if (ret) {
2522			dif_errors++;
2523			return ret;
2524		}
2525	}
2526
2527	dif_copy_prot(SCpnt, start_sec, sectors, true);
2528	dix_reads++;
2529
2530	return 0;
2531}
2532
2533static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2534{
2535	u8 *cmd = scp->cmnd;
2536	struct sdebug_queued_cmd *sqcp;
2537	u64 lba;
2538	u32 num;
2539	u32 ei_lba;
2540	unsigned long iflags;
2541	int ret;
2542	bool check_prot;
2543
2544	switch (cmd[0]) {
2545	case READ_16:
2546		ei_lba = 0;
2547		lba = get_unaligned_be64(cmd + 2);
2548		num = get_unaligned_be32(cmd + 10);
2549		check_prot = true;
2550		break;
2551	case READ_10:
2552		ei_lba = 0;
2553		lba = get_unaligned_be32(cmd + 2);
2554		num = get_unaligned_be16(cmd + 7);
2555		check_prot = true;
2556		break;
2557	case READ_6:
2558		ei_lba = 0;
2559		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2560		      (u32)(cmd[1] & 0x1f) << 16;
2561		num = (0 == cmd[4]) ? 256 : cmd[4];
2562		check_prot = true;
2563		break;
2564	case READ_12:
2565		ei_lba = 0;
2566		lba = get_unaligned_be32(cmd + 2);
2567		num = get_unaligned_be32(cmd + 6);
2568		check_prot = true;
2569		break;
2570	case XDWRITEREAD_10:
2571		ei_lba = 0;
2572		lba = get_unaligned_be32(cmd + 2);
2573		num = get_unaligned_be16(cmd + 7);
2574		check_prot = false;
2575		break;
2576	default:	/* assume READ(32) */
2577		lba = get_unaligned_be64(cmd + 12);
2578		ei_lba = get_unaligned_be32(cmd + 20);
2579		num = get_unaligned_be32(cmd + 28);
2580		check_prot = false;
2581		break;
2582	}
2583	if (unlikely(have_dif_prot && check_prot)) {
2584		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2585		    (cmd[1] & 0xe0)) {
2586			mk_sense_invalid_opcode(scp);
2587			return check_condition_result;
2588		}
2589		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2590		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2591		    (cmd[1] & 0xe0) == 0)
2592			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2593				    "to DIF device\n");
2594	}
2595	if (unlikely(sdebug_any_injecting_opt)) {
2596		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2597
2598		if (sqcp) {
2599			if (sqcp->inj_short)
2600				num /= 2;
2601		}
2602	} else
2603		sqcp = NULL;
2604
2605	/* inline check_device_access_params() */
2606	if (unlikely(lba + num > sdebug_capacity)) {
2607		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2608		return check_condition_result;
2609	}
2610	/* transfer length excessive (tie in to block limits VPD page) */
2611	if (unlikely(num > sdebug_store_sectors)) {
2612		/* needs work to find which cdb byte 'num' comes from */
2613		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2614		return check_condition_result;
2615	}
2616
2617	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2618		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2619		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
2620		/* claim unrecoverable read error */
2621		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2622		/* set info field and valid bit for fixed descriptor */
2623		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2624			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
2625			ret = (lba < OPT_MEDIUM_ERR_ADDR)
2626			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2627			put_unaligned_be32(ret, scp->sense_buffer + 3);
2628		}
2629		scsi_set_resid(scp, scsi_bufflen(scp));
2630		return check_condition_result;
2631	}
2632
2633	read_lock_irqsave(&atomic_rw, iflags);
2634
2635	/* DIX + T10 DIF */
2636	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2637		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2638
2639		if (prot_ret) {
2640			read_unlock_irqrestore(&atomic_rw, iflags);
2641			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2642			return illegal_condition_result;
2643		}
2644	}
2645
2646	ret = do_device_access(scp, lba, num, false);
2647	read_unlock_irqrestore(&atomic_rw, iflags);
2648	if (unlikely(ret == -1))
2649		return DID_ERROR << 16;
2650
2651	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2652
2653	if (unlikely(sqcp)) {
2654		if (sqcp->inj_recovered) {
2655			mk_sense_buffer(scp, RECOVERED_ERROR,
2656					THRESHOLD_EXCEEDED, 0);
2657			return check_condition_result;
2658		} else if (sqcp->inj_transport) {
2659			mk_sense_buffer(scp, ABORTED_COMMAND,
2660					TRANSPORT_PROBLEM, ACK_NAK_TO);
2661			return check_condition_result;
2662		} else if (sqcp->inj_dif) {
2663			/* Logical block guard check failed */
2664			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2665			return illegal_condition_result;
2666		} else if (sqcp->inj_dix) {
2667			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2668			return illegal_condition_result;
2669		}
2670	}
2671	return 0;
2672}
2673
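/* Hex/ASCII dump of one logical block, 16 bytes per line, used when a
 * protection check fails during a write. */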
2674static void dump_sector(unsigned char *buf, int len)
2675{
2676	int i, j, n;
2677
2678	pr_err(">>> Sector Dump <<<\n");
2679	for (i = 0 ; i < len ; i += 16) {
2680		char b[128];
2681
2682		for (j = 0, n = 0; j < 16; j++) {
2683			unsigned char c = buf[i+j];
2684
2685			if (c >= 0x20 && c < 0x7e)
2686				n += scnprintf(b + n, sizeof(b) - n,
2687					       " %c ", buf[i+j]);
2688			else
2689				n += scnprintf(b + n, sizeof(b) - n,
2690					       "%02x ", buf[i+j]);
2691		}
2692		pr_err("%04d: %s\n", i, b);
2693	}
2694}
2695
2696static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2697			     unsigned int sectors, u32 ei_lba)
2698{
2699	int ret;
2700	struct t10_pi_tuple *sdt;
2701	void *daddr;
2702	sector_t sector = start_sec;
2703	int ppage_offset;
2704	int dpage_offset;
2705	struct sg_mapping_iter diter;
2706	struct sg_mapping_iter piter;
2707
2708	BUG_ON(scsi_sg_count(SCpnt) == 0);
2709	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2710
2711	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2712			scsi_prot_sg_count(SCpnt),
2713			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2714	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2715			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2716
2717	/* For each protection page */
2718	while (sg_miter_next(&piter)) {
2719		dpage_offset = 0;
2720		if (WARN_ON(!sg_miter_next(&diter))) {
2721			ret = 0x01;
2722			goto out;
2723		}
2724
2725		for (ppage_offset = 0; ppage_offset < piter.length;
2726		     ppage_offset += sizeof(struct t10_pi_tuple)) {
2727			/* If we're at the end of the current
2728			 * data page advance to the next one
2729			 */
2730			if (dpage_offset >= diter.length) {
2731				if (WARN_ON(!sg_miter_next(&diter))) {
2732					ret = 0x01;
2733					goto out;
2734				}
2735				dpage_offset = 0;
2736			}
2737
2738			sdt = piter.addr + ppage_offset;
2739			daddr = diter.addr + dpage_offset;
2740
2741			ret = dif_verify(sdt, daddr, sector, ei_lba);
2742			if (ret) {
2743				dump_sector(daddr, sdebug_sector_size);
2744				goto out;
2745			}
2746
2747			sector++;
2748			ei_lba++;
2749			dpage_offset += sdebug_sector_size;
2750		}
2751		diter.consumed = dpage_offset;
2752		sg_miter_stop(&diter);
2753	}
2754	sg_miter_stop(&piter);
2755
2756	dif_copy_prot(SCpnt, start_sec, sectors, false);
2757	dix_writes++;
2758
2759	return 0;
2760
2761out:
2762	dif_errors++;
2763	sg_miter_stop(&diter);
2764	sg_miter_stop(&piter);
2765	return ret;
2766}
2767
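/* Logical block provisioning helpers: map_storep holds one bit per unmap
 * granularity unit; these two convert between LBAs and bit indices, taking
 * unmap_alignment into account. */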
2768static unsigned long lba_to_map_index(sector_t lba)
2769{
2770	if (sdebug_unmap_alignment)
2771		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2772	sector_div(lba, sdebug_unmap_granularity);
2773	return lba;
2774}
2775
2776static sector_t map_index_to_lba(unsigned long index)
2777{
2778	sector_t lba = index * sdebug_unmap_granularity;
2779
2780	if (sdebug_unmap_alignment)
2781		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2782	return lba;
2783}
2784
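/* Returns whether lba is currently mapped and, via *num, how many following
 * blocks share that state (bounded by the end of the store). */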
2785static unsigned int map_state(sector_t lba, unsigned int *num)
2786{
2787	sector_t end;
2788	unsigned int mapped;
2789	unsigned long index;
2790	unsigned long next;
2791
2792	index = lba_to_map_index(lba);
2793	mapped = test_bit(index, map_storep);
2794
2795	if (mapped)
2796		next = find_next_zero_bit(map_storep, map_size, index);
2797	else
2798		next = find_next_bit(map_storep, map_size, index);
2799
2800	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2801	*num = end - lba;
2802	return mapped;
2803}
2804
2805static void map_region(sector_t lba, unsigned int len)
2806{
2807	sector_t end = lba + len;
2808
2809	while (lba < end) {
2810		unsigned long index = lba_to_map_index(lba);
2811
2812		if (index < map_size)
2813			set_bit(index, map_storep);
2814
2815		lba = map_index_to_lba(index + 1);
2816	}
2817}
2818
2819static void unmap_region(sector_t lba, unsigned int len)
2820{
2821	sector_t end = lba + len;
2822
2823	while (lba < end) {
2824		unsigned long index = lba_to_map_index(lba);
2825
2826		if (lba == map_index_to_lba(index) &&
2827		    lba + sdebug_unmap_granularity <= end &&
2828		    index < map_size) {
2829			clear_bit(index, map_storep);
2830			if (sdebug_lbprz) {  /* for LBPRZ=2 fill with 0xff bytes */
2831				memset(fake_storep +
2832				       lba * sdebug_sector_size,
2833				       (sdebug_lbprz & 1) ? 0 : 0xff,
2834				       sdebug_sector_size *
2835				       sdebug_unmap_granularity);
2836			}
2837			if (dif_storep) {
2838				memset(dif_storep + lba, 0xff,
2839				       sizeof(*dif_storep) *
2840				       sdebug_unmap_granularity);
2841			}
2842		}
2843		lba = map_index_to_lba(index + 1);
2844	}
2845}
2846
2847static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2848{
2849	u8 *cmd = scp->cmnd;
2850	u64 lba;
2851	u32 num;
2852	u32 ei_lba;
2853	unsigned long iflags;
2854	int ret;
2855	bool check_prot;
2856
2857	switch (cmd[0]) {
2858	case WRITE_16:
2859		ei_lba = 0;
2860		lba = get_unaligned_be64(cmd + 2);
2861		num = get_unaligned_be32(cmd + 10);
2862		check_prot = true;
2863		break;
2864	case WRITE_10:
2865		ei_lba = 0;
2866		lba = get_unaligned_be32(cmd + 2);
2867		num = get_unaligned_be16(cmd + 7);
2868		check_prot = true;
2869		break;
2870	case WRITE_6:
2871		ei_lba = 0;
2872		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2873		      (u32)(cmd[1] & 0x1f) << 16;
2874		num = (0 == cmd[4]) ? 256 : cmd[4];
2875		check_prot = true;
2876		break;
2877	case WRITE_12:
2878		ei_lba = 0;
2879		lba = get_unaligned_be32(cmd + 2);
2880		num = get_unaligned_be32(cmd + 6);
2881		check_prot = true;
2882		break;
2883	case 0x53:	/* XDWRITEREAD(10) */
2884		ei_lba = 0;
2885		lba = get_unaligned_be32(cmd + 2);
2886		num = get_unaligned_be16(cmd + 7);
2887		check_prot = false;
2888		break;
2889	default:	/* assume WRITE(32) */
2890		lba = get_unaligned_be64(cmd + 12);
2891		ei_lba = get_unaligned_be32(cmd + 20);
2892		num = get_unaligned_be32(cmd + 28);
2893		check_prot = false;
2894		break;
2895	}
2896	if (unlikely(have_dif_prot && check_prot)) {
2897		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2898		    (cmd[1] & 0xe0)) {
2899			mk_sense_invalid_opcode(scp);
2900			return check_condition_result;
2901		}
2902		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2903		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2904		    (cmd[1] & 0xe0) == 0)
2905			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2906				    "to DIF device\n");
2907	}
2908
2909	/* inline check_device_access_params() */
2910	if (unlikely(lba + num > sdebug_capacity)) {
2911		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2912		return check_condition_result;
2913	}
2914	/* transfer length excessive (tie in to block limits VPD page) */
2915	if (unlikely(num > sdebug_store_sectors)) {
2916		/* needs work to find which cdb byte 'num' comes from */
2917		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2918		return check_condition_result;
2919	}
2920
2921	write_lock_irqsave(&atomic_rw, iflags);
2922
2923	/* DIX + T10 DIF */
2924	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2925		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2926
2927		if (prot_ret) {
2928			write_unlock_irqrestore(&atomic_rw, iflags);
2929			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2930			return illegal_condition_result;
2931		}
2932	}
2933
2934	ret = do_device_access(scp, lba, num, true);
2935	if (unlikely(scsi_debug_lbp()))
2936		map_region(lba, num);
2937	write_unlock_irqrestore(&atomic_rw, iflags);
2938	if (unlikely(-1 == ret))
2939		return DID_ERROR << 16;
2940	else if (unlikely(sdebug_verbose &&
2941			  (ret < (num * sdebug_sector_size))))
2942		sdev_printk(KERN_INFO, scp->device,
2943			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2944			    my_name, num * sdebug_sector_size, ret);
2945
2946	if (unlikely(sdebug_any_injecting_opt)) {
2947		struct sdebug_queued_cmd *sqcp =
2948				(struct sdebug_queued_cmd *)scp->host_scribble;
2949
2950		if (sqcp) {
2951			if (sqcp->inj_recovered) {
2952				mk_sense_buffer(scp, RECOVERED_ERROR,
2953						THRESHOLD_EXCEEDED, 0);
2954				return check_condition_result;
2955			} else if (sqcp->inj_dif) {
2956				/* Logical block guard check failed */
2957				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2958				return illegal_condition_result;
2959			} else if (sqcp->inj_dix) {
2960				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2961				return illegal_condition_result;
2962			}
2963		}
2964	}
2965	return 0;
2966}
2967
2968static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
2969			   u32 ei_lba, bool unmap, bool ndob)
2970{
2971	unsigned long iflags;
2972	unsigned long long i;
2973	int ret;
2974	u64 lba_off;
2975
2976	ret = check_device_access_params(scp, lba, num);
2977	if (ret)
2978		return ret;
2979
2980	write_lock_irqsave(&atomic_rw, iflags);
2981
2982	if (unmap && scsi_debug_lbp()) {
2983		unmap_region(lba, num);
2984		goto out;
2985	}
2986
2987	lba_off = lba * sdebug_sector_size;
2988	/* if ndob then zero 1 logical block, else fetch 1 logical block */
2989	if (ndob) {
2990		memset(fake_storep + lba_off, 0, sdebug_sector_size);
2991		ret = 0;
2992	} else
2993		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
2994					  sdebug_sector_size);
2995
2996	if (-1 == ret) {
2997		write_unlock_irqrestore(&atomic_rw, iflags);
2998		return DID_ERROR << 16;
2999	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
3000		sdev_printk(KERN_INFO, scp->device,
3001			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
3002			    my_name, "write same",
3003			    num * sdebug_sector_size, ret);
3004
3005	/* Copy first sector to remaining blocks */
3006	for (i = 1 ; i < num ; i++)
3007		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3008		       fake_storep + lba_off,
3009		       sdebug_sector_size);
3010
3011	if (scsi_debug_lbp())
3012		map_region(lba, num);
3013out:
3014	write_unlock_irqrestore(&atomic_rw, iflags);
3015
3016	return 0;
3017}
3018
3019static int resp_write_same_10(struct scsi_cmnd *scp,
3020			      struct sdebug_dev_info *devip)
3021{
3022	u8 *cmd = scp->cmnd;
3023	u32 lba;
3024	u16 num;
3025	u32 ei_lba = 0;
3026	bool unmap = false;
3027
3028	if (cmd[1] & 0x8) {
3029		if (sdebug_lbpws10 == 0) {
3030			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3031			return check_condition_result;
3032		} else
3033			unmap = true;
3034	}
3035	lba = get_unaligned_be32(cmd + 2);
3036	num = get_unaligned_be16(cmd + 7);
3037	if (num > sdebug_write_same_length) {
3038		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3039		return check_condition_result;
3040	}
3041	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3042}
3043
3044static int resp_write_same_16(struct scsi_cmnd *scp,
3045			      struct sdebug_dev_info *devip)
3046{
3047	u8 *cmd = scp->cmnd;
3048	u64 lba;
3049	u32 num;
3050	u32 ei_lba = 0;
3051	bool unmap = false;
3052	bool ndob = false;
3053
3054	if (cmd[1] & 0x8) {	/* UNMAP */
3055		if (sdebug_lbpws == 0) {
3056			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3057			return check_condition_result;
3058		} else
3059			unmap = true;
3060	}
3061	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3062		ndob = true;
3063	lba = get_unaligned_be64(cmd + 2);
3064	num = get_unaligned_be32(cmd + 10);
3065	if (num > sdebug_write_same_length) {
3066		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3067		return check_condition_result;
3068	}
3069	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3070}
3071
3072/* Note the mode field is in the same position as the (lower) service action
3073 * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3074 * each mode of this command should be reported separately; left for future work. */
3075static int resp_write_buffer(struct scsi_cmnd *scp,
3076			     struct sdebug_dev_info *devip)
3077{
3078	u8 *cmd = scp->cmnd;
3079	struct scsi_device *sdp = scp->device;
3080	struct sdebug_dev_info *dp;
3081	u8 mode;
3082
3083	mode = cmd[1] & 0x1f;
3084	switch (mode) {
3085	case 0x4:	/* download microcode (MC) and activate (ACT) */
3086		/* set UAs on this device only */
3087		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3088		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3089		break;
3090	case 0x5:	/* download MC, save and ACT */
3091		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3092		break;
3093	case 0x6:	/* download MC with offsets and ACT */
3094		/* set UAs on most devices (LUs) in this target */
3095		list_for_each_entry(dp,
3096				    &devip->sdbg_host->dev_info_list,
3097				    dev_list)
3098			if (dp->target == sdp->id) {
3099				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3100				if (devip != dp)
3101					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3102						dp->uas_bm);
3103			}
3104		break;
3105	case 0x7:	/* download MC with offsets, save, and ACT */
3106		/* set UA on all devices (LUs) in this target */
3107		list_for_each_entry(dp,
3108				    &devip->sdbg_host->dev_info_list,
3109				    dev_list)
3110			if (dp->target == sdp->id)
3111				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3112					dp->uas_bm);
3113		break;
3114	default:
3115		/* do nothing for this command for other mode values */
3116		break;
3117	}
3118	return 0;
3119}
3120
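/* COMPARE AND WRITE (0x89): fetch 2 * num blocks from data-out (compare data
 * followed by write data), compare the first half against the store and, only
 * on a match, write the second half to the medium. */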
3121static int resp_comp_write(struct scsi_cmnd *scp,
3122			   struct sdebug_dev_info *devip)
3123{
3124	u8 *cmd = scp->cmnd;
3125	u8 *arr;
3126	u8 *fake_storep_hold;
3127	u64 lba;
3128	u32 dnum;
3129	u32 lb_size = sdebug_sector_size;
3130	u8 num;
3131	unsigned long iflags;
3132	int ret;
3133	int retval = 0;
3134
3135	lba = get_unaligned_be64(cmd + 2);
3136	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3137	if (0 == num)
3138		return 0;	/* degenerate case, not an error */
3139	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3140	    (cmd[1] & 0xe0)) {
3141		mk_sense_invalid_opcode(scp);
3142		return check_condition_result;
3143	}
3144	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3145	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3146	    (cmd[1] & 0xe0) == 0)
3147		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3148			    "to DIF device\n");
3149
3150	/* inline check_device_access_params() */
3151	if (lba + num > sdebug_capacity) {
3152		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3153		return check_condition_result;
3154	}
3155	/* transfer length excessive (tie in to block limits VPD page) */
3156	if (num > sdebug_store_sectors) {
3157		/* needs work to find which cdb byte 'num' comes from */
3158		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3159		return check_condition_result;
3160	}
3161	dnum = 2 * num;
3162	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3163	if (NULL == arr) {
3164		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3165				INSUFF_RES_ASCQ);
3166		return check_condition_result;
3167	}
3168
3169	write_lock_irqsave(&atomic_rw, iflags);
3170
3171	/* trick do_device_access() to fetch both compare and write buffers
3172	 * from data-in into arr. Safe (atomic) since write_lock held. */
3173	fake_storep_hold = fake_storep;
3174	fake_storep = arr;
3175	ret = do_device_access(scp, 0, dnum, true);
3176	fake_storep = fake_storep_hold;
3177	if (ret == -1) {
3178		retval = DID_ERROR << 16;
3179		goto cleanup;
3180	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3181		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3182			    "indicated=%u, IO sent=%d bytes\n", my_name,
3183			    dnum * lb_size, ret);
3184	if (!comp_write_worker(lba, num, arr)) {
3185		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3186		retval = check_condition_result;
3187		goto cleanup;
3188	}
3189	if (scsi_debug_lbp())
3190		map_region(lba, num);
3191cleanup:
3192	write_unlock_irqrestore(&atomic_rw, iflags);
3193	kfree(arr);
3194	return retval;
3195}
3196
3197struct unmap_block_desc {
3198	__be64	lba;
3199	__be32	blocks;
3200	__be32	__reserved;
3201};
3202
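/* UNMAP (0x42): walk the block descriptors in the parameter list and
 * deallocate each extent while holding the write lock. */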
3203static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3204{
3205	unsigned char *buf;
3206	struct unmap_block_desc *desc;
3207	unsigned int i, payload_len, descriptors;
3208	int ret;
3209	unsigned long iflags;
3210
3211
3212	if (!scsi_debug_lbp())
3213		return 0;	/* fib and say it's done */
3214	payload_len = get_unaligned_be16(scp->cmnd + 7);
3215	BUG_ON(scsi_bufflen(scp) != payload_len);
3216
3217	descriptors = (payload_len - 8) / 16;
3218	if (descriptors > sdebug_unmap_max_desc) {
3219		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3220		return check_condition_result;
3221	}
3222
3223	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3224	if (!buf) {
3225		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3226				INSUFF_RES_ASCQ);
3227		return check_condition_result;
3228	}
3229
3230	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3231
3232	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3233	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3234
3235	desc = (void *)&buf[8];
3236
3237	write_lock_irqsave(&atomic_rw, iflags);
3238
3239	for (i = 0 ; i < descriptors ; i++) {
3240		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3241		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3242
3243		ret = check_device_access_params(scp, lba, num);
3244		if (ret)
3245			goto out;
3246
3247		unmap_region(lba, num);
3248	}
3249
3250	ret = 0;
3251
3252out:
3253	write_unlock_irqrestore(&atomic_rw, iflags);
3254	kfree(buf);
3255
3256	return ret;
3257}
3258
3259#define SDEBUG_GET_LBA_STATUS_LEN 32
3260
3261static int resp_get_lba_status(struct scsi_cmnd *scp,
3262			       struct sdebug_dev_info *devip)
3263{
3264	u8 *cmd = scp->cmnd;
3265	u64 lba;
3266	u32 alloc_len, mapped, num;
3267	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3268	int ret;
3269
3270	lba = get_unaligned_be64(cmd + 2);
3271	alloc_len = get_unaligned_be32(cmd + 10);
3272
3273	if (alloc_len < 24)
3274		return 0;
3275
3276	ret = check_device_access_params(scp, lba, 1);
3277	if (ret)
3278		return ret;
3279
3280	if (scsi_debug_lbp())
3281		mapped = map_state(lba, &num);
3282	else {
3283		mapped = 1;
3284		/* following just in case virtual_gb changed */
3285		sdebug_capacity = get_sdebug_capacity();
3286		if (sdebug_capacity - lba <= 0xffffffff)
3287			num = sdebug_capacity - lba;
3288		else
3289			num = 0xffffffff;
3290	}
3291
3292	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3293	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3294	put_unaligned_be64(lba, arr + 8);	/* LBA */
3295	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3296	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3297
3298	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3299}
3300
3301#define RL_BUCKET_ELEMS 8
3302
3303/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3304 * (W-LUN), the normal Linux scanning logic does not associate it with a
3305 * device (e.g. /dev/sg7). The following magic will make that association:
3306 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3307 * where <n> is a host number. If there are multiple targets in a host then
3308 * the above will associate a W-LUN with each target. To get a W-LUN only
3309 * for target 2, use "echo '- 2 49409' > scan".
3310 */
3311static int resp_report_luns(struct scsi_cmnd *scp,
3312			    struct sdebug_dev_info *devip)
3313{
3314	unsigned char *cmd = scp->cmnd;
3315	unsigned int alloc_len;
3316	unsigned char select_report;
3317	u64 lun;
3318	struct scsi_lun *lun_p;
3319	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3320	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
3321	unsigned int wlun_cnt;	/* report luns W-LUN count */
3322	unsigned int tlun_cnt;	/* total LUN count */
3323	unsigned int rlen;	/* response length (in bytes) */
3324	int k, j, n, res;
3325	unsigned int off_rsp = 0;
3326	const int sz_lun = sizeof(struct scsi_lun);
3327
3328	clear_luns_changed_on_target(devip);
3329
3330	select_report = cmd[2];
3331	alloc_len = get_unaligned_be32(cmd + 6);
3332
3333	if (alloc_len < 4) {
3334		pr_err("alloc len too small %d\n", alloc_len);
3335		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3336		return check_condition_result;
3337	}
3338
3339	switch (select_report) {
3340	case 0:		/* all LUNs apart from W-LUNs */
3341		lun_cnt = sdebug_max_luns;
3342		wlun_cnt = 0;
3343		break;
3344	case 1:		/* only W-LUNs */
3345		lun_cnt = 0;
3346		wlun_cnt = 1;
3347		break;
3348	case 2:		/* all LUNs */
3349		lun_cnt = sdebug_max_luns;
3350		wlun_cnt = 1;
3351		break;
3352	case 0x10:	/* only administrative LUs */
3353	case 0x11:	/* see SPC-5 */
3354	case 0x12:	/* only subsidiary LUs owned by referenced LU */
3355	default:
3356		pr_debug("select report invalid %d\n", select_report);
3357		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3358		return check_condition_result;
3359	}
3360
3361	if (sdebug_no_lun_0 && (lun_cnt > 0))
3362		--lun_cnt;
3363
3364	tlun_cnt = lun_cnt + wlun_cnt;
3365	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
3366	scsi_set_resid(scp, scsi_bufflen(scp));
3367	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3368		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3369
3370	/* loops rely on sizeof response header same as sizeof lun (both 8) */
3371	lun = sdebug_no_lun_0 ? 1 : 0;
3372	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3373		memset(arr, 0, sizeof(arr));
3374		lun_p = (struct scsi_lun *)&arr[0];
3375		if (k == 0) {
3376			put_unaligned_be32(rlen, &arr[0]);
3377			++lun_p;
3378			j = 1;
3379		}
3380		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3381			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3382				break;
3383			int_to_scsilun(lun++, lun_p);
3384		}
3385		if (j < RL_BUCKET_ELEMS)
3386			break;
3387		n = j * sz_lun;
3388		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
3389		if (res)
3390			return res;
3391		off_rsp += n;
3392	}
3393	if (wlun_cnt) {
3394		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3395		++j;
3396	}
3397	if (j > 0)
3398		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
3399	return res;
3400}
3401
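/* XOR the data-in buffer (holding the blocks just read) with the command's
 * data-out buffer in place; XDWRITEREAD returns the XOR of old and new data. */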
3402static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3403			    unsigned int num, struct sdebug_dev_info *devip)
3404{
3405	int j;
3406	unsigned char *kaddr, *buf;
3407	unsigned int offset;
3408	struct scsi_data_buffer *sdb = scsi_in(scp);
3409	struct sg_mapping_iter miter;
3410
3411	/* better not to use temporary buffer. */
3412	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3413	if (!buf) {
3414		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3415				INSUFF_RES_ASCQ);
3416		return check_condition_result;
3417	}
3418
3419	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3420
3421	offset = 0;
3422	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3423			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3424
3425	while (sg_miter_next(&miter)) {
3426		kaddr = miter.addr;
3427		for (j = 0; j < miter.length; j++)
3428			*(kaddr + j) ^= *(buf + offset + j);
3429
3430		offset += miter.length;
3431	}
3432	sg_miter_stop(&miter);
3433	kfree(buf);
3434
3435	return 0;
3436}
3437
3438static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3439			       struct sdebug_dev_info *devip)
3440{
3441	u8 *cmd = scp->cmnd;
3442	u64 lba;
3443	u32 num;
3444	int errsts;
3445
3446	if (!scsi_bidi_cmnd(scp)) {
3447		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3448				INSUFF_RES_ASCQ);
3449		return check_condition_result;
3450	}
3451	errsts = resp_read_dt0(scp, devip);
3452	if (errsts)
3453		return errsts;
3454	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3455		errsts = resp_write_dt0(scp, devip);
3456		if (errsts)
3457			return errsts;
3458	}
3459	lba = get_unaligned_be32(cmd + 2);
3460	num = get_unaligned_be16(cmd + 7);
3461	return resp_xdwriteread(scp, lba, num, devip);
3462}
3463
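/* Pick the submission queue for a command: the block-mq hardware queue index
 * when multi-queue is active, otherwise the single queue at index 0. */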
3464static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3465{
3466	struct sdebug_queue *sqp = sdebug_q_arr;
3467
3468	if (sdebug_mq_active) {
3469		u32 tag = blk_mq_unique_tag(cmnd->request);
3470		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3471
3472		if (unlikely(hwq >= submit_queues)) {
3473			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3474			hwq %= submit_queues;
3475		}
3476		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3477		return sqp + hwq;
3478	} else
3479		return sqp;
3480}
3481
3482/* Queued (deferred) command completions converge here. */
3483static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3484{
3485	int qc_idx;
3486	int retiring = 0;
3487	unsigned long iflags;
3488	struct sdebug_queue *sqp;
3489	struct sdebug_queued_cmd *sqcp;
3490	struct scsi_cmnd *scp;
3491	struct sdebug_dev_info *devip;
3492
3493	qc_idx = sd_dp->qc_idx;
3494	sqp = sdebug_q_arr + sd_dp->sqa_idx;
3495	if (sdebug_statistics) {
3496		atomic_inc(&sdebug_completions);
3497		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3498			atomic_inc(&sdebug_miss_cpus);
3499	}
3500	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3501		pr_err("wild qc_idx=%d\n", qc_idx);
3502		return;
3503	}
3504	spin_lock_irqsave(&sqp->qc_lock, iflags);
3505	sqcp = &sqp->qc_arr[qc_idx];
3506	scp = sqcp->a_cmnd;
3507	if (unlikely(scp == NULL)) {
3508		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3509		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3510		       sd_dp->sqa_idx, qc_idx);
3511		return;
3512	}
3513	devip = (struct sdebug_dev_info *)scp->device->hostdata;
3514	if (likely(devip))
3515		atomic_dec(&devip->num_in_q);
3516	else
3517		pr_err("devip=NULL\n");
3518	if (unlikely(atomic_read(&retired_max_queue) > 0))
3519		retiring = 1;
3520
3521	sqcp->a_cmnd = NULL;
3522	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3523		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3524		pr_err("Unexpected completion\n");
3525		return;
3526	}
3527
3528	if (unlikely(retiring)) {	/* user has reduced max_queue */
3529		int k, retval;
3530
3531		retval = atomic_read(&retired_max_queue);
3532		if (qc_idx >= retval) {
3533			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3534			pr_err("index %d too large\n", retval);
3535			return;
3536		}
3537		k = find_last_bit(sqp->in_use_bm, retval);
3538		if ((k < sdebug_max_queue) || (k == retval))
3539			atomic_set(&retired_max_queue, 0);
3540		else
3541			atomic_set(&retired_max_queue, k + 1);
3542	}
3543	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3544	scp->scsi_done(scp); /* callback to mid level */
3545}
3546
3547/* When high resolution timer goes off this function is called. */
3548static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3549{
3550	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3551						  hrt);
3552	sdebug_q_cmd_complete(sd_dp);
3553	return HRTIMER_NORESTART;
3554}
3555
3556/* When work queue schedules work, it calls this function. */
3557static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3558{
3559	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3560						  ew.work);
3561	sdebug_q_cmd_complete(sd_dp);
3562}
3563
3564static bool got_shared_uuid;
3565static uuid_be shared_uuid;
3566
3567static struct sdebug_dev_info *sdebug_device_create(
3568			struct sdebug_host_info *sdbg_host, gfp_t flags)
3569{
3570	struct sdebug_dev_info *devip;
3571
3572	devip = kzalloc(sizeof(*devip), flags);
3573	if (devip) {
3574		if (sdebug_uuid_ctl == 1)
3575			uuid_be_gen(&devip->lu_name);
3576		else if (sdebug_uuid_ctl == 2) {
3577			if (got_shared_uuid)
3578				devip->lu_name = shared_uuid;
3579			else {
3580				uuid_be_gen(&shared_uuid);
3581				got_shared_uuid = true;
3582				devip->lu_name = shared_uuid;
3583			}
3584		}
3585		devip->sdbg_host = sdbg_host;
3586		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3587	}
3588	return devip;
3589}
3590
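/* Find the sdebug_dev_info already bound to this scsi_device, or claim a free
 * slot (allocating one if needed); a newly claimed slot starts with a
 * power-on unit attention pending. */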
3591static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3592{
3593	struct sdebug_host_info *sdbg_host;
3594	struct sdebug_dev_info *open_devip = NULL;
3595	struct sdebug_dev_info *devip;
3596
3597	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3598	if (!sdbg_host) {
3599		pr_err("Host info NULL\n");
3600		return NULL;
3601        }
3602	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3603		if ((devip->used) && (devip->channel == sdev->channel) &&
3604                    (devip->target == sdev->id) &&
3605                    (devip->lun == sdev->lun))
3606                        return devip;
3607		else {
3608			if ((!devip->used) && (!open_devip))
3609				open_devip = devip;
3610		}
3611	}
3612	if (!open_devip) { /* try and make a new one */
3613		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3614		if (!open_devip) {
3615			pr_err("out of memory at line %d\n", __LINE__);
3616			return NULL;
3617		}
3618	}
3619
3620	open_devip->channel = sdev->channel;
3621	open_devip->target = sdev->id;
3622	open_devip->lun = sdev->lun;
3623	open_devip->sdbg_host = sdbg_host;
3624	atomic_set(&open_devip->num_in_q, 0);
3625	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3626	open_devip->used = true;
3627	return open_devip;
3628}
3629
3630static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3631{
3632	if (sdebug_verbose)
3633		pr_info("slave_alloc <%u %u %u %llu>\n",
3634		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3635	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3636	return 0;
3637}
3638
3639static int scsi_debug_slave_configure(struct scsi_device *sdp)
3640{
3641	struct sdebug_dev_info *devip =
3642			(struct sdebug_dev_info *)sdp->hostdata;
3643
3644	if (sdebug_verbose)
3645		pr_info("slave_configure <%u %u %u %llu>\n",
3646		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3647	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3648		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3649	if (devip == NULL) {
3650		devip = find_build_dev_info(sdp);
3651		if (devip == NULL)
3652			return 1;  /* no resources, will be marked offline */
3653	}
3654	sdp->hostdata = devip;
3655	blk_queue_max_segment_size(sdp->request_queue, -1U);
3656	if (sdebug_no_uld)
3657		sdp->no_uld_attach = 1;
3658	return 0;
3659}
3660
3661static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3662{
3663	struct sdebug_dev_info *devip =
3664		(struct sdebug_dev_info *)sdp->hostdata;
3665
3666	if (sdebug_verbose)
3667		pr_info("slave_destroy <%u %u %u %llu>\n",
3668		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3669	if (devip) {
3670		/* make this slot available for re-use */
3671		devip->used = false;
3672		sdp->hostdata = NULL;
3673	}
3674}
3675
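/* Cancel the deferred completion of one queued command: its hrtimer when a
 * delay is in force, or its work item when jdelay is negative. */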
3676static void stop_qc_helper(struct sdebug_defer *sd_dp)
3677{
3678	if (!sd_dp)
3679		return;
3680	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3681		hrtimer_cancel(&sd_dp->hrt);
3682	else if (sdebug_jdelay < 0)
3683		cancel_work_sync(&sd_dp->ew.work);
3684}
3685
3686/* If @cmnd is found, delete its timer or work queue and return true; else
3687   return false. */
3688static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3689{
3690	unsigned long iflags;
3691	int j, k, qmax, r_qmax;
3692	struct sdebug_queue *sqp;
3693	struct sdebug_queued_cmd *sqcp;
3694	struct sdebug_dev_info *devip;
3695	struct sdebug_defer *sd_dp;
3696
3697	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3698		spin_lock_irqsave(&sqp->qc_lock, iflags);
3699		qmax = sdebug_max_queue;
3700		r_qmax = atomic_read(&retired_max_queue);
3701		if (r_qmax > qmax)
3702			qmax = r_qmax;
3703		for (k = 0; k < qmax; ++k) {
3704			if (test_bit(k, sqp->in_use_bm)) {
3705				sqcp = &sqp->qc_arr[k];
3706				if (cmnd != sqcp->a_cmnd)
3707					continue;
3708				/* found */
3709				devip = (struct sdebug_dev_info *)
3710						cmnd->device->hostdata;
3711				if (devip)
3712					atomic_dec(&devip->num_in_q);
3713				sqcp->a_cmnd = NULL;
3714				sd_dp = sqcp->sd_dp;
3715				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3716				stop_qc_helper(sd_dp);
3717				clear_bit(k, sqp->in_use_bm);
3718				return true;
3719			}
3720		}
3721		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3722	}
3723	return false;
3724}
3725
3726/* Deletes (stops) timers or work queues of all queued commands */
3727static void stop_all_queued(void)
3728{
3729	unsigned long iflags;
3730	int j, k;
3731	struct sdebug_queue *sqp;
3732	struct sdebug_queued_cmd *sqcp;
3733	struct sdebug_dev_info *devip;
3734	struct sdebug_defer *sd_dp;
3735
3736	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3737		spin_lock_irqsave(&sqp->qc_lock, iflags);
3738		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3739			if (test_bit(k, sqp->in_use_bm)) {
3740				sqcp = &sqp->qc_arr[k];
3741				if (sqcp->a_cmnd == NULL)
3742					continue;
3743				devip = (struct sdebug_dev_info *)
3744					sqcp->a_cmnd->device->hostdata;
3745				if (devip)
3746					atomic_dec(&devip->num_in_q);
3747				sqcp->a_cmnd = NULL;
3748				sd_dp = sqcp->sd_dp;
3749				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3750				stop_qc_helper(sd_dp);
3751				clear_bit(k, sqp->in_use_bm);
3752				spin_lock_irqsave(&sqp->qc_lock, iflags);
3753			}
3754		}
3755		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3756	}
3757}
3758
3759/* Free queued command memory on heap */
3760static void free_all_queued(void)
3761{
3762	int j, k;
3763	struct sdebug_queue *sqp;
3764	struct sdebug_queued_cmd *sqcp;
3765
3766	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3767		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3768			sqcp = &sqp->qc_arr[k];
3769			kfree(sqcp->sd_dp);
3770			sqcp->sd_dp = NULL;
3771		}
3772	}
3773}
3774
3775static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3776{
3777	bool ok;
3778
3779	++num_aborts;
3780	if (SCpnt) {
3781		ok = stop_queued_cmnd(SCpnt);
3782		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3783			sdev_printk(KERN_INFO, SCpnt->device,
3784				    "%s: command%s found\n", __func__,
3785				    ok ? "" : " not");
3786	}
3787	return SUCCESS;
3788}
3789
3790static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3791{
3792	++num_dev_resets;
3793	if (SCpnt && SCpnt->device) {
3794		struct scsi_device *sdp = SCpnt->device;
3795		struct sdebug_dev_info *devip =
3796				(struct sdebug_dev_info *)sdp->hostdata;
3797
3798		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3799			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3800		if (devip)
3801			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3802	}
3803	return SUCCESS;
3804}
3805
3806static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3807{
3808	struct sdebug_host_info *sdbg_host;
3809	struct sdebug_dev_info *devip;
3810	struct scsi_device *sdp;
3811	struct Scsi_Host *hp;
3812	int k = 0;
3813
3814	++num_target_resets;
3815	if (!SCpnt)
3816		goto lie;
3817	sdp = SCpnt->device;
3818	if (!sdp)
3819		goto lie;
3820	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3821		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3822	hp = sdp->host;
3823	if (!hp)
3824		goto lie;
3825	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3826	if (sdbg_host) {
3827		list_for_each_entry(devip,
3828				    &sdbg_host->dev_info_list,
3829				    dev_list)
3830			if (devip->target == sdp->id) {
3831				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3832				++k;
3833			}
3834	}
3835	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3836		sdev_printk(KERN_INFO, sdp,
3837			    "%s: %d device(s) found in target\n", __func__, k);
3838lie:
3839	return SUCCESS;
3840}
3841
3842static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3843{
3844	struct sdebug_host_info *sdbg_host;
3845	struct sdebug_dev_info *devip;
3846        struct scsi_device * sdp;
3847        struct Scsi_Host * hp;
3848	int k = 0;
3849
3850	++num_bus_resets;
3851	if (!(SCpnt && SCpnt->device))
3852		goto lie;
3853	sdp = SCpnt->device;
3854	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3855		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3856	hp = sdp->host;
3857	if (hp) {
3858		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3859		if (sdbg_host) {
3860			list_for_each_entry(devip,
3861                                            &sdbg_host->dev_info_list,
3862					    dev_list) {
3863				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3864				++k;
3865			}
3866		}
3867	}
3868	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3869		sdev_printk(KERN_INFO, sdp,
3870			    "%s: %d device(s) found in host\n", __func__, k);
3871lie:
3872	return SUCCESS;
3873}
3874
3875static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3876{
3877	struct sdebug_host_info * sdbg_host;
3878	struct sdebug_dev_info *devip;
3879	int k = 0;
3880
3881	++num_host_resets;
3882	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3883		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3884        spin_lock(&sdebug_host_list_lock);
3885        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3886		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3887				    dev_list) {
3888			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3889			++k;
3890		}
3891        }
3892        spin_unlock(&sdebug_host_list_lock);
3893	stop_all_queued();
3894	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3895		sdev_printk(KERN_INFO, SCpnt->device,
3896			    "%s: %d device(s) found\n", __func__, k);
3897	return SUCCESS;
3898}
3899
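/* Write a legacy DOS partition table into the first sector of the ram store:
 * 0x55AA signature, entries at offset 0x1be, sdebug_num_parts type 0x83
 * (Linux) partitions aligned to cylinder boundaries. */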
3900static void __init sdebug_build_parts(unsigned char *ramp,
3901				      unsigned long store_size)
3902{
3903	struct partition * pp;
3904	int starts[SDEBUG_MAX_PARTS + 2];
3905	int sectors_per_part, num_sectors, k;
3906	int heads_by_sects, start_sec, end_sec;
3907
3908	/* assume partition table already zeroed */
3909	if ((sdebug_num_parts < 1) || (store_size < 1048576))
3910		return;
3911	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
3912		sdebug_num_parts = SDEBUG_MAX_PARTS;
3913		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3914	}
3915	num_sectors = (int)sdebug_store_sectors;
3916	sectors_per_part = (num_sectors - sdebug_sectors_per)
3917			   / sdebug_num_parts;
3918	heads_by_sects = sdebug_heads * sdebug_sectors_per;
3919        starts[0] = sdebug_sectors_per;
3920	for (k = 1; k < sdebug_num_parts; ++k)
3921		starts[k] = ((k * sectors_per_part) / heads_by_sects)
3922			    * heads_by_sects;
3923	starts[sdebug_num_parts] = num_sectors;
3924	starts[sdebug_num_parts + 1] = 0;
3925
3926	ramp[510] = 0x55;	/* magic partition markings */
3927	ramp[511] = 0xAA;
3928	pp = (struct partition *)(ramp + 0x1be);
3929	for (k = 0; starts[k + 1]; ++k, ++pp) {
3930		start_sec = starts[k];
3931		end_sec = starts[k + 1] - 1;
3932		pp->boot_ind = 0;
3933
3934		pp->cyl = start_sec / heads_by_sects;
3935		pp->head = (start_sec - (pp->cyl * heads_by_sects))
3936			   / sdebug_sectors_per;
3937		pp->sector = (start_sec % sdebug_sectors_per) + 1;
3938
3939		pp->end_cyl = end_sec / heads_by_sects;
3940		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3941			       / sdebug_sectors_per;
3942		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3943
3944		pp->start_sect = cpu_to_le32(start_sec);
3945		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3946		pp->sys_ind = 0x83;	/* plain Linux partition */
3947	}
3948}
3949
3950static void block_unblock_all_queues(bool block)
3951{
3952	int j;
3953	struct sdebug_queue *sqp;
3954
3955	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
3956		atomic_set(&sqp->blocked, (int)block);
3957}
3958
3959/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3960 * commands will be processed normally before triggers occur.
3961 */
3962static void tweak_cmnd_count(void)
3963{
3964	int count, modulo;
3965
3966	modulo = abs(sdebug_every_nth);
3967	if (modulo < 2)
3968		return;
3969	block_unblock_all_queues(true);
3970	count = atomic_read(&sdebug_cmnd_count);
3971	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
3972	block_unblock_all_queues(false);
3973}
3974
3975static void clear_queue_stats(void)
3976{
3977	atomic_set(&sdebug_cmnd_count, 0);
3978	atomic_set(&sdebug_completions, 0);
3979	atomic_set(&sdebug_miss_cpus, 0);
3980	atomic_set(&sdebug_a_tsf, 0);
3981}
3982
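/* Arm per-command error injection flags from the opts bitmask; only the
 * command whose count is a multiple of abs(every_nth) gets flagged. */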
3983static void setup_inject(struct sdebug_queue *sqp,
3984			 struct sdebug_queued_cmd *sqcp)
3985{
3986	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
3987		return;
3988	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
3989	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
3990	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3991	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3992	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
3993}
3994
3995/* Complete the processing of the thread that queued a SCSI command to this
3996 * driver. It either completes the command by calling cmnd_done() or
3997 * schedules a hr timer or work queue then returns 0. Returns
3998 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
3999 */
4000static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4001			 int scsi_result, int delta_jiff)
4002{
4003	unsigned long iflags;
4004	int k, num_in_q, qdepth, inject;
4005	struct sdebug_queue *sqp;
4006	struct sdebug_queued_cmd *sqcp;
4007	struct scsi_device *sdp;
4008	struct sdebug_defer *sd_dp;
4009
4010	if (unlikely(devip == NULL)) {
4011		if (scsi_result == 0)
4012			scsi_result = DID_NO_CONNECT << 16;
4013		goto respond_in_thread;
4014	}
4015	sdp = cmnd->device;
4016
4017	if (unlikely(sdebug_verbose && scsi_result))
4018		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4019			    __func__, scsi_result);
4020	if (delta_jiff == 0)
4021		goto respond_in_thread;
4022
4023	/* schedule the response at a later time if resources permit */
4024	sqp = get_queue(cmnd);
4025	spin_lock_irqsave(&sqp->qc_lock, iflags);
4026	if (unlikely(atomic_read(&sqp->blocked))) {
4027		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4028		return SCSI_MLQUEUE_HOST_BUSY;
4029	}
4030	num_in_q = atomic_read(&devip->num_in_q);
4031	qdepth = cmnd->device->queue_depth;
4032	inject = 0;
4033	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4034		if (scsi_result) {
4035			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4036			goto respond_in_thread;
4037		} else
4038			scsi_result = device_qfull_result;
4039	} else if (unlikely(sdebug_every_nth &&
4040			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4041			    (scsi_result == 0))) {
4042		if ((num_in_q == (qdepth - 1)) &&
4043		    (atomic_inc_return(&sdebug_a_tsf) >=
4044		     abs(sdebug_every_nth))) {
4045			atomic_set(&sdebug_a_tsf, 0);
4046			inject = 1;
4047			scsi_result = device_qfull_result;
4048		}
4049	}
4050
4051	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4052	if (unlikely(k >= sdebug_max_queue)) {
4053		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4054		if (scsi_result)
4055			goto respond_in_thread;
4056		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4057			scsi_result = device_qfull_result;
4058		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4059			sdev_printk(KERN_INFO, sdp,
4060				    "%s: max_queue=%d exceeded, %s\n",
4061				    __func__, sdebug_max_queue,
4062				    (scsi_result ?  "status: TASK SET FULL" :
4063						    "report: host busy"));
4064		if (scsi_result)
4065			goto respond_in_thread;
4066		else
4067			return SCSI_MLQUEUE_HOST_BUSY;
4068	}
4069	__set_bit(k, sqp->in_use_bm);
4070	atomic_inc(&devip->num_in_q);
4071	sqcp = &sqp->qc_arr[k];
4072	sqcp->a_cmnd = cmnd;
4073	cmnd->host_scribble = (unsigned char *)sqcp;
4074	cmnd->result = scsi_result;
4075	sd_dp = sqcp->sd_dp;
4076	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4077	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4078		setup_inject(sqp, sqcp);
4079	if (delta_jiff > 0 || sdebug_ndelay > 0) {
4080		ktime_t kt;
4081
4082		if (delta_jiff > 0) {
4083			struct timespec ts;
4084
4085			jiffies_to_timespec(delta_jiff, &ts);
4086			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
4087		} else
4088			kt = sdebug_ndelay;
4089		if (NULL == sd_dp) {
4090			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4091			if (NULL == sd_dp)
4092				return SCSI_MLQUEUE_HOST_BUSY;
4093			sqcp->sd_dp = sd_dp;
4094			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4095				     HRTIMER_MODE_REL_PINNED);
4096			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4097			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4098			sd_dp->qc_idx = k;
4099		}
4100		if (sdebug_statistics)
4101			sd_dp->issuing_cpu = raw_smp_processor_id();
4102		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4103	} else {	/* jdelay < 0, use work queue */
4104		if (NULL == sd_dp) {
4105			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
4106			if (NULL == sd_dp)
4107				return SCSI_MLQUEUE_HOST_BUSY;
4108			sqcp->sd_dp = sd_dp;
4109			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4110			sd_dp->qc_idx = k;
4111			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4112		}
4113		if (sdebug_statistics)
4114			sd_dp->issuing_cpu = raw_smp_processor_id();
4115		schedule_work(&sd_dp->ew.work);
4116	}
4117	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4118		     (scsi_result == device_qfull_result)))
4119		sdev_printk(KERN_INFO, sdp,
4120			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4121			    num_in_q, (inject ? "<inject> " : ""),
4122			    "status: TASK SET FULL");
4123	return 0;
4124
4125respond_in_thread:	/* call back to mid-layer using invocation thread */
4126	cmnd->result = scsi_result;
4127	cmnd->scsi_done(cmnd);
4128	return 0;
4129}
4130
4131/* Note: The following macros create attribute files in the
4132   /sys/module/scsi_debug/parameters directory. Unfortunately this
4133   driver is unaware of a change and cannot trigger auxiliary actions
4134   as it can when the corresponding attribute in the
4135   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4136 */
4137module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4138module_param_named(ato, sdebug_ato, int, S_IRUGO);
4139module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4140module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4141module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4142module_param_named(dif, sdebug_dif, int, S_IRUGO);
4143module_param_named(dix, sdebug_dix, int, S_IRUGO);
4144module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4145module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4146module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4147module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4148module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4149module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4150module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4151module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4152module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4153module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4154module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4155module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4156module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4157module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4158module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4159module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4160module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4161module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4162module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4163module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4164module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4165module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4166module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4167module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4168module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4169module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4170module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4171module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4172module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4173module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4174module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4175module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4176module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4177module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4178		   S_IRUGO | S_IWUSR);
4179module_param_named(write_same_length, sdebug_write_same_length, int,
4180		   S_IRUGO | S_IWUSR);
4181
4182MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4183MODULE_DESCRIPTION("SCSI debug adapter driver");
4184MODULE_LICENSE("GPL");
4185MODULE_VERSION(SDEBUG_VERSION);
4186
4187MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4188MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4189MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4190MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4191MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4192MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4193MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4194MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4195MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4196MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4197MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4198MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4199MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4200MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4201MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4202MODULE_PARM_DESC(lbprz,
4203	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4204MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4205MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4206MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4207MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4208MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4209MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4210MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4211MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4212MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4213MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4214MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4215MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4216MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4217MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
4218MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4219MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4220MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4221MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4222MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4223MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4224MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4225MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4226MODULE_PARM_DESC(uuid_ctl,
4227		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4228MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4229MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4230MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4231
4232#define SDEBUG_INFO_LEN 256
4233static char sdebug_info[SDEBUG_INFO_LEN];
4234
4235static const char *scsi_debug_info(struct Scsi_Host *shp)
4236{
4237	int k;
4238
4239	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4240		      my_name, SDEBUG_VERSION, sdebug_version_date);
4241	if (k >= (SDEBUG_INFO_LEN - 1))
4242		return sdebug_info;
4243	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4244		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4245		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4246		  "statistics", (int)sdebug_statistics);
4247	return sdebug_info;
4248}
4249
4250/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4251static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4252				 int length)
4253{
4254	char arr[16];
4255	int opts;
4256	int minLen = length > 15 ? 15 : length;
4257
4258	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4259		return -EACCES;
4260	memcpy(arr, buffer, minLen);
4261	arr[minLen] = '\0';
4262	if (1 != sscanf(arr, "%d", &opts))
4263		return -EINVAL;
4264	sdebug_opts = opts;
4265	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4266	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4267	if (sdebug_every_nth != 0)
4268		tweak_cmnd_count();
4269	return length;
4270}
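/*
 * Illustrative example (assuming the host number is 0): the following
 * sets opts to 5 (noise + timeout, per the opts bit meanings in the
 * module parameter description above) via scsi_debug_write_info();
 * note that only decimal input is accepted here:
 *     echo 5 > /proc/scsi/scsi_debug/0
 */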
4271
4272/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4273 * same for each scsi_debug host (if more than one). Some of the counters
4274 * output are not atomic, so they may be inaccurate on a busy system. */
4275static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4276{
4277	int f, j, l;
4278	struct sdebug_queue *sqp;
4279
4280	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4281		   SDEBUG_VERSION, sdebug_version_date);
4282	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4283		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4284		   sdebug_opts, sdebug_every_nth);
4285	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4286		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4287		   sdebug_sector_size, "bytes");
4288	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4289		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4290		   num_aborts);
4291	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4292		   num_dev_resets, num_target_resets, num_bus_resets,
4293		   num_host_resets);
4294	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4295		   dix_reads, dix_writes, dif_errors);
4296	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
4297		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
4298		   sdebug_mq_active);
4299	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4300		   atomic_read(&sdebug_cmnd_count),
4301		   atomic_read(&sdebug_completions),
4302		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4303		   atomic_read(&sdebug_a_tsf));
4304
4305	seq_printf(m, "submit_queues=%d\n", submit_queues);
4306	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4307		seq_printf(m, "  queue %d:\n", j);
4308		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4309		if (f != sdebug_max_queue) {
4310			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4311			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4312				   "first,last bits", f, l);
4313		}
4314	}
4315	return 0;
4316}
4317
4318static ssize_t delay_show(struct device_driver *ddp, char *buf)
4319{
4320	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4321}
4322/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4323 * of delay is jiffies.
4324 */
4325static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4326			   size_t count)
4327{
4328	int jdelay, res;
4329
4330	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4331		res = count;
4332		if (sdebug_jdelay != jdelay) {
4333			int j, k;
4334			struct sdebug_queue *sqp;
4335
4336			block_unblock_all_queues(true);
4337			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4338			     ++j, ++sqp) {
4339				k = find_first_bit(sqp->in_use_bm,
4340						   sdebug_max_queue);
4341				if (k != sdebug_max_queue) {
4342					res = -EBUSY;   /* queued commands */
4343					break;
4344				}
4345			}
4346			if (res > 0) {
4347				/* make sure sdebug_defer instances get
4348				 * re-allocated for new delay variant */
4349				free_all_queued();
4350				sdebug_jdelay = jdelay;
4351				sdebug_ndelay = 0;
4352			}
4353			block_unblock_all_queues(false);
4354		}
4355		return res;
4356	}
4357	return -EINVAL;
4358}
4359static DRIVER_ATTR_RW(delay);
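/*
 * Illustrative usage of the delay attribute above (the path assumes the
 * driver attribute files registered under the "pseudo" bus):
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * makes responses complete without delay; the write returns -EBUSY while
 * commands are still queued, as noted above delay_store().
 */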
4360
4361static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4362{
4363	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4364}
4365/* Returns -EBUSY if ndelay is being changed and commands are queued */
4366/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4367static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4368			    size_t count)
4369{
4370	int ndelay, res;
4371
4372	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4373	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4374		res = count;
4375		if (sdebug_ndelay != ndelay) {
4376			int j, k;
4377			struct sdebug_queue *sqp;
4378
4379			block_unblock_all_queues(true);
4380			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4381			     ++j, ++sqp) {
4382				k = find_first_bit(sqp->in_use_bm,
4383						   sdebug_max_queue);
4384				if (k != sdebug_max_queue) {
4385					res = -EBUSY;   /* queued commands */
4386					break;
4387				}
4388			}
4389			if (res > 0) {
4390				/* make sure sdebug_defer instances get
4391				 * re-allocated for new delay variant */
4392				free_all_queued();
4393				sdebug_ndelay = ndelay;
4394				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4395							: DEF_JDELAY;
4396			}
4397			block_unblock_all_queues(false);
4398		}
4399		return res;
4400	}
4401	return -EINVAL;
4402}
4403static DRIVER_ATTR_RW(ndelay);
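/*
 * Illustrative: writing a nanosecond delay, e.g.
 *     echo 5000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 * overrides the jiffy based delay (sdebug_jdelay is set to
 * JDELAY_OVERRIDDEN); writing 0 restores the default jiffy delay.
 */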
4404
4405static ssize_t opts_show(struct device_driver *ddp, char *buf)
4406{
4407	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4408}
4409
4410static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4411			  size_t count)
4412{
4413	int opts;
4414	char work[20];
4415
4416	if (1 == sscanf(buf, "%10s", work)) {
4417		if (0 == strncasecmp(work, "0x", 2)) {
4418			if (1 == sscanf(&work[2], "%x", &opts))
4419				goto opts_done;
4420		} else {
4421			if (1 == sscanf(work, "%d", &opts))
4422				goto opts_done;
4423		}
4424	}
4425	return -EINVAL;
4426opts_done:
4427	sdebug_opts = opts;
4428	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4429	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4430	tweak_cmnd_count();
4431	return count;
4432}
4433static DRIVER_ATTR_RW(opts);
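/*
 * Illustrative: unlike the /proc interface above, opts_store() also
 * accepts hexadecimal input, e.g.
 *     echo 0x9 > /sys/bus/pseudo/drivers/scsi_debug/opts
 * (noise plus recovered_err, per the opts module parameter description
 * above).
 */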
4434
4435static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4436{
4437	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4438}
4439static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4440			   size_t count)
4441{
4442	int n;
4443
4444	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4445		sdebug_ptype = n;
4446		return count;
4447	}
4448	return -EINVAL;
4449}
4450static DRIVER_ATTR_RW(ptype);
4451
4452static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4453{
4454	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4455}
4456static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4457			    size_t count)
4458{
4459	int n;
4460
4461	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4462		sdebug_dsense = n;
4463		return count;
4464	}
4465	return -EINVAL;
4466}
4467static DRIVER_ATTR_RW(dsense);
4468
4469static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4470{
4471	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4472}
4473static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4474			     size_t count)
4475{
4476	int n;
4477
4478	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4479		n = (n > 0);
4480		sdebug_fake_rw = (sdebug_fake_rw > 0);
4481		if (sdebug_fake_rw != n) {
4482			if ((0 == n) && (NULL == fake_storep)) {
4483				unsigned long sz =
4484					(unsigned long)sdebug_dev_size_mb *
4485					1048576;
4486
4487				fake_storep = vmalloc(sz);
4488				if (NULL == fake_storep) {
4489					pr_err("out of memory, 9\n");
4490					return -ENOMEM;
4491				}
4492				memset(fake_storep, 0, sz);
4493			}
4494			sdebug_fake_rw = n;
4495		}
4496		return count;
4497	}
4498	return -EINVAL;
4499}
4500static DRIVER_ATTR_RW(fake_rw);
4501
4502static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4503{
4504	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4505}
4506static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4507			      size_t count)
4508{
4509	int n;
4510
4511	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4512		sdebug_no_lun_0 = n;
4513		return count;
4514	}
4515	return -EINVAL;
4516}
4517static DRIVER_ATTR_RW(no_lun_0);
4518
4519static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4520{
4521	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4522}
4523static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4524			      size_t count)
4525{
4526	int n;
4527
4528	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4529		sdebug_num_tgts = n;
4530		sdebug_max_tgts_luns();
4531		return count;
4532	}
4533	return -EINVAL;
4534}
4535static DRIVER_ATTR_RW(num_tgts);
4536
4537static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4538{
4539	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4540}
4541static DRIVER_ATTR_RO(dev_size_mb);
4542
4543static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4544{
4545	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4546}
4547static DRIVER_ATTR_RO(num_parts);
4548
4549static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4550{
4551	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4552}
4553static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4554			       size_t count)
4555{
4556	int nth;
4557
4558	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4559		sdebug_every_nth = nth;
4560		if (nth && !sdebug_statistics) {
4561			pr_info("every_nth needs statistics=1, set it\n");
4562			sdebug_statistics = true;
4563		}
4564		tweak_cmnd_count();
4565		return count;
4566	}
4567	return -EINVAL;
4568}
4569static DRIVER_ATTR_RW(every_nth);
4570
4571static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4572{
4573	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4574}
4575static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4576			      size_t count)
4577{
4578	int n;
4579	bool changed;
4580
4581	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4582		if (n > 256) {
4583			pr_warn("max_luns can be no more than 256\n");
4584			return -EINVAL;
4585		}
4586		changed = (sdebug_max_luns != n);
4587		sdebug_max_luns = n;
4588		sdebug_max_tgts_luns();
4589		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4590			struct sdebug_host_info *sdhp;
4591			struct sdebug_dev_info *dp;
4592
4593			spin_lock(&sdebug_host_list_lock);
4594			list_for_each_entry(sdhp, &sdebug_host_list,
4595					    host_list) {
4596				list_for_each_entry(dp, &sdhp->dev_info_list,
4597						    dev_list) {
4598					set_bit(SDEBUG_UA_LUNS_CHANGED,
4599						dp->uas_bm);
4600				}
4601			}
4602			spin_unlock(&sdebug_host_list_lock);
4603		}
4604		return count;
4605	}
4606	return -EINVAL;
4607}
4608static DRIVER_ATTR_RW(max_luns);
4609
4610static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4611{
4612	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4613}
4614/* N.B. max_queue can be changed while there are queued commands. In flight
4615 * commands beyond the new max_queue will be completed. */
4616static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4617			       size_t count)
4618{
4619	int j, n, k, a;
4620	struct sdebug_queue *sqp;
4621
4622	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4623	    (n <= SDEBUG_CANQUEUE)) {
4624		block_unblock_all_queues(true);
4625		k = 0;
4626		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4627		     ++j, ++sqp) {
4628			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4629			if (a > k)
4630				k = a;
4631		}
4632		sdebug_max_queue = n;
4633		if (k == SDEBUG_CANQUEUE)
4634			atomic_set(&retired_max_queue, 0);
4635		else if (k >= n)
4636			atomic_set(&retired_max_queue, k + 1);
4637		else
4638			atomic_set(&retired_max_queue, 0);
4639		block_unblock_all_queues(false);
4640		return count;
4641	}
4642	return -EINVAL;
4643}
4644static DRIVER_ATTR_RW(max_queue);
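/*
 * Illustrative: max_queue may be lowered while commands are in flight,
 * e.g.
 *     echo 4 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 * per the note above max_queue_store(), commands already queued beyond
 * the new limit still complete (tracked via retired_max_queue).
 */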
4645
4646static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4647{
4648	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4649}
4650static DRIVER_ATTR_RO(no_uld);
4651
4652static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4653{
4654	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4655}
4656static DRIVER_ATTR_RO(scsi_level);
4657
4658static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4659{
4660	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4661}
4662static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4663				size_t count)
4664{
4665	int n;
4666	bool changed;
4667
4668	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4669		changed = (sdebug_virtual_gb != n);
4670		sdebug_virtual_gb = n;
4671		sdebug_capacity = get_sdebug_capacity();
4672		if (changed) {
4673			struct sdebug_host_info *sdhp;
4674			struct sdebug_dev_info *dp;
4675
4676			spin_lock(&sdebug_host_list_lock);
4677			list_for_each_entry(sdhp, &sdebug_host_list,
4678					    host_list) {
4679				list_for_each_entry(dp, &sdhp->dev_info_list,
4680						    dev_list) {
4681					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4682						dp->uas_bm);
4683				}
4684			}
4685			spin_unlock(&sdebug_host_list_lock);
4686		}
4687		return count;
4688	}
4689	return -EINVAL;
4690}
4691static DRIVER_ATTR_RW(virtual_gb);
4692
4693static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4694{
4695	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4696}
4697
4698static int sdebug_add_adapter(void);
4699static void sdebug_remove_adapter(void);
4700
4701static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4702			      size_t count)
4703{
4704	int delta_hosts;
4705
4706	if (sscanf(buf, "%d", &delta_hosts) != 1)
4707		return -EINVAL;
4708	if (delta_hosts > 0) {
4709		do {
4710			sdebug_add_adapter();
4711		} while (--delta_hosts);
4712	} else if (delta_hosts < 0) {
4713		do {
4714			sdebug_remove_adapter();
4715		} while (++delta_hosts);
4716	}
4717	return count;
4718}
4719static DRIVER_ATTR_RW(add_host);
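/*
 * Illustrative: add_host_store() treats the value written as a delta, so
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * adds two more simulated hosts, while
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * removes the most recently added one.
 */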
4720
4721static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4722{
4723	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4724}
4725static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4726				    size_t count)
4727{
4728	int n;
4729
4730	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4731		sdebug_vpd_use_hostno = n;
4732		return count;
4733	}
4734	return -EINVAL;
4735}
4736static DRIVER_ATTR_RW(vpd_use_hostno);
4737
4738static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4739{
4740	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4741}
4742static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4743				size_t count)
4744{
4745	int n;
4746
4747	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4748		if (n > 0)
4749			sdebug_statistics = true;
4750		else {
4751			clear_queue_stats();
4752			sdebug_statistics = false;
4753		}
4754		return count;
4755	}
4756	return -EINVAL;
4757}
4758static DRIVER_ATTR_RW(statistics);
4759
4760static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4761{
4762	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4763}
4764static DRIVER_ATTR_RO(sector_size);
4765
4766static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
4767{
4768	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
4769}
4770static DRIVER_ATTR_RO(submit_queues);
4771
4772static ssize_t dix_show(struct device_driver *ddp, char *buf)
4773{
4774	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4775}
4776static DRIVER_ATTR_RO(dix);
4777
4778static ssize_t dif_show(struct device_driver *ddp, char *buf)
4779{
4780	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4781}
4782static DRIVER_ATTR_RO(dif);
4783
4784static ssize_t guard_show(struct device_driver *ddp, char *buf)
4785{
4786	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4787}
4788static DRIVER_ATTR_RO(guard);
4789
4790static ssize_t ato_show(struct device_driver *ddp, char *buf)
4791{
4792	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4793}
4794static DRIVER_ATTR_RO(ato);
4795
4796static ssize_t map_show(struct device_driver *ddp, char *buf)
4797{
4798	ssize_t count;
4799
4800	if (!scsi_debug_lbp())
4801		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4802				 sdebug_store_sectors);
4803
4804	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4805			  (int)map_size, map_storep);
4806	buf[count++] = '\n';
4807	buf[count] = '\0';
4808
4809	return count;
4810}
4811static DRIVER_ATTR_RO(map);
4812
4813static ssize_t removable_show(struct device_driver *ddp, char *buf)
4814{
4815	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4816}
4817static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4818			       size_t count)
4819{
4820	int n;
4821
4822	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4823		sdebug_removable = (n > 0);
4824		return count;
4825	}
4826	return -EINVAL;
4827}
4828static DRIVER_ATTR_RW(removable);
4829
4830static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4831{
4832	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4833}
4834/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4835static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4836			       size_t count)
4837{
4838	int n;
4839
4840	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4841		sdebug_host_lock = (n > 0);
4842		return count;
4843	}
4844	return -EINVAL;
4845}
4846static DRIVER_ATTR_RW(host_lock);
4847
4848static ssize_t strict_show(struct device_driver *ddp, char *buf)
4849{
4850	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4851}
4852static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4853			    size_t count)
4854{
4855	int n;
4856
4857	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4858		sdebug_strict = (n > 0);
4859		return count;
4860	}
4861	return -EINVAL;
4862}
4863static DRIVER_ATTR_RW(strict);
4864
4865static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
4866{
4867	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
4868}
4869static DRIVER_ATTR_RO(uuid_ctl);
4870
4871
4872/* Note: The following array creates attribute files in the
4873   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4874   files (over those found in the /sys/module/scsi_debug/parameters
4875   directory) is that auxiliary actions can be triggered when an attribute
4876   is changed. For example see: add_host_store() above.
4877 */
4878
4879static struct attribute *sdebug_drv_attrs[] = {
4880	&driver_attr_delay.attr,
4881	&driver_attr_opts.attr,
4882	&driver_attr_ptype.attr,
4883	&driver_attr_dsense.attr,
4884	&driver_attr_fake_rw.attr,
4885	&driver_attr_no_lun_0.attr,
4886	&driver_attr_num_tgts.attr,
4887	&driver_attr_dev_size_mb.attr,
4888	&driver_attr_num_parts.attr,
4889	&driver_attr_every_nth.attr,
4890	&driver_attr_max_luns.attr,
4891	&driver_attr_max_queue.attr,
4892	&driver_attr_no_uld.attr,
4893	&driver_attr_scsi_level.attr,
4894	&driver_attr_virtual_gb.attr,
4895	&driver_attr_add_host.attr,
4896	&driver_attr_vpd_use_hostno.attr,
4897	&driver_attr_sector_size.attr,
4898	&driver_attr_statistics.attr,
4899	&driver_attr_submit_queues.attr,
4900	&driver_attr_dix.attr,
4901	&driver_attr_dif.attr,
4902	&driver_attr_guard.attr,
4903	&driver_attr_ato.attr,
4904	&driver_attr_map.attr,
4905	&driver_attr_removable.attr,
4906	&driver_attr_host_lock.attr,
4907	&driver_attr_ndelay.attr,
4908	&driver_attr_strict.attr,
4909	&driver_attr_uuid_ctl.attr,
4910	NULL,
4911};
4912ATTRIBUTE_GROUPS(sdebug_drv);
4913
4914static struct device *pseudo_primary;
4915
4916static int __init scsi_debug_init(void)
4917{
4918	unsigned long sz;
4919	int host_to_add;
4920	int k;
4921	int ret;
4922
4923	atomic_set(&retired_max_queue, 0);
4924
4925	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
4926		pr_warn("ndelay must be less than 1 second, ignored\n");
4927		sdebug_ndelay = 0;
4928	} else if (sdebug_ndelay > 0)
4929		sdebug_jdelay = JDELAY_OVERRIDDEN;
4930
4931	switch (sdebug_sector_size) {
4932	case  512:
4933	case 1024:
4934	case 2048:
4935	case 4096:
4936		break;
4937	default:
4938		pr_err("invalid sector_size %d\n", sdebug_sector_size);
4939		return -EINVAL;
4940	}
4941
4942	switch (sdebug_dif) {
4943	case T10_PI_TYPE0_PROTECTION:
4944		break;
4945	case T10_PI_TYPE1_PROTECTION:
4946	case T10_PI_TYPE2_PROTECTION:
4947	case T10_PI_TYPE3_PROTECTION:
4948		have_dif_prot = true;
4949		break;
4950
4951	default:
4952		pr_err("dif must be 0, 1, 2 or 3\n");
4953		return -EINVAL;
4954	}
4955
4956	if (sdebug_guard > 1) {
4957		pr_err("guard must be 0 or 1\n");
4958		return -EINVAL;
4959	}
4960
4961	if (sdebug_ato > 1) {
4962		pr_err("ato must be 0 or 1\n");
4963		return -EINVAL;
4964	}
4965
4966	if (sdebug_physblk_exp > 15) {
4967		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
4968		return -EINVAL;
4969	}
4970	if (sdebug_max_luns > 256) {
4971		pr_warn("max_luns can be no more than 256, use default\n");
4972		sdebug_max_luns = DEF_MAX_LUNS;
4973	}
4974
4975	if (sdebug_lowest_aligned > 0x3fff) {
4976		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
4977		return -EINVAL;
4978	}
4979
4980	if (submit_queues < 1) {
4981		pr_err("submit_queues must be 1 or more\n");
4982		return -EINVAL;
4983	}
4984	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
4985			       GFP_KERNEL);
4986	if (sdebug_q_arr == NULL)
4987		return -ENOMEM;
4988	for (k = 0; k < submit_queues; ++k)
4989		spin_lock_init(&sdebug_q_arr[k].qc_lock);
4990
4991	if (sdebug_dev_size_mb < 1)
4992		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4993	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
4994	sdebug_store_sectors = sz / sdebug_sector_size;
4995	sdebug_capacity = get_sdebug_capacity();
4996
4997	/* play around with geometry, don't waste too much on track 0 */
4998	sdebug_heads = 8;
4999	sdebug_sectors_per = 32;
5000	if (sdebug_dev_size_mb >= 256)
5001		sdebug_heads = 64;
5002	else if (sdebug_dev_size_mb >= 16)
5003		sdebug_heads = 32;
5004	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5005			       (sdebug_sectors_per * sdebug_heads);
5006	if (sdebug_cylinders_per >= 1024) {
5007		/* other LLDs do this; implies >= 1GB ram disk ... */
5008		sdebug_heads = 255;
5009		sdebug_sectors_per = 63;
5010		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5011			       (sdebug_sectors_per * sdebug_heads);
5012	}
5013
5014	if (sdebug_fake_rw == 0) {
5015		fake_storep = vmalloc(sz);
5016		if (NULL == fake_storep) {
5017			pr_err("out of memory, 1\n");
5018			ret = -ENOMEM;
5019			goto free_q_arr;
5020		}
5021		memset(fake_storep, 0, sz);
5022		if (sdebug_num_parts > 0)
5023			sdebug_build_parts(fake_storep, sz);
5024	}
5025
5026	if (sdebug_dix) {
5027		int dif_size;
5028
5029		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5030		dif_storep = vmalloc(dif_size);
5031
5032		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5033
5034		if (dif_storep == NULL) {
5035			pr_err("out of mem. (DIX)\n");
5036			ret = -ENOMEM;
5037			goto free_vm;
5038		}
5039
5040		memset(dif_storep, 0xff, dif_size);
5041	}
5042
5043	/* Logical Block Provisioning */
5044	if (scsi_debug_lbp()) {
5045		sdebug_unmap_max_blocks =
5046			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5047
5048		sdebug_unmap_max_desc =
5049			clamp(sdebug_unmap_max_desc, 0U, 256U);
5050
5051		sdebug_unmap_granularity =
5052			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5053
5054		if (sdebug_unmap_alignment &&
5055		    sdebug_unmap_granularity <=
5056		    sdebug_unmap_alignment) {
5057			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5058			ret = -EINVAL;
5059			goto free_vm;
5060		}
5061
5062		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5063		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
5064
5065		pr_info("%lu provisioning blocks\n", map_size);
5066
5067		if (map_storep == NULL) {
5068			pr_err("out of mem. (MAP)\n");
5069			ret = -ENOMEM;
5070			goto free_vm;
5071		}
5072
5073		bitmap_zero(map_storep, map_size);
5074
5075		/* Map first 1KB for partition table */
5076		if (sdebug_num_parts)
5077			map_region(0, 2);
5078	}
5079
5080	pseudo_primary = root_device_register("pseudo_0");
5081	if (IS_ERR(pseudo_primary)) {
5082		pr_warn("root_device_register() error\n");
5083		ret = PTR_ERR(pseudo_primary);
5084		goto free_vm;
5085	}
5086	ret = bus_register(&pseudo_lld_bus);
5087	if (ret < 0) {
5088		pr_warn("bus_register error: %d\n", ret);
5089		goto dev_unreg;
5090	}
5091	ret = driver_register(&sdebug_driverfs_driver);
5092	if (ret < 0) {
5093		pr_warn("driver_register error: %d\n", ret);
5094		goto bus_unreg;
5095	}
5096
5097	host_to_add = sdebug_add_host;
5098	sdebug_add_host = 0;
5099
5100	for (k = 0; k < host_to_add; k++) {
5101		if (sdebug_add_adapter()) {
5102			pr_err("sdebug_add_adapter failed k=%d\n", k);
5103			break;
5104		}
5105	}
5106
5107	if (sdebug_verbose)
5108		pr_info("built %d host(s)\n", sdebug_add_host);
5109
5110	return 0;
5111
5112bus_unreg:
5113	bus_unregister(&pseudo_lld_bus);
5114dev_unreg:
5115	root_device_unregister(pseudo_primary);
5116free_vm:
5117	vfree(map_storep);
5118	vfree(dif_storep);
5119	vfree(fake_storep);
5120free_q_arr:
5121	kfree(sdebug_q_arr);
5122	return ret;
5123}
5124
5125static void __exit scsi_debug_exit(void)
5126{
5127	int k = sdebug_add_host;
5128
5129	stop_all_queued();
5130	free_all_queued();
5131	for (; k; k--)
5132		sdebug_remove_adapter();
5133	driver_unregister(&sdebug_driverfs_driver);
5134	bus_unregister(&pseudo_lld_bus);
5135	root_device_unregister(pseudo_primary);
5136
5137	vfree(map_storep);
5138	vfree(dif_storep);
5139	vfree(fake_storep);
5140	kfree(sdebug_q_arr);
5141}
5142
5143device_initcall(scsi_debug_init);
5144module_exit(scsi_debug_exit);
5145
5146static void sdebug_release_adapter(struct device *dev)
5147{
5148	struct sdebug_host_info *sdbg_host;
5149
5150	sdbg_host = to_sdebug_host(dev);
5151	kfree(sdbg_host);
5152}
5153
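/* Create one simulated adapter: allocate sdebug_dev_info entries for each
 * target/LUN combination, add the host to sdebug_host_list and register a
 * device on the pseudo bus, which triggers sdebug_driver_probe() below.
 */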
5154static int sdebug_add_adapter(void)
5155{
5156	int k, devs_per_host;
5157	int error = 0;
5158	struct sdebug_host_info *sdbg_host;
5159	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5160
5161	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5162	if (NULL == sdbg_host) {
5163		pr_err("out of memory at line %d\n", __LINE__);
5164		return -ENOMEM;
5165	}
5166
5167	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5168
5169	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5170	for (k = 0; k < devs_per_host; k++) {
5171		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5172		if (!sdbg_devinfo) {
5173			pr_err("out of memory at line %d\n", __LINE__);
5174			error = -ENOMEM;
5175			goto clean;
5176		}
5177	}
5178
5179	spin_lock(&sdebug_host_list_lock);
5180	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5181	spin_unlock(&sdebug_host_list_lock);
5182
5183	sdbg_host->dev.bus = &pseudo_lld_bus;
5184	sdbg_host->dev.parent = pseudo_primary;
5185	sdbg_host->dev.release = &sdebug_release_adapter;
5186	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5187
5188	error = device_register(&sdbg_host->dev);
5189
5190	if (error)
5191		goto clean;
5192
5193	++sdebug_add_host;
5194	return error;
5195
5196clean:
5197	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5198				 dev_list) {
5199		list_del(&sdbg_devinfo->dev_list);
5200		kfree(sdbg_devinfo);
5201	}
5202
5203	kfree(sdbg_host);
5204	return error;
5205}
5206
5207static void sdebug_remove_adapter(void)
5208{
5209	struct sdebug_host_info *sdbg_host = NULL;
5210
5211	spin_lock(&sdebug_host_list_lock);
5212	if (!list_empty(&sdebug_host_list)) {
5213		sdbg_host = list_entry(sdebug_host_list.prev,
5214				       struct sdebug_host_info, host_list);
5215		list_del(&sdbg_host->host_list);
5216	}
5217	spin_unlock(&sdebug_host_list_lock);
5218
5219	if (!sdbg_host)
5220		return;
5221
5222	device_unregister(&sdbg_host->dev);
5223	--sdebug_add_host;
5224}
5225
5226static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5227{
5228	int num_in_q = 0;
5229	struct sdebug_dev_info *devip;
5230
5231	block_unblock_all_queues(true);
5232	devip = (struct sdebug_dev_info *)sdev->hostdata;
5233	if (NULL == devip) {
5234		block_unblock_all_queues(false);
5235		return	-ENODEV;
5236	}
5237	num_in_q = atomic_read(&devip->num_in_q);
5238
5239	if (qdepth < 1)
5240		qdepth = 1;
5241	/* allow qdepth to exceed max host qc_arr elements for testing */
5242	if (qdepth > SDEBUG_CANQUEUE + 10)
5243		qdepth = SDEBUG_CANQUEUE + 10;
5244	scsi_change_queue_depth(sdev, qdepth);
5245
5246	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5247		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5248			    __func__, qdepth, num_in_q);
5249	}
5250	block_unblock_all_queues(false);
5251	return sdev->queue_depth;
5252}
5253
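/* Decide whether to "lose" this command: when the running command count is
 * a multiple of every_nth, a timeout is faked either for all commands
 * (SDEBUG_OPT_TIMEOUT) or only for medium access commands
 * (SDEBUG_OPT_MAC_TIMEOUT).
 */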
5254static bool fake_timeout(struct scsi_cmnd *scp)
5255{
5256	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5257		if (sdebug_every_nth < -1)
5258			sdebug_every_nth = -1;
5259		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5260			return true; /* ignore command causing timeout */
5261		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5262			 scsi_medium_access_command(scp))
5263			return true; /* time out reads and writes */
5264	}
5265	return false;
5266}
5267
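/* Main queuecommand entry point: look up the CDB opcode (and service
 * action, if any) in opcode_info_arr, apply unit attention, ready state
 * and strict CDB checks, then call the matching resp_* handler and queue
 * the response via schedule_resp().
 */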
5268static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5269				   struct scsi_cmnd *scp)
5270{
5271	u8 sdeb_i;
5272	struct scsi_device *sdp = scp->device;
5273	const struct opcode_info_t *oip;
5274	const struct opcode_info_t *r_oip;
5275	struct sdebug_dev_info *devip;
5276	u8 *cmd = scp->cmnd;
5277	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5278	int k, na;
5279	int errsts = 0;
5280	u32 flags;
5281	u16 sa;
5282	u8 opcode = cmd[0];
5283	bool has_wlun_rl;
5284
5285	scsi_set_resid(scp, 0);
5286	if (sdebug_statistics)
5287		atomic_inc(&sdebug_cmnd_count);
5288	if (unlikely(sdebug_verbose &&
5289		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5290		char b[120];
5291		int n, len, sb;
5292
5293		len = scp->cmd_len;
5294		sb = (int)sizeof(b);
5295		if (len > 32)
5296			strcpy(b, "too long, over 32 bytes");
5297		else {
5298			for (k = 0, n = 0; k < len && n < sb; ++k)
5299				n += scnprintf(b + n, sb - n, "%02x ",
5300					       (u32)cmd[k]);
5301		}
5302		if (sdebug_mq_active)
5303			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
5304				    my_name, blk_mq_unique_tag(scp->request),
5305				    b);
5306		else
5307			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
5308				    b);
5309	}
5310	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5311	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5312		goto err_out;
5313
5314	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
5315	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
5316	devip = (struct sdebug_dev_info *)sdp->hostdata;
5317	if (unlikely(!devip)) {
5318		devip = find_build_dev_info(sdp);
5319		if (NULL == devip)
5320			goto err_out;
5321	}
5322	na = oip->num_attached;
5323	r_pfp = oip->pfp;
5324	if (na) {	/* multiple commands with this opcode */
5325		r_oip = oip;
5326		if (FF_SA & r_oip->flags) {
5327			if (F_SA_LOW & oip->flags)
5328				sa = 0x1f & cmd[1];
5329			else
5330				sa = get_unaligned_be16(cmd + 8);
5331			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5332				if (opcode == oip->opcode && sa == oip->sa)
5333					break;
5334			}
5335		} else {   /* since no service action only check opcode */
5336			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5337				if (opcode == oip->opcode)
5338					break;
5339			}
5340		}
5341		if (k > na) {
5342			if (F_SA_LOW & r_oip->flags)
5343				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5344			else if (F_SA_HIGH & r_oip->flags)
5345				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5346			else
5347				mk_sense_invalid_opcode(scp);
5348			goto check_cond;
5349		}
5350	}	/* else (when na==0) we assume the oip is a match */
5351	flags = oip->flags;
5352	if (unlikely(F_INV_OP & flags)) {
5353		mk_sense_invalid_opcode(scp);
5354		goto check_cond;
5355	}
5356	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5357		if (sdebug_verbose)
5358			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5359				    my_name, opcode, " supported for wlun");
5360		mk_sense_invalid_opcode(scp);
5361		goto check_cond;
5362	}
5363	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
5364		u8 rem;
5365		int j;
5366
5367		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5368			rem = ~oip->len_mask[k] & cmd[k];
5369			if (rem) {
5370				for (j = 7; j >= 0; --j, rem <<= 1) {
5371					if (0x80 & rem)
5372						break;
5373				}
5374				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5375				goto check_cond;
5376			}
5377		}
5378	}
5379	if (unlikely(!(F_SKIP_UA & flags) &&
5380		     find_first_bit(devip->uas_bm,
5381				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5382		errsts = make_ua(scp, devip);
5383		if (errsts)
5384			goto check_cond;
5385	}
5386	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5387		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5388		if (sdebug_verbose)
5389			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5390				    "%s\n", my_name, "initializing command "
5391				    "required");
5392		errsts = check_condition_result;
5393		goto fini;
5394	}
5395	if (sdebug_fake_rw && (F_FAKE_RW & flags))
5396		goto fini;
5397	if (unlikely(sdebug_every_nth)) {
5398		if (fake_timeout(scp))
5399			return 0;	/* ignore command: make trouble */
5400	}
5401	if (likely(oip->pfp))
5402		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
5403	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
5404		errsts = r_pfp(scp, devip);
5405
5406fini:
5407	return schedule_resp(scp, devip, errsts,
5408			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
5409check_cond:
5410	return schedule_resp(scp, devip, check_condition_result, 0);
5411err_out:
5412	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
5413}
5414
5415static struct scsi_host_template sdebug_driver_template = {
5416	.show_info =		scsi_debug_show_info,
5417	.write_info =		scsi_debug_write_info,
5418	.proc_name =		sdebug_proc_name,
5419	.name =			"SCSI DEBUG",
5420	.info =			scsi_debug_info,
5421	.slave_alloc =		scsi_debug_slave_alloc,
5422	.slave_configure =	scsi_debug_slave_configure,
5423	.slave_destroy =	scsi_debug_slave_destroy,
5424	.ioctl =		scsi_debug_ioctl,
5425	.queuecommand =		scsi_debug_queuecommand,
5426	.change_queue_depth =	sdebug_change_qdepth,
5427	.eh_abort_handler =	scsi_debug_abort,
5428	.eh_device_reset_handler = scsi_debug_device_reset,
5429	.eh_target_reset_handler = scsi_debug_target_reset,
5430	.eh_bus_reset_handler = scsi_debug_bus_reset,
5431	.eh_host_reset_handler = scsi_debug_host_reset,
5432	.can_queue =		SDEBUG_CANQUEUE,
5433	.this_id =		7,
5434	.sg_tablesize =		SG_MAX_SEGMENTS,
5435	.cmd_per_lun =		DEF_CMD_PER_LUN,
5436	.max_sectors =		-1U,
5437	.use_clustering = 	DISABLE_CLUSTERING,
5438	.module =		THIS_MODULE,
5439	.track_queue_depth =	1,
5440};
5441
5442static int sdebug_driver_probe(struct device *dev)
5443{
5444	int error = 0;
5445	struct sdebug_host_info *sdbg_host;
5446	struct Scsi_Host *hpnt;
5447	int hprot;
5448
5449	sdbg_host = to_sdebug_host(dev);
5450
5451	sdebug_driver_template.can_queue = sdebug_max_queue;
5452	if (sdebug_clustering)
5453		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5454	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5455	if (NULL == hpnt) {
5456		pr_err("scsi_host_alloc failed\n");
5457		error = -ENODEV;
5458		return error;
5459	}
5460	if (submit_queues > nr_cpu_ids) {
5461		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
5462			my_name, submit_queues, nr_cpu_ids);
5463		submit_queues = nr_cpu_ids;
5464	}
5465	/* Decide whether to tell scsi subsystem that we want mq */
5466	/* Following should give the same answer for each host */
5467	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
5468	if (sdebug_mq_active)
5469		hpnt->nr_hw_queues = submit_queues;
5470
5471	sdbg_host->shost = hpnt;
5472	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5473	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5474		hpnt->max_id = sdebug_num_tgts + 1;
5475	else
5476		hpnt->max_id = sdebug_num_tgts;
5477	/* = sdebug_max_luns; */
5478	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5479
5480	hprot = 0;
5481
5482	switch (sdebug_dif) {
5483
5484	case T10_PI_TYPE1_PROTECTION:
5485		hprot = SHOST_DIF_TYPE1_PROTECTION;
5486		if (sdebug_dix)
5487			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5488		break;
5489
5490	case T10_PI_TYPE2_PROTECTION:
5491		hprot = SHOST_DIF_TYPE2_PROTECTION;
5492		if (sdebug_dix)
5493			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5494		break;
5495
5496	case T10_PI_TYPE3_PROTECTION:
5497		hprot = SHOST_DIF_TYPE3_PROTECTION;
5498		if (sdebug_dix)
5499			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5500		break;
5501
5502	default:
5503		if (sdebug_dix)
5504			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5505		break;
5506	}
5507
5508	scsi_host_set_prot(hpnt, hprot);
5509
5510	if (have_dif_prot || sdebug_dix)
5511		pr_info("host protection%s%s%s%s%s%s%s\n",
5512			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5513			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5514			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5515			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5516			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5517			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5518			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5519
5520	if (sdebug_guard == 1)
5521		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5522	else
5523		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5524
5525	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5526	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5527	if (sdebug_every_nth)	/* need stats counters for every_nth */
5528		sdebug_statistics = true;
5529	error = scsi_add_host(hpnt, &sdbg_host->dev);
5530	if (error) {
5531		pr_err("scsi_add_host failed\n");
5532		error = -ENODEV;
5533		scsi_host_put(hpnt);
5534	} else
5535		scsi_scan_host(hpnt);
5536
5537	return error;
5538}
5539
5540static int sdebug_driver_remove(struct device *dev)
5541{
5542	struct sdebug_host_info *sdbg_host;
5543	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5544
5545	sdbg_host = to_sdebug_host(dev);
5546
5547	if (!sdbg_host) {
5548		pr_err("Unable to locate host info\n");
5549		return -ENODEV;
5550	}
5551
5552	scsi_remove_host(sdbg_host->shost);
5553
5554	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5555				 dev_list) {
5556		list_del(&sdbg_devinfo->dev_list);
5557		kfree(sdbg_devinfo);
5558	}
5559
5560	scsi_host_put(sdbg_host->shost);
5561	return 0;
5562}
5563
5564static int pseudo_lld_bus_match(struct device *dev,
5565				struct device_driver *dev_driver)
5566{
5567	return 1;
5568}
5569
5570static struct bus_type pseudo_lld_bus = {
5571	.name = "pseudo",
5572	.match = pseudo_lld_bus_match,
5573	.probe = sdebug_driver_probe,
5574	.remove = sdebug_driver_remove,
5575	.drv_groups = sdebug_drv_groups,
5576};