/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}
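
/*
 * Worked example of the 8-byte payload above, assuming get_blocks()
 * returns the last addressable LBA (as the end-of-device range checks
 * later in this file imply): for a 1 GiB backend with 512-byte blocks,
 * get_blocks() reports 0x1fffff, so:
 *
 *   buf[0..3] = 00 1f ff ff  (RETURNED LOGICAL BLOCK ADDRESS)
 *   buf[4..7] = 00 00 02 00  (LOGICAL BLOCK LENGTH IN BYTES = 512)
 */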

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;

	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		/*
		 * Only override a device's pi_prot_type if no T10-PI is
		 * available, and sess_prot_type has been explicitly enabled.
		 */
		if (!pi_prot_type)
			pi_prot_type = sess->sess_prot_type;

		if (pi_prot_type)
			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
		buf[14] |= 0x80;

		/*
		 * LBPRZ signifies that zeroes will be read back from an LBA after
		 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
		 */
		if (dev->dev_attrib.unmap_zeroes_data)
			buf[14] |= 0x40;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}
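
/*
 * Byte 12 above encodes PROT_EN (bit 0) and P_TYPE (bits 1-3) as
 * ((pi_prot_type - 1) << 1) | 0x1, so for example:
 *
 *   DIF Type 1 -> 0x01  (P_TYPE = 000b, PROT_EN = 1)
 *   DIF Type 2 -> 0x03  (P_TYPE = 001b, PROT_EN = 1)
 *   DIF Type 3 -> 0x05  (P_TYPE = 010b, PROT_EN = 1)
 */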

static sense_reason_t
sbc_emulate_startstop(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * See sbc3r36 section 5.25
	 * Immediate bit should be set since there is nothing to complete
	 * POWER CONDITION MODIFIER 0h
	 */
	if (!(cdb[1] & 1) || cdb[2] || cdb[3])
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * POWER CONDITION 0h START_VALID - process START and LOEJ
	 */
	if (cdb[4] >> 4 & 0xf)
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * LOEJ 0h - nothing to load or unload
	 * START 1h - we are ready
	 */
	if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
		return TCM_INVALID_CDB_FIELD;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non-zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);
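
/*
 * Example: a WRITE SAME (16) with NUMBER OF LOGICAL BLOCKS = 0 at
 * LBA 1000 on a device whose last LBA is 1023 covers the remaining
 * 1023 - 1000 + 1 = 24 blocks.
 */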

static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if (nolb) {
		ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
		if (ret)
			return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}
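
/*
 * Example: a READ (6) CDB of 08 00 12 34 00 00 addresses LBA 0x001234
 * (decoded by transport_lba_21() below) with TRANSFER LENGTH 0, which
 * this helper maps to 256 blocks.
 */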

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpws) {
			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
			       " has emulate_tpws disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		cmd->execute_cmd = sbc_execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}
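
/*
 * The flags byte handled above is cdb[1] for WRITE SAME (10)/(16) and
 * cdb[10] for WRITE_SAME_32 (see the callers in sbc_parse_cdb()):
 *
 *   0x02 LBDATA / 0x04 PBDATA - rejected, not supported
 *   0x08 UNMAP                - routed to sbc_execute_write_same_unmap()
 *   0x10 ANCHOR               - rejected, since ANC_SUP == 0
 */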

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
					   int *post_ret)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}
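
/*
 * Worked example of step 3 above: if a byte read from media is 0xf0
 * and the corresponding data-out byte is 0x0f, the data-in buffer
 * returns 0xf0 ^ 0x0f = 0xff.
 */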

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;

	return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret = TCM_NO_SENSE;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_SENT) {
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		*post_ret = 1;

		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return ret;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
						 int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset + block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd, false);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
	 */
	rc = down_interruptible(&dev->caw_sem);
	if (rc != 0) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
			       protect ? TARGET_PROT_DOUT_PASS :
			       TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
			       protect ? TARGET_PROT_DIN_PASS :
			       TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}
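
/*
 * Summary of the prot_checks mapping above, where protect is the
 * RDPROTECT/WRPROTECT value taken from the CDB:
 *
 *   protect    write (WRPROTECT)           read (RDPROTECT)
 *   0x0        none                        GUARD [+REFTAG for Type 1]
 *   0x1, 0x5   GUARD [+REFTAG for Type 1]  GUARD [+REFTAG for Type 1]
 *   0x2        REFTAG (Type 1 only)        REFTAG (Type 1 only)
 *   0x3        none                        none
 *   0x4        GUARD                       GUARD
 */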

static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;
	int sp_ops = cmd->se_sess->sup_prot_ops;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;
	bool fabric_prot = false;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (unlikely(protect &&
		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
			pr_err("CDB contains protect bit, but device + fabric does"
			       " not advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * See if the fabric supports T10-PI, and the session has been
		 * configured to allow export PROTECT=1 feature bit with backend
		 * devices that don't support T10-PI.
		 */
		fabric_prot = is_write ?
			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

		if (fabric_prot && cmd->se_sess->sess_prot_type) {
			pi_prot_type = cmd->se_sess->sess_prot_type;
			break;
		}
		if (!protect)
			return TCM_NO_SENSE;
		/* Fallthrough */
	default:
		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
		       "PROTECT: 0x%02x\n", cdb[0], protect);
		return TCM_INVALID_CDB_FIELD;
	}

	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length.
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}

static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
	if (cdb[1] & 0x10) {
		/* see explanation in spc_emulate_modesense */
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
			       " does not advertise support for DPO\n", cdb[0]);
			return -EINVAL;
		}
	}
	if (cdb[1] & 0x8) {
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
			       " does not advertise support for FUA write\n",
			       cdb[0]);
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_FUA;
	}
	return 0;
}
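
/*
 * CDB byte 1 flag bits checked above: 0x10 is DPO and 0x08 is FUA.
 * For example, a WRITE (10) with cdb[1] = 0x08 on a FUA-capable device
 * sets SCF_FUA so the backend can force unit access for that write.
 */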

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	cmd->protocol_data = ops;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpu) {
			pr_err("Got UNMAP, but backend device has"
			       " emulate_tpu disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = sbc_execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
	case VERIFY_16:
		size = 0;
		if (cdb[0] == VERIFY) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case START_STOP:
		size = 0;
		cmd->execute_cmd = sbc_emulate_startstop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
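
/*
 * Minimal backend wiring sketch for sbc_parse_cdb() (illustrative
 * only; my_sbc_ops and the my_* callbacks are hypothetical, not
 * upstream symbols):
 *
 *	static struct sbc_ops my_sbc_ops = {
 *		.execute_rw		= my_execute_rw,
 *		.execute_sync_cache	= my_execute_sync_cache,
 *		.execute_write_same	= my_execute_write_same,
 *		.execute_unmap		= my_execute_unmap,
 *	};
 *
 *	static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &my_sbc_ops);
 *	}
 */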

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = ops->execute_unmap(cmd, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
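
/*
 * Example parameter list for a single-descriptor UNMAP of 8 blocks
 * starting at LBA 0x1000 (24 bytes total):
 *
 *   bytes 0-1   UNMAP DATA LENGTH (dl)        = 0x0016
 *   bytes 2-3   BLOCK DESCRIPTOR LENGTH (bd_dl) = 0x0010
 *   bytes 4-7   reserved
 *   bytes 8-15  UNMAP LOGICAL BLOCK ADDRESS   = 0x1000
 *   bytes 16-19 NUMBER OF LOGICAL BLOCKS      = 0x00000008
 *   bytes 20-23 reserved
 */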

void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg, *psg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (j = 0; j < psg->length;
				j += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (offset >= dsg->length) {
				offset -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + j;
			avail = min(block_size, dsg->length - offset);
			crc = crc_t10dif(daddr + offset, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				offset = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, offset);
			} else {
				offset += block_size;
			}

			sdt->guard_tag = cpu_to_be16(crc);
			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (cmd->data_direction == DMA_TO_DEVICE) ?
				 "WRITE" : "READ", (unsigned long long)sector,
				 sdt->guard_tag, sdt->app_tag,
				 be32_to_cpu(sdt->ref_tag));

			sector++;
		}

		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}
}
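
/*
 * Each 8-byte tuple written above follows the T10 PI layout of
 * struct t10_pi_tuple:
 *
 *   bytes 0-1  GUARD TAG       - big-endian CRC16 of the data block
 *   bytes 2-3  APPLICATION TAG - always 0 here
 *   bytes 4-7  REFERENCE TAG   - low 32 bits of the LBA for Type 1
 */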

static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
		  __u16 crc, sector_t sector, unsigned int ei_lba)
{
	__be16 csum;

	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
		goto check_ref;

	csum = cpu_to_be16(crc);

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

check_ref:
	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
		return 0;

	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		       struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	if (!sg)
		return;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			kunmap_atomic(addr - sg->offset - offset);

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
		}
		kunmap_atomic(paddr - psg->offset);
	}
}
EXPORT_SYMBOL(sbc_dif_copy_prot);

sense_reason_t
sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i;
	sense_reason_t rc;
	int dsg_off = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for (; psg && sector < start + sectors; psg = sg_next(psg)) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (i = psg_off; i < psg->length &&
				sector < start + sectors;
				i += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (dsg_off >= dsg->length) {
				dsg_off -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + i;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				dsg_off += block_size;
				goto next;
			}

			avail = min(block_size, dsg->length - dsg_off);
			crc = crc_t10dif(daddr + dsg_off, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				dsg_off = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, dsg_off);
			} else {
				dsg_off += block_size;
			}

			rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
			if (rc) {
				kunmap_atomic(daddr - dsg->offset);
				kunmap_atomic(paddr - psg->offset);
				cmd->bad_sector = sector;
				return rc;
			}
next:
			sector++;
			ei_lba++;
		}

		psg_off = 0;
		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify);