Note: File does not exist in v5.14.15.
   1/*
   2 * osd_initiator - Main body of the osd initiator library.
   3 *
   4 * Note: The file does not contain the advanced security functionality which
   5 * is only needed by the security_manager's initiators.
   6 *
   7 * Copyright (C) 2008 Panasas Inc.  All rights reserved.
   8 *
   9 * Authors:
  10 *   Boaz Harrosh <bharrosh@panasas.com>
  11 *   Benny Halevy <bhalevy@panasas.com>
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License version 2
  15 *
  16 * Redistribution and use in source and binary forms, with or without
  17 * modification, are permitted provided that the following conditions
  18 * are met:
  19 *
  20 *  1. Redistributions of source code must retain the above copyright
  21 *     notice, this list of conditions and the following disclaimer.
  22 *  2. Redistributions in binary form must reproduce the above copyright
  23 *     notice, this list of conditions and the following disclaimer in the
  24 *     documentation and/or other materials provided with the distribution.
  25 *  3. Neither the name of the Panasas company nor the names of its
  26 *     contributors may be used to endorse or promote products derived
  27 *     from this software without specific prior written permission.
  28 *
  29 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  30 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  31 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  32 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  37 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  38 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  39 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  40 */
  41
  42#include <linux/slab.h>
  43
  44#include <scsi/osd_initiator.h>
  45#include <scsi/osd_sec.h>
  46#include <scsi/osd_attributes.h>
  47#include <scsi/osd_sense.h>
  48
  49#include <scsi/scsi_device.h>
  50
  51#include "osd_debug.h"
  52
  53#ifndef __unused
  54#    define __unused			__attribute__((unused))
  55#endif
  56
  57enum { OSD_REQ_RETRIES = 1 };
  58
  59MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
  60MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
  61MODULE_LICENSE("GPL");
  62
  63static inline void build_test(void)
  64{
  65	/* structures were not packed */
  66	BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
  67	BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
  68	BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
  69}
  70
  71static const char *_osd_ver_desc(struct osd_request *or)
  72{
  73	return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
  74}
  75
  76#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
  77
  78static int _osd_get_print_system_info(struct osd_dev *od,
  79	void *caps, struct osd_dev_info *odi)
  80{
  81	struct osd_request *or;
  82	struct osd_attr get_attrs[] = {
  83		ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
  84		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
  85		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
  86		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
  87		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
  88		ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
  89		ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
  90		ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
  91		ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
  92		ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
   93		/* IBM-OSD-SIM has a bug with this one; put it last */
  94		ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
  95	};
  96	void *iter = NULL, *pFirst;
  97	int nelem = ARRAY_SIZE(get_attrs), a = 0;
  98	int ret;
  99
 100	or = osd_start_request(od, GFP_KERNEL);
 101	if (!or)
 102		return -ENOMEM;
 103
 104	/* get attrs */
 105	osd_req_get_attributes(or, &osd_root_object);
 106	osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
 107
 108	ret = osd_finalize_request(or, 0, caps, NULL);
 109	if (ret)
 110		goto out;
 111
 112	ret = osd_execute_request(or);
 113	if (ret) {
 114		OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
 115		goto out;
 116	}
 117
 118	osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
 119
 120	OSD_INFO("Detected %s device\n",
 121		_osd_ver_desc(or));
 122
 123	pFirst = get_attrs[a++].val_ptr;
 124	OSD_INFO("VENDOR_IDENTIFICATION  [%s]\n",
 125		(char *)pFirst);
 126
 127	pFirst = get_attrs[a++].val_ptr;
 128	OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
 129		(char *)pFirst);
 130
 131	pFirst = get_attrs[a++].val_ptr;
 132	OSD_INFO("PRODUCT_MODEL          [%s]\n",
 133		(char *)pFirst);
 134
 135	pFirst = get_attrs[a++].val_ptr;
 136	OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
 137		pFirst ? get_unaligned_be32(pFirst) : ~0U);
 138
 139	pFirst = get_attrs[a++].val_ptr;
 140	OSD_INFO("PRODUCT_SERIAL_NUMBER  [%s]\n",
 141		(char *)pFirst);
 142
 143	odi->osdname_len = get_attrs[a].len;
  144	/* Avoid NULL for memcmp optimization; 0-length is good enough */
 145	odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
 146	if (odi->osdname_len)
 147		memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
 148	OSD_INFO("OSD_NAME               [%s]\n", odi->osdname);
 149	a++;
 150
 151	pFirst = get_attrs[a++].val_ptr;
 152	OSD_INFO("TOTAL_CAPACITY         [0x%llx]\n",
 153		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 154
 155	pFirst = get_attrs[a++].val_ptr;
 156	OSD_INFO("USED_CAPACITY          [0x%llx]\n",
 157		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 158
 159	pFirst = get_attrs[a++].val_ptr;
 160	OSD_INFO("NUMBER_OF_PARTITIONS   [%llu]\n",
 161		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 162
 163	if (a >= nelem)
 164		goto out;
 165
 166	/* FIXME: Where are the time utilities */
 167	pFirst = get_attrs[a++].val_ptr;
 168	OSD_INFO("CLOCK                  [0x%02x%02x%02x%02x%02x%02x]\n",
 169		((char *)pFirst)[0], ((char *)pFirst)[1],
 170		((char *)pFirst)[2], ((char *)pFirst)[3],
 171		((char *)pFirst)[4], ((char *)pFirst)[5]);
 172
  173	if (a < nelem) { /* IBM-OSD-SIM bug, might not have it */
 174		unsigned len = get_attrs[a].len;
 175		char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
 176
 177		hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
 178				   sid_dump, sizeof(sid_dump), true);
 179		OSD_INFO("OSD_SYSTEM_ID(%d)\n"
 180			 "        [%s]\n", len, sid_dump);
 181
 182		if (unlikely(len > sizeof(odi->systemid))) {
 183			OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
  184				"device identification might not work\n", len);
 185			len = sizeof(odi->systemid);
 186		}
 187		odi->systemid_len = len;
 188		memcpy(odi->systemid, get_attrs[a].val_ptr, len);
 189		a++;
 190	}
 191out:
 192	osd_end_request(or);
 193	return ret;
 194}
 195
 196int osd_auto_detect_ver(struct osd_dev *od,
 197	void *caps, struct osd_dev_info *odi)
 198{
 199	int ret;
 200
 201	/* Auto-detect the osd version */
 202	ret = _osd_get_print_system_info(od, caps, odi);
 203	if (ret) {
 204		osd_dev_set_ver(od, OSD_VER1);
 205		OSD_DEBUG("converting to OSD1\n");
 206		ret = _osd_get_print_system_info(od, caps, odi);
 207	}
 208
 209	return ret;
 210}
 211EXPORT_SYMBOL(osd_auto_detect_ver);
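
/*
 * Illustrative sketch, not part of the original file: one way a caller might
 * bind an osd_dev and run the auto-detection above.  osd_example_probe() is a
 * hypothetical name, and the capability is assumed to come from the
 * no-security helper declared in osd_sec.h.
 */
static int osd_example_probe(struct scsi_device *sdev,
	struct osd_dev *od, struct osd_dev_info *odi)
{
	u8 caps[OSD_CAP_LEN];

	osd_dev_init(od, sdev);

	/* All-permissions, no-security capability for the root object */
	osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);

	/* Tries the default version first, falls back to OSD1 on failure */
	return osd_auto_detect_ver(od, caps, odi);
}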
 212
 213static unsigned _osd_req_cdb_len(struct osd_request *or)
 214{
 215	return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
 216}
 217
 218static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
 219{
 220	return osd_req_is_ver1(or) ?
 221		osdv1_attr_list_elem_size(len) :
 222		osdv2_attr_list_elem_size(len);
 223}
 224
 225static void _osd_req_alist_elem_encode(struct osd_request *or,
 226	void *attr_last, const struct osd_attr *oa)
 227{
 228	if (osd_req_is_ver1(or)) {
 229		struct osdv1_attributes_list_element *attr = attr_last;
 230
 231		attr->attr_page = cpu_to_be32(oa->attr_page);
 232		attr->attr_id = cpu_to_be32(oa->attr_id);
 233		attr->attr_bytes = cpu_to_be16(oa->len);
 234		memcpy(attr->attr_val, oa->val_ptr, oa->len);
 235	} else {
 236		struct osdv2_attributes_list_element *attr = attr_last;
 237
 238		attr->attr_page = cpu_to_be32(oa->attr_page);
 239		attr->attr_id = cpu_to_be32(oa->attr_id);
 240		attr->attr_bytes = cpu_to_be16(oa->len);
 241		memcpy(attr->attr_val, oa->val_ptr, oa->len);
 242	}
 243}
 244
 245static int _osd_req_alist_elem_decode(struct osd_request *or,
 246	void *cur_p, struct osd_attr *oa, unsigned max_bytes)
 247{
 248	unsigned inc;
 249	if (osd_req_is_ver1(or)) {
 250		struct osdv1_attributes_list_element *attr = cur_p;
 251
 252		if (max_bytes < sizeof(*attr))
 253			return -1;
 254
 255		oa->len = be16_to_cpu(attr->attr_bytes);
 256		inc = _osd_req_alist_elem_size(or, oa->len);
 257		if (inc > max_bytes)
 258			return -1;
 259
 260		oa->attr_page = be32_to_cpu(attr->attr_page);
 261		oa->attr_id = be32_to_cpu(attr->attr_id);
 262
 263		/* OSD1: On empty attributes we return a pointer to 2 bytes
  264		 * of zeros. This keeps the behaviour similar to OSD2.
 265		 * (See below)
 266		 */
 267		oa->val_ptr = likely(oa->len) ? attr->attr_val :
 268						(u8 *)&attr->attr_bytes;
 269	} else {
 270		struct osdv2_attributes_list_element *attr = cur_p;
 271
 272		if (max_bytes < sizeof(*attr))
 273			return -1;
 274
 275		oa->len = be16_to_cpu(attr->attr_bytes);
 276		inc = _osd_req_alist_elem_size(or, oa->len);
 277		if (inc > max_bytes)
 278			return -1;
 279
 280		oa->attr_page = be32_to_cpu(attr->attr_page);
 281		oa->attr_id = be32_to_cpu(attr->attr_id);
 282
 283		/* OSD2: For convenience, on empty attributes, we return 8 bytes
  284		 * of zeros here. This keeps the same behaviour as OSD2r04,
  285		 * and is convenient for NULL-terminated ASCII fields.
 286		 * oa->val_ptr == NULL marks the end-of-list, or error.
 287		 */
 288		oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
 289	}
 290	return inc;
 291}
 292
 293static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
 294{
 295	return osd_req_is_ver1(or) ?
 296		osdv1_list_size(list_head) :
 297		osdv2_list_size(list_head);
 298}
 299
 300static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
 301{
 302	return osd_req_is_ver1(or) ?
 303		sizeof(struct osdv1_attributes_list_header) :
 304		sizeof(struct osdv2_attributes_list_header);
 305}
 306
 307static void _osd_req_set_alist_type(struct osd_request *or,
 308	void *list, int list_type)
 309{
 310	if (osd_req_is_ver1(or)) {
 311		struct osdv1_attributes_list_header *attr_list = list;
 312
 313		memset(attr_list, 0, sizeof(*attr_list));
 314		attr_list->type = list_type;
 315	} else {
 316		struct osdv2_attributes_list_header *attr_list = list;
 317
 318		memset(attr_list, 0, sizeof(*attr_list));
 319		attr_list->type = list_type;
 320	}
 321}
 322
 323static bool _osd_req_is_alist_type(struct osd_request *or,
 324	void *list, int list_type)
 325{
 326	if (!list)
 327		return false;
 328
 329	if (osd_req_is_ver1(or)) {
 330		struct osdv1_attributes_list_header *attr_list = list;
 331
 332		return attr_list->type == list_type;
 333	} else {
 334		struct osdv2_attributes_list_header *attr_list = list;
 335
 336		return attr_list->type == list_type;
 337	}
 338}
 339
  340/* This is for List-objects, not Attributes-Lists */
 341static void _osd_req_encode_olist(struct osd_request *or,
 342	struct osd_obj_id_list *list)
 343{
 344	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
 345
 346	if (osd_req_is_ver1(or)) {
 347		cdbh->v1.list_identifier = list->list_identifier;
 348		cdbh->v1.start_address = list->continuation_id;
 349	} else {
 350		cdbh->v2.list_identifier = list->list_identifier;
 351		cdbh->v2.start_address = list->continuation_id;
 352	}
 353}
 354
 355static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
 356	u64 offset, unsigned *padding)
 357{
 358	return __osd_encode_offset(offset, padding,
 359			osd_req_is_ver1(or) ?
 360				OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
 361			OSD_OFFSET_MAX_SHIFT);
 362}
 363
 364static struct osd_security_parameters *
 365_osd_req_sec_params(struct osd_request *or)
 366{
 367	struct osd_cdb *ocdb = &or->cdb;
 368
 369	if (osd_req_is_ver1(or))
 370		return (struct osd_security_parameters *)&ocdb->v1.sec_params;
 371	else
 372		return (struct osd_security_parameters *)&ocdb->v2.sec_params;
 373}
 374
 375void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
 376{
 377	memset(osdd, 0, sizeof(*osdd));
 378	osdd->scsi_device = scsi_device;
 379	osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
 380#ifdef OSD_VER1_SUPPORT
 381	osdd->version = OSD_VER2;
 382#endif
 383	/* TODO: Allocate pools for osd_request attributes ... */
 384}
 385EXPORT_SYMBOL(osd_dev_init);
 386
 387void osd_dev_fini(struct osd_dev *osdd)
 388{
 389	/* TODO: De-allocate pools */
 390
 391	osdd->scsi_device = NULL;
 392}
 393EXPORT_SYMBOL(osd_dev_fini);
 394
 395static struct osd_request *_osd_request_alloc(gfp_t gfp)
 396{
 397	struct osd_request *or;
 398
 399	/* TODO: Use mempool with one saved request */
 400	or = kzalloc(sizeof(*or), gfp);
 401	return or;
 402}
 403
 404static void _osd_request_free(struct osd_request *or)
 405{
 406	kfree(or);
 407}
 408
 409struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
 410{
 411	struct osd_request *or;
 412
 413	or = _osd_request_alloc(gfp);
 414	if (!or)
 415		return NULL;
 416
 417	or->osd_dev = dev;
 418	or->alloc_flags = gfp;
 419	or->timeout = dev->def_timeout;
 420	or->retries = OSD_REQ_RETRIES;
 421
 422	return or;
 423}
 424EXPORT_SYMBOL(osd_start_request);
 425
 426static void _osd_free_seg(struct osd_request *or __unused,
 427	struct _osd_req_data_segment *seg)
 428{
 429	if (!seg->buff || !seg->alloc_size)
 430		return;
 431
 432	kfree(seg->buff);
 433	seg->buff = NULL;
 434	seg->alloc_size = 0;
 435}
 436
 437static void _put_request(struct request *rq)
 438{
 439	/*
 440	 * If osd_finalize_request() was called but the request was not
 441	 * executed through the block layer, then we must release BIOs.
 442	 * TODO: Keep error code in or->async_error. Need to audit all
 443	 *       code paths.
 444	 */
 445	if (unlikely(rq->bio))
 446		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
 447	else
 448		blk_put_request(rq);
 449}
 450
 451void osd_end_request(struct osd_request *or)
 452{
 453	struct request *rq = or->request;
 454
 455	if (rq) {
 456		if (rq->next_rq) {
 457			_put_request(rq->next_rq);
 458			rq->next_rq = NULL;
 459		}
 460
 461		_put_request(rq);
 462	}
 463
 464	_osd_free_seg(or, &or->get_attr);
 465	_osd_free_seg(or, &or->enc_get_attr);
 466	_osd_free_seg(or, &or->set_attr);
 467	_osd_free_seg(or, &or->cdb_cont);
 468
 469	_osd_request_free(or);
 470}
 471EXPORT_SYMBOL(osd_end_request);
 472
 473static void _set_error_resid(struct osd_request *or, struct request *req,
 474			     int error)
 475{
 476	or->async_error = error;
 477	or->req_errors = req->errors ? : error;
 478	or->sense_len = req->sense_len;
 479	if (or->out.req)
 480		or->out.residual = or->out.req->resid_len;
 481	if (or->in.req)
 482		or->in.residual = or->in.req->resid_len;
 483}
 484
 485int osd_execute_request(struct osd_request *or)
 486{
 487	int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
 488
 489	_set_error_resid(or, or->request, error);
 490	return error;
 491}
 492EXPORT_SYMBOL(osd_execute_request);
 493
 494static void osd_request_async_done(struct request *req, int error)
 495{
 496	struct osd_request *or = req->end_io_data;
 497
 498	_set_error_resid(or, req, error);
 499	if (req->next_rq) {
 500		__blk_put_request(req->q, req->next_rq);
 501		req->next_rq = NULL;
 502	}
 503
 504	__blk_put_request(req->q, req);
 505	or->request = NULL;
 506	or->in.req = NULL;
 507	or->out.req = NULL;
 508
 509	if (or->async_done)
 510		or->async_done(or, or->async_private);
 511	else
 512		osd_end_request(or);
 513}
 514
 515int osd_execute_request_async(struct osd_request *or,
 516	osd_req_done_fn *done, void *private)
 517{
 518	or->request->end_io_data = or;
 519	or->async_private = private;
 520	or->async_done = done;
 521
 522	blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
 523			      osd_request_async_done);
 524	return 0;
 525}
 526EXPORT_SYMBOL(osd_execute_request_async);
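
/*
 * Illustrative sketch, not part of the original file: asynchronous execution.
 * When a done callback is supplied, osd_request_async_done() above does not
 * call osd_end_request(), so the callback must decode the sense data and
 * release the request itself.  example_done()/example_submit() are
 * hypothetical names.
 */
static void example_done(struct osd_request *or, void *private)
{
	struct osd_sense_info osi;
	int ret = osd_req_decode_sense_full(or, &osi, true, NULL, 0, NULL, 0);

	if (ret)
		OSD_ERR("async request failed => %d\n", ret);

	osd_end_request(or);	/* the callback owns the request */
}

static int example_submit(struct osd_request *or)
{
	return osd_execute_request_async(or, example_done, NULL);
}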
 527
 528u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
 529u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
 530
 531static int _osd_realloc_seg(struct osd_request *or,
 532	struct _osd_req_data_segment *seg, unsigned max_bytes)
 533{
 534	void *buff;
 535
 536	if (seg->alloc_size >= max_bytes)
 537		return 0;
 538
 539	buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
 540	if (!buff) {
 541		OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
 542			seg->alloc_size);
 543		return -ENOMEM;
 544	}
 545
 546	memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
 547	seg->buff = buff;
 548	seg->alloc_size = max_bytes;
 549	return 0;
 550}
 551
 552static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
 553{
 554	OSD_DEBUG("total_bytes=%d\n", total_bytes);
 555	return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
 556}
 557
 558static int _alloc_set_attr_list(struct osd_request *or,
 559	const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
 560{
 561	unsigned total_bytes = add_bytes;
 562
 563	for (; nelem; --nelem, ++oa)
 564		total_bytes += _osd_req_alist_elem_size(or, oa->len);
 565
 566	OSD_DEBUG("total_bytes=%d\n", total_bytes);
 567	return _osd_realloc_seg(or, &or->set_attr, total_bytes);
 568}
 569
 570static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
 571{
 572	OSD_DEBUG("total_bytes=%d\n", max_bytes);
 573	return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
 574}
 575
 576static int _alloc_get_attr_list(struct osd_request *or)
 577{
 578	OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
 579	return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
 580}
 581
 582/*
 583 * Common to all OSD commands
 584 */
 585
 586static void _osdv1_req_encode_common(struct osd_request *or,
 587	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
 588{
 589	struct osdv1_cdb *ocdb = &or->cdb.v1;
 590
 591	/*
 592	 * For speed, the commands
 593	 *	OSD_ACT_PERFORM_SCSI_COMMAND	, V1 0x8F7E, V2 0x8F7C
 594	 *	OSD_ACT_SCSI_TASK_MANAGEMENT	, V1 0x8F7F, V2 0x8F7D
  595	 * are not supported here. Pass zero and set the action code after the call
 596	 */
 597	act &= cpu_to_be16(~0x0080); /* V1 action code */
 598
 599	OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
 600
 601	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
 602	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
 603	ocdb->h.varlen_cdb.service_action = act;
 604
 605	ocdb->h.partition = cpu_to_be64(obj->partition);
 606	ocdb->h.object = cpu_to_be64(obj->id);
 607	ocdb->h.v1.length = cpu_to_be64(len);
 608	ocdb->h.v1.start_address = cpu_to_be64(offset);
 609}
 610
 611static void _osdv2_req_encode_common(struct osd_request *or,
 612	 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
 613{
 614	struct osdv2_cdb *ocdb = &or->cdb.v2;
 615
 616	OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
 617
 618	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
 619	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
 620	ocdb->h.varlen_cdb.service_action = act;
 621
 622	ocdb->h.partition = cpu_to_be64(obj->partition);
 623	ocdb->h.object = cpu_to_be64(obj->id);
 624	ocdb->h.v2.length = cpu_to_be64(len);
 625	ocdb->h.v2.start_address = cpu_to_be64(offset);
 626}
 627
 628static void _osd_req_encode_common(struct osd_request *or,
 629	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
 630{
 631	if (osd_req_is_ver1(or))
 632		_osdv1_req_encode_common(or, act, obj, offset, len);
 633	else
 634		_osdv2_req_encode_common(or, act, obj, offset, len);
 635}
 636
 637/*
 638 * Device commands
 639 */
 640/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
 641/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
 642
 643void osd_req_format(struct osd_request *or, u64 tot_capacity)
 644{
 645	_osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
 646				tot_capacity);
 647}
 648EXPORT_SYMBOL(osd_req_format);
 649
 650int osd_req_list_dev_partitions(struct osd_request *or,
 651	osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
 652{
 653	return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
 654}
 655EXPORT_SYMBOL(osd_req_list_dev_partitions);
 656
 657static void _osd_req_encode_flush(struct osd_request *or,
 658	enum osd_options_flush_scope_values op)
 659{
 660	struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
 661
 662	ocdb->command_specific_options = op;
 663}
 664
 665void osd_req_flush_obsd(struct osd_request *or,
 666	enum osd_options_flush_scope_values op)
 667{
 668	_osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
 669	_osd_req_encode_flush(or, op);
 670}
 671EXPORT_SYMBOL(osd_req_flush_obsd);
 672
 673/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
 674	const u8 *cdb, ...); */
 675/*TODO: void osd_req_task_management(struct osd_request *, ...); */
 676
 677/*
 678 * Partition commands
 679 */
 680static void _osd_req_encode_partition(struct osd_request *or,
 681	__be16 act, osd_id partition)
 682{
 683	struct osd_obj_id par = {
 684		.partition = partition,
 685		.id = 0,
 686	};
 687
 688	_osd_req_encode_common(or, act, &par, 0, 0);
 689}
 690
 691void osd_req_create_partition(struct osd_request *or, osd_id partition)
 692{
 693	_osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
 694}
 695EXPORT_SYMBOL(osd_req_create_partition);
 696
 697void osd_req_remove_partition(struct osd_request *or, osd_id partition)
 698{
 699	_osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
 700}
 701EXPORT_SYMBOL(osd_req_remove_partition);
 702
 703/*TODO: void osd_req_set_partition_key(struct osd_request *,
 704	osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
 705	u8 seed[OSD_CRYPTO_SEED_SIZE]); */
 706
 707static int _osd_req_list_objects(struct osd_request *or,
 708	__be16 action, const struct osd_obj_id *obj, osd_id initial_id,
 709	struct osd_obj_id_list *list, unsigned nelem)
 710{
 711	struct request_queue *q = osd_request_queue(or->osd_dev);
 712	u64 len = nelem * sizeof(osd_id) + sizeof(*list);
 713	struct bio *bio;
 714
 715	_osd_req_encode_common(or, action, obj, (u64)initial_id, len);
 716
 717	if (list->list_identifier)
 718		_osd_req_encode_olist(or, list);
 719
 720	WARN_ON(or->in.bio);
 721	bio = bio_map_kern(q, list, len, or->alloc_flags);
 722	if (IS_ERR(bio)) {
 723		OSD_ERR("!!! Failed to allocate list_objects BIO\n");
 724		return PTR_ERR(bio);
 725	}
 726
 727	bio->bi_rw &= ~REQ_WRITE;
 728	or->in.bio = bio;
 729	or->in.total_bytes = bio->bi_size;
 730	return 0;
 731}
 732
 733int osd_req_list_partition_collections(struct osd_request *or,
 734	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
 735	unsigned nelem)
 736{
 737	struct osd_obj_id par = {
 738		.partition = partition,
 739		.id = 0,
 740	};
 741
 742	return osd_req_list_collection_objects(or, &par, initial_id, list,
 743					       nelem);
 744}
 745EXPORT_SYMBOL(osd_req_list_partition_collections);
 746
 747int osd_req_list_partition_objects(struct osd_request *or,
 748	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
 749	unsigned nelem)
 750{
 751	struct osd_obj_id par = {
 752		.partition = partition,
 753		.id = 0,
 754	};
 755
 756	return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
 757				     nelem);
 758}
 759EXPORT_SYMBOL(osd_req_list_partition_objects);
 760
 761void osd_req_flush_partition(struct osd_request *or,
 762	osd_id partition, enum osd_options_flush_scope_values op)
 763{
 764	_osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
 765	_osd_req_encode_flush(or, op);
 766}
 767EXPORT_SYMBOL(osd_req_flush_partition);
 768
 769/*
 770 * Collection commands
 771 */
 772/*TODO: void osd_req_create_collection(struct osd_request *,
 773	const struct osd_obj_id *); */
 774/*TODO: void osd_req_remove_collection(struct osd_request *,
 775	const struct osd_obj_id *); */
 776
 777int osd_req_list_collection_objects(struct osd_request *or,
 778	const struct osd_obj_id *obj, osd_id initial_id,
 779	struct osd_obj_id_list *list, unsigned nelem)
 780{
 781	return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
 782				     initial_id, list, nelem);
 783}
 784EXPORT_SYMBOL(osd_req_list_collection_objects);
 785
 786/*TODO: void query(struct osd_request *, ...); V2 */
 787
 788void osd_req_flush_collection(struct osd_request *or,
 789	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
 790{
 791	_osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
 792	_osd_req_encode_flush(or, op);
 793}
 794EXPORT_SYMBOL(osd_req_flush_collection);
 795
 796/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
 797/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
 798
 799/*
 800 * Object commands
 801 */
 802void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
 803{
 804	_osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
 805}
 806EXPORT_SYMBOL(osd_req_create_object);
 807
 808void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
 809{
 810	_osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
 811}
 812EXPORT_SYMBOL(osd_req_remove_object);
 813
 814
 815/*TODO: void osd_req_create_multi(struct osd_request *or,
 816	struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
 817*/
 818
 819void osd_req_write(struct osd_request *or,
 820	const struct osd_obj_id *obj, u64 offset,
 821	struct bio *bio, u64 len)
 822{
 823	_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
 824	WARN_ON(or->out.bio || or->out.total_bytes);
 825	WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
 826	or->out.bio = bio;
 827	or->out.total_bytes = len;
 828}
 829EXPORT_SYMBOL(osd_req_write);
 830
 831int osd_req_write_kern(struct osd_request *or,
 832	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 833{
 834	struct request_queue *req_q = osd_request_queue(or->osd_dev);
 835	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 836
 837	if (IS_ERR(bio))
 838		return PTR_ERR(bio);
 839
 840	bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
 841	osd_req_write(or, obj, offset, bio, len);
 842	return 0;
 843}
 844EXPORT_SYMBOL(osd_req_write_kern);
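
/*
 * Illustrative sketch, not part of the original file: the complete synchronous
 * write path through this library.  @od, @obj, @caps and @buff are assumed to
 * be prepared by the caller; example_write_obj() is a hypothetical name.
 */
static int example_write_obj(struct osd_dev *od, const struct osd_obj_id *obj,
	void *caps, void *buff, u64 offset, u64 len)
{
	struct osd_request *or;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (unlikely(!or))
		return -ENOMEM;

	/* Encode the WRITE CDB and map the kernel buffer into a bio */
	ret = osd_req_write_kern(or, obj, offset, buff, len);
	if (ret)
		goto out;

	/* No options, no capability key (no-security) */
	ret = osd_finalize_request(or, 0, caps, NULL);
	if (ret)
		goto out;

	ret = osd_execute_request(or);
	if (ret)
		OSD_ERR("example write failed => %d\n", ret);
out:
	osd_end_request(or);
	return ret;
}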
 845
 846/*TODO: void osd_req_append(struct osd_request *,
 847	const struct osd_obj_id *, struct bio *data_out); */
 848/*TODO: void osd_req_create_write(struct osd_request *,
 849	const struct osd_obj_id *, struct bio *data_out, u64 offset); */
 850/*TODO: void osd_req_clear(struct osd_request *,
 851	const struct osd_obj_id *, u64 offset, u64 len); */
 852/*TODO: void osd_req_punch(struct osd_request *,
 853	const struct osd_obj_id *, u64 offset, u64 len); V2 */
 854
 855void osd_req_flush_object(struct osd_request *or,
 856	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
 857	/*V2*/ u64 offset, /*V2*/ u64 len)
 858{
 859	if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
 860		OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
 861		offset = 0;
 862		len = 0;
 863	}
 864
 865	_osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
 866	_osd_req_encode_flush(or, op);
 867}
 868EXPORT_SYMBOL(osd_req_flush_object);
 869
 870void osd_req_read(struct osd_request *or,
 871	const struct osd_obj_id *obj, u64 offset,
 872	struct bio *bio, u64 len)
 873{
 874	_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
 875	WARN_ON(or->in.bio || or->in.total_bytes);
 876	WARN_ON(bio->bi_rw & REQ_WRITE);
 877	or->in.bio = bio;
 878	or->in.total_bytes = len;
 879}
 880EXPORT_SYMBOL(osd_req_read);
 881
 882int osd_req_read_kern(struct osd_request *or,
 883	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 884{
 885	struct request_queue *req_q = osd_request_queue(or->osd_dev);
 886	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 887
 888	if (IS_ERR(bio))
 889		return PTR_ERR(bio);
 890
 891	osd_req_read(or, obj, offset, bio, len);
 892	return 0;
 893}
 894EXPORT_SYMBOL(osd_req_read_kern);
 895
 896static int _add_sg_continuation_descriptor(struct osd_request *or,
 897	const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
 898{
 899	struct osd_sg_continuation_descriptor *oscd;
 900	u32 oscd_size;
 901	unsigned i;
 902	int ret;
 903
 904	oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
 905
 906	if (!or->cdb_cont.total_bytes) {
  907		/* First time: jump over the header; we will write to:
 908		 *	cdb_cont.buff + cdb_cont.total_bytes
 909		 */
 910		or->cdb_cont.total_bytes =
 911				sizeof(struct osd_continuation_segment_header);
 912	}
 913
 914	ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
 915	if (unlikely(ret))
 916		return ret;
 917
 918	oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
 919	oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
 920	oscd->hdr.pad_length = 0;
 921	oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
 922
 923	*len = 0;
 924	/* copy the sg entries and convert to network byte order */
 925	for (i = 0; i < numentries; i++) {
 926		oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
 927		oscd->entries[i].len    = cpu_to_be64(sglist[i].len);
 928		*len += sglist[i].len;
 929	}
 930
 931	or->cdb_cont.total_bytes += oscd_size;
 932	OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
 933		  or->cdb_cont.total_bytes, oscd_size, numentries);
 934	return 0;
 935}
 936
 937static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
 938{
 939	struct request_queue *req_q = osd_request_queue(or->osd_dev);
 940	struct bio *bio;
 941	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
 942	struct osd_continuation_segment_header *cont_seg_hdr;
 943
 944	if (!or->cdb_cont.total_bytes)
 945		return 0;
 946
 947	cont_seg_hdr = or->cdb_cont.buff;
 948	cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
 949	cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
 950
 951	/* create a bio for continuation segment */
 952	bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
 953			   GFP_KERNEL);
 954	if (IS_ERR(bio))
 955		return PTR_ERR(bio);
 956
 957	bio->bi_rw |= REQ_WRITE;
 958
 959	/* integrity check the continuation before the bio is linked
 960	 * with the other data segments since the continuation
 961	 * integrity is separate from the other data segments.
 962	 */
 963	osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
 964
 965	cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
 966
 967	/* we can't use _req_append_segment, because we need to link in the
 968	 * continuation bio to the head of the bio list - the
 969	 * continuation segment (if it exists) is always the first segment in
 970	 * the out data buffer.
 971	 */
 972	bio->bi_next = or->out.bio;
 973	or->out.bio = bio;
 974	or->out.total_bytes += or->cdb_cont.total_bytes;
 975
 976	return 0;
 977}
 978
 979/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
 980 * @sglist that has the scatter gather entries. Scatter-gather enables a write
  981 * of multiple non-contiguous areas of an object, in a single call. The extents
  982 * may overlap and/or be in any order. The only constraint is that:
 983 *	total_bytes(sglist) >= total_bytes(bio)
 984 */
 985int osd_req_write_sg(struct osd_request *or,
 986	const struct osd_obj_id *obj, struct bio *bio,
 987	const struct osd_sg_entry *sglist, unsigned numentries)
 988{
 989	u64 len;
 990	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
 991
 992	if (ret)
 993		return ret;
 994	osd_req_write(or, obj, 0, bio, len);
 995
 996	return 0;
 997}
 998EXPORT_SYMBOL(osd_req_write_sg);
 999
1000/* osd_req_read_sg: Read multiple extents of an object into @bio
1001 * See osd_req_write_sg
1002 */
1003int osd_req_read_sg(struct osd_request *or,
1004	const struct osd_obj_id *obj, struct bio *bio,
1005	const struct osd_sg_entry *sglist, unsigned numentries)
1006{
1007	u64 len;
1008	u64 off;
1009	int ret;
1010
1011	if (numentries > 1) {
1012		off = 0;
1013		ret = _add_sg_continuation_descriptor(or, sglist, numentries,
1014						      &len);
1015		if (ret)
1016			return ret;
1017	} else {
 1018		/* Optimize the case of a single segment; read_sg is a
1019		 * bidi operation.
1020		 */
1021		len = sglist->len;
1022		off = sglist->offset;
1023	}
1024	osd_req_read(or, obj, off, bio, len);
1025
1026	return 0;
1027}
1028EXPORT_SYMBOL(osd_req_read_sg);
1029
1030/* SG-list write/read Kern API
1031 *
1032 * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
1033 * of sg_entries. @numentries indicates how many pointers and sg_entries there
 1034 * are.  By requiring an array of buff pointers, a caller can do a single
 1035 * write/read that scatters into multiple buffers.
1036 * NOTE: Each buffer + len should not cross a page boundary.
1037 */
1038static struct bio *_create_sg_bios(struct osd_request *or,
1039	void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
1040{
1041	struct request_queue *q = osd_request_queue(or->osd_dev);
1042	struct bio *bio;
1043	unsigned i;
1044
1045	bio = bio_kmalloc(GFP_KERNEL, numentries);
1046	if (unlikely(!bio)) {
 1047		OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
1048		return ERR_PTR(-ENOMEM);
1049	}
1050
1051	for (i = 0; i < numentries; i++) {
1052		unsigned offset = offset_in_page(buff[i]);
1053		struct page *page = virt_to_page(buff[i]);
1054		unsigned len = sglist[i].len;
1055		unsigned added_len;
1056
1057		BUG_ON(offset + len > PAGE_SIZE);
1058		added_len = bio_add_pc_page(q, bio, page, len, offset);
1059		if (unlikely(len != added_len)) {
1060			OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
1061				  len, added_len);
1062			bio_put(bio);
1063			return ERR_PTR(-ENOMEM);
1064		}
1065	}
1066
1067	return bio;
1068}
1069
1070int osd_req_write_sg_kern(struct osd_request *or,
1071	const struct osd_obj_id *obj, void **buff,
1072	const struct osd_sg_entry *sglist, unsigned numentries)
1073{
1074	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1075	if (IS_ERR(bio))
1076		return PTR_ERR(bio);
1077
1078	bio->bi_rw |= REQ_WRITE;
1079	osd_req_write_sg(or, obj, bio, sglist, numentries);
1080
1081	return 0;
1082}
1083EXPORT_SYMBOL(osd_req_write_sg_kern);
1084
1085int osd_req_read_sg_kern(struct osd_request *or,
1086	const struct osd_obj_id *obj, void **buff,
1087	const struct osd_sg_entry *sglist, unsigned numentries)
1088{
1089	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1090	if (IS_ERR(bio))
1091		return PTR_ERR(bio);
1092
1093	osd_req_read_sg(or, obj, bio, sglist, numentries);
1094
1095	return 0;
1096}
1097EXPORT_SYMBOL(osd_req_read_sg_kern);
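
/*
 * Illustrative sketch, not part of the original file: a scatter-gather write
 * of two discontiguous extents of one object through the kern API above.  The
 * extent offsets/lengths are arbitrary examples; per the NOTE above, each
 * buffer together with its length must not cross a page boundary.
 * example_write_two_extents() is a hypothetical name.
 */
static int example_write_two_extents(struct osd_request *or,
	const struct osd_obj_id *obj, void *buf0, void *buf1)
{
	struct osd_sg_entry sglist[2] = {
		{ .offset = 0,           .len = 512 },
		{ .offset = 1024 * 1024, .len = 512 },
	};
	void *buff[2] = { buf0, buf1 };

	/* Builds the CDB-continuation SG descriptor and maps both buffers */
	return osd_req_write_sg_kern(or, obj, buff, sglist, 2);
}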
1098
1099
1100
1101void osd_req_get_attributes(struct osd_request *or,
1102	const struct osd_obj_id *obj)
1103{
1104	_osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
1105}
1106EXPORT_SYMBOL(osd_req_get_attributes);
1107
1108void osd_req_set_attributes(struct osd_request *or,
1109	const struct osd_obj_id *obj)
1110{
1111	_osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
1112}
1113EXPORT_SYMBOL(osd_req_set_attributes);
1114
1115/*
1116 * Attributes List-mode
1117 */
1118
1119int osd_req_add_set_attr_list(struct osd_request *or,
1120	const struct osd_attr *oa, unsigned nelem)
1121{
1122	unsigned total_bytes = or->set_attr.total_bytes;
1123	void *attr_last;
1124	int ret;
1125
1126	if (or->attributes_mode &&
1127	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1128		WARN_ON(1);
1129		return -EINVAL;
1130	}
1131	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1132
1133	if (!total_bytes) { /* first-time: allocate and put list header */
1134		total_bytes = _osd_req_sizeof_alist_header(or);
1135		ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1136		if (ret)
1137			return ret;
1138		_osd_req_set_alist_type(or, or->set_attr.buff,
1139					OSD_ATTR_LIST_SET_RETRIEVE);
1140	}
1141	attr_last = or->set_attr.buff + total_bytes;
1142
1143	for (; nelem; --nelem) {
1144		unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
1145
1146		total_bytes += elem_size;
1147		if (unlikely(or->set_attr.alloc_size < total_bytes)) {
1148			or->set_attr.total_bytes = total_bytes - elem_size;
1149			ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1150			if (ret)
1151				return ret;
1152			attr_last =
1153				or->set_attr.buff + or->set_attr.total_bytes;
1154		}
1155
1156		_osd_req_alist_elem_encode(or, attr_last, oa);
1157
1158		attr_last += elem_size;
1159		++oa;
1160	}
1161
1162	or->set_attr.total_bytes = total_bytes;
1163	return 0;
1164}
1165EXPORT_SYMBOL(osd_req_add_set_attr_list);
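
/*
 * Illustrative sketch, not part of the original file: attaching a single
 * set-attribute to a request in list mode.  The page/id values are
 * caller-supplied stand-ins; real users take them from osd_attributes.h.
 * example_set_one_attr() is a hypothetical name.
 */
static int example_set_one_attr(struct osd_request *or, u32 page, u32 id,
	void *val, unsigned len)
{
	struct osd_attr attr = {
		.attr_page = page,
		.attr_id   = id,
		.len       = len,
		.val_ptr   = val,	/* encoded into or->set_attr above */
	};

	return osd_req_add_set_attr_list(or, &attr, 1);
}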
1166
1167static int _req_append_segment(struct osd_request *or,
1168	unsigned padding, struct _osd_req_data_segment *seg,
1169	struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
1170{
1171	void *pad_buff;
1172	int ret;
1173
1174	if (padding) {
1175		/* check if we can just add it to last buffer */
1176		if (last_seg &&
1177		    (padding <= last_seg->alloc_size - last_seg->total_bytes))
1178			pad_buff = last_seg->buff + last_seg->total_bytes;
1179		else
1180			pad_buff = io->pad_buff;
1181
1182		ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
1183				       or->alloc_flags);
1184		if (ret)
1185			return ret;
1186		io->total_bytes += padding;
1187	}
1188
1189	ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
1190			       or->alloc_flags);
1191	if (ret)
1192		return ret;
1193
1194	io->total_bytes += seg->total_bytes;
1195	OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
1196		  seg->total_bytes);
1197	return 0;
1198}
1199
1200static int _osd_req_finalize_set_attr_list(struct osd_request *or)
1201{
1202	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1203	unsigned padding;
1204	int ret;
1205
1206	if (!or->set_attr.total_bytes) {
1207		cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
1208		return 0;
1209	}
1210
1211	cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
1212	cdbh->attrs_list.set_attr_offset =
1213		osd_req_encode_offset(or, or->out.total_bytes, &padding);
1214
1215	ret = _req_append_segment(or, padding, &or->set_attr,
1216				  or->out.last_seg, &or->out);
1217	if (ret)
1218		return ret;
1219
1220	or->out.last_seg = &or->set_attr;
1221	return 0;
1222}
1223
1224int osd_req_add_get_attr_list(struct osd_request *or,
1225	const struct osd_attr *oa, unsigned nelem)
1226{
1227	unsigned total_bytes = or->enc_get_attr.total_bytes;
1228	void *attr_last;
1229	int ret;
1230
1231	if (or->attributes_mode &&
1232	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1233		WARN_ON(1);
1234		return -EINVAL;
1235	}
1236	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1237
1238	/* first time calc data-in list header size */
1239	if (!or->get_attr.total_bytes)
1240		or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
1241
1242	/* calc data-out info */
1243	if (!total_bytes) { /* first-time: allocate and put list header */
1244		unsigned max_bytes;
1245
1246		total_bytes = _osd_req_sizeof_alist_header(or);
1247		max_bytes = total_bytes +
1248			nelem * sizeof(struct osd_attributes_list_attrid);
1249		ret = _alloc_get_attr_desc(or, max_bytes);
1250		if (ret)
1251			return ret;
1252
1253		_osd_req_set_alist_type(or, or->enc_get_attr.buff,
1254					OSD_ATTR_LIST_GET);
1255	}
1256	attr_last = or->enc_get_attr.buff + total_bytes;
1257
1258	for (; nelem; --nelem) {
1259		struct osd_attributes_list_attrid *attrid;
1260		const unsigned cur_size = sizeof(*attrid);
1261
1262		total_bytes += cur_size;
1263		if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
1264			or->enc_get_attr.total_bytes = total_bytes - cur_size;
1265			ret = _alloc_get_attr_desc(or,
1266					total_bytes + nelem * sizeof(*attrid));
1267			if (ret)
1268				return ret;
1269			attr_last = or->enc_get_attr.buff +
1270				or->enc_get_attr.total_bytes;
1271		}
1272
1273		attrid = attr_last;
1274		attrid->attr_page = cpu_to_be32(oa->attr_page);
1275		attrid->attr_id = cpu_to_be32(oa->attr_id);
1276
1277		attr_last += cur_size;
1278
1279		/* calc data-in size */
1280		or->get_attr.total_bytes +=
1281			_osd_req_alist_elem_size(or, oa->len);
1282		++oa;
1283	}
1284
1285	or->enc_get_attr.total_bytes = total_bytes;
1286
1287	OSD_DEBUG(
1288	       "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
1289	       or->get_attr.total_bytes,
1290	       or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
1291	       or->enc_get_attr.total_bytes,
1292	       (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
1293			/ sizeof(struct osd_attributes_list_attrid));
1294
1295	return 0;
1296}
1297EXPORT_SYMBOL(osd_req_add_get_attr_list);
1298
1299static int _osd_req_finalize_get_attr_list(struct osd_request *or)
1300{
1301	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1302	unsigned out_padding;
1303	unsigned in_padding;
1304	int ret;
1305
1306	if (!or->enc_get_attr.total_bytes) {
1307		cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
1308		cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
1309		return 0;
1310	}
1311
1312	ret = _alloc_get_attr_list(or);
1313	if (ret)
1314		return ret;
1315
1316	/* The out-going buffer info update */
1317	OSD_DEBUG("out-going\n");
1318	cdbh->attrs_list.get_attr_desc_bytes =
1319		cpu_to_be32(or->enc_get_attr.total_bytes);
1320
1321	cdbh->attrs_list.get_attr_desc_offset =
1322		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1323
1324	ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
1325				  or->out.last_seg, &or->out);
1326	if (ret)
1327		return ret;
1328	or->out.last_seg = &or->enc_get_attr;
1329
1330	/* The incoming buffer info update */
1331	OSD_DEBUG("in-coming\n");
1332	cdbh->attrs_list.get_attr_alloc_length =
1333		cpu_to_be32(or->get_attr.total_bytes);
1334
1335	cdbh->attrs_list.get_attr_offset =
1336		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1337
1338	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1339				  &or->in);
1340	if (ret)
1341		return ret;
1342	or->in.last_seg = &or->get_attr;
1343
1344	return 0;
1345}
1346
1347int osd_req_decode_get_attr_list(struct osd_request *or,
1348	struct osd_attr *oa, int *nelem, void **iterator)
1349{
1350	unsigned cur_bytes, returned_bytes;
1351	int n;
1352	const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
1353	void *cur_p;
1354
1355	if (!_osd_req_is_alist_type(or, or->get_attr.buff,
1356				    OSD_ATTR_LIST_SET_RETRIEVE)) {
1357		oa->attr_page = 0;
1358		oa->attr_id = 0;
1359		oa->val_ptr = NULL;
1360		oa->len = 0;
1361		*iterator = NULL;
1362		return 0;
1363	}
1364
1365	if (*iterator) {
1366		BUG_ON((*iterator < or->get_attr.buff) ||
1367		     (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
1368		cur_p = *iterator;
1369		cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
1370		returned_bytes = or->get_attr.total_bytes;
1371	} else { /* first time decode the list header */
1372		cur_bytes = sizeof_attr_list;
1373		returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
1374					sizeof_attr_list;
1375
1376		cur_p = or->get_attr.buff + sizeof_attr_list;
1377
1378		if (returned_bytes > or->get_attr.alloc_size) {
1379			OSD_DEBUG("target report: space was not big enough! "
 1380				  "Allocated=%u Needed=%u\n",
1381				  or->get_attr.alloc_size,
1382				  returned_bytes + sizeof_attr_list);
1383
1384			returned_bytes =
1385				or->get_attr.alloc_size - sizeof_attr_list;
1386		}
1387		or->get_attr.total_bytes = returned_bytes;
1388	}
1389
1390	for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
1391		int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
1392						 returned_bytes - cur_bytes);
1393
1394		if (inc < 0) {
 1395			OSD_ERR("BAD FOOD from target. list not valid! "
1396				"c=%d r=%d n=%d\n",
1397				cur_bytes, returned_bytes, n);
1398			oa->val_ptr = NULL;
1399			cur_bytes = returned_bytes; /* break the caller loop */
1400			break;
1401		}
1402
1403		cur_bytes += inc;
1404		cur_p += inc;
1405		++oa;
1406	}
1407
1408	*iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
1409	*nelem = n;
1410	return returned_bytes - cur_bytes;
1411}
1412EXPORT_SYMBOL(osd_req_decode_get_attr_list);
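
/*
 * Illustrative sketch, not part of the original file: the typical way to walk
 * a returned get-attributes list.  @attrs is assumed to be the same array that
 * was passed to osd_req_add_get_attr_list() before the request was executed;
 * example_consume_attrs() is a hypothetical name.
 */
static void example_consume_attrs(struct osd_request *or,
	struct osd_attr *attrs, int nelem)
{
	void *iter = NULL;
	int n;

	do {
		n = nelem;	/* in: array capacity, out: decoded count */
		osd_req_decode_get_attr_list(or, attrs, &n, &iter);

		/* attrs[0..n-1] now have len and val_ptr filled in */
		/* ... consume the n decoded attributes here ... */
	} while (iter);		/* non-NULL iterator => more elements remain */
}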
1413
1414/*
1415 * Attributes Page-mode
1416 */
1417
1418int osd_req_add_get_attr_page(struct osd_request *or,
1419	u32 page_id, void *attar_page, unsigned max_page_len,
1420	const struct osd_attr *set_one_attr)
1421{
1422	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1423
1424	if (or->attributes_mode &&
1425	    or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1426		WARN_ON(1);
1427		return -EINVAL;
1428	}
1429	or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
1430
1431	or->get_attr.buff = attar_page;
1432	or->get_attr.total_bytes = max_page_len;
1433
1434	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
1435	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
1436
1437	if (!set_one_attr || !set_one_attr->attr_page)
1438		return 0; /* The set is optional */
1439
1440	or->set_attr.buff = set_one_attr->val_ptr;
1441	or->set_attr.total_bytes = set_one_attr->len;
1442
1443	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
1444	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
1445	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
1446	return 0;
1447}
1448EXPORT_SYMBOL(osd_req_add_get_attr_page);
1449
1450static int _osd_req_finalize_attr_page(struct osd_request *or)
1451{
1452	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1453	unsigned in_padding, out_padding;
1454	int ret;
1455
1456	/* returned page */
1457	cdbh->attrs_page.get_attr_offset =
1458		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1459
1460	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1461				  &or->in);
1462	if (ret)
1463		return ret;
1464
1465	if (or->set_attr.total_bytes == 0)
1466		return 0;
1467
1468	/* set one value */
1469	cdbh->attrs_page.set_attr_offset =
1470		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1471
1472	ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
1473				  &or->out);
1474	return ret;
1475}
1476
1477static inline void osd_sec_parms_set_out_offset(bool is_v1,
1478	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1479{
1480	if (is_v1)
1481		sec_parms->v1.data_out_integrity_check_offset = offset;
1482	else
1483		sec_parms->v2.data_out_integrity_check_offset = offset;
1484}
1485
1486static inline void osd_sec_parms_set_in_offset(bool is_v1,
1487	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1488{
1489	if (is_v1)
1490		sec_parms->v1.data_in_integrity_check_offset = offset;
1491	else
1492		sec_parms->v2.data_in_integrity_check_offset = offset;
1493}
1494
1495static int _osd_req_finalize_data_integrity(struct osd_request *or,
1496	bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
1497	const u8 *cap_key)
1498{
1499	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1500	int ret;
1501
1502	if (!osd_is_sec_alldata(sec_parms))
1503		return 0;
1504
1505	if (has_out) {
1506		struct _osd_req_data_segment seg = {
1507			.buff = &or->out_data_integ,
1508			.total_bytes = sizeof(or->out_data_integ),
1509		};
1510		unsigned pad;
1511
1512		or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1513		or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1514			or->set_attr.total_bytes);
1515		or->out_data_integ.get_attributes_bytes = cpu_to_be64(
1516			or->enc_get_attr.total_bytes);
1517
1518		osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
1519			osd_req_encode_offset(or, or->out.total_bytes, &pad));
1520
1521		ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
1522					  &or->out);
1523		if (ret)
1524			return ret;
1525		or->out.last_seg = NULL;
1526
 1527		/* they are now all chained to the request; sign them all together */
1528		osd_sec_sign_data(&or->out_data_integ, out_data_bio,
1529				  cap_key);
1530	}
1531
1532	if (has_in) {
1533		struct _osd_req_data_segment seg = {
1534			.buff = &or->in_data_integ,
1535			.total_bytes = sizeof(or->in_data_integ),
1536		};
1537		unsigned pad;
1538
1539		osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
1540			osd_req_encode_offset(or, or->in.total_bytes, &pad));
1541
1542		ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
1543					  &or->in);
1544		if (ret)
1545			return ret;
1546
1547		or->in.last_seg = NULL;
1548	}
1549
1550	return 0;
1551}
1552
1553/*
1554 * osd_finalize_request and helpers
1555 */
1556static struct request *_make_request(struct request_queue *q, bool has_write,
1557			      struct _osd_io_info *oii, gfp_t flags)
1558{
1559	if (oii->bio)
1560		return blk_make_request(q, oii->bio, flags);
1561	else {
1562		struct request *req;
1563
1564		req = blk_get_request(q, has_write ? WRITE : READ, flags);
1565		if (unlikely(!req))
1566			return ERR_PTR(-ENOMEM);
1567
1568		return req;
1569	}
1570}
1571
1572static int _init_blk_request(struct osd_request *or,
1573	bool has_in, bool has_out)
1574{
1575	gfp_t flags = or->alloc_flags;
1576	struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1577	struct request_queue *q = scsi_device->request_queue;
1578	struct request *req;
1579	int ret;
1580
1581	req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1582	if (IS_ERR(req)) {
1583		ret = PTR_ERR(req);
1584		goto out;
1585	}
1586
1587	or->request = req;
1588	req->cmd_type = REQ_TYPE_BLOCK_PC;
1589	req->cmd_flags |= REQ_QUIET;
1590
1591	req->timeout = or->timeout;
1592	req->retries = or->retries;
1593	req->sense = or->sense;
1594	req->sense_len = 0;
1595
1596	if (has_out) {
1597		or->out.req = req;
1598		if (has_in) {
1599			/* allocate bidi request */
1600			req = _make_request(q, false, &or->in, flags);
1601			if (IS_ERR(req)) {
1602				OSD_DEBUG("blk_get_request for bidi failed\n");
1603				ret = PTR_ERR(req);
1604				goto out;
1605			}
1606			req->cmd_type = REQ_TYPE_BLOCK_PC;
1607			or->in.req = or->request->next_rq = req;
1608		}
1609	} else if (has_in)
1610		or->in.req = req;
1611
1612	ret = 0;
1613out:
1614	OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
1615			or, has_in, has_out, ret, or->request);
1616	return ret;
1617}
1618
1619int osd_finalize_request(struct osd_request *or,
1620	u8 options, const void *cap, const u8 *cap_key)
1621{
1622	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1623	bool has_in, has_out;
1624	 /* Save for data_integrity without the cdb_continuation */
1625	struct bio *out_data_bio = or->out.bio;
1626	u64 out_data_bytes = or->out.total_bytes;
1627	int ret;
1628
1629	if (options & OSD_REQ_FUA)
1630		cdbh->options |= OSD_CDB_FUA;
1631
1632	if (options & OSD_REQ_DPO)
1633		cdbh->options |= OSD_CDB_DPO;
1634
1635	if (options & OSD_REQ_BYPASS_TIMESTAMPS)
1636		cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
1637
1638	osd_set_caps(&or->cdb, cap);
1639
1640	has_in = or->in.bio || or->get_attr.total_bytes;
1641	has_out = or->out.bio || or->cdb_cont.total_bytes ||
1642		or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
1643
1644	ret = _osd_req_finalize_cdb_cont(or, cap_key);
1645	if (ret) {
1646		OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
1647		return ret;
1648	}
1649	ret = _init_blk_request(or, has_in, has_out);
1650	if (ret) {
1651		OSD_DEBUG("_init_blk_request failed\n");
1652		return ret;
1653	}
1654
1655	or->out.pad_buff = sg_out_pad_buffer;
1656	or->in.pad_buff = sg_in_pad_buffer;
1657
1658	if (!or->attributes_mode)
1659		or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1660	cdbh->command_specific_options |= or->attributes_mode;
1661	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1662		ret = _osd_req_finalize_attr_page(or);
1663		if (ret) {
1664			OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
1665			return ret;
1666		}
1667	} else {
1668		/* TODO: I think that for the GET_ATTR command these 2 should
 1669		 * be reversed to keep them in execution order (for embedded
1670		 * targets with low memory footprint)
1671		 */
1672		ret = _osd_req_finalize_set_attr_list(or);
1673		if (ret) {
1674			OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
1675			return ret;
1676		}
1677
1678		ret = _osd_req_finalize_get_attr_list(or);
1679		if (ret) {
1680			OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
1681			return ret;
1682		}
1683	}
1684
1685	ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1686					       out_data_bio, out_data_bytes,
1687					       cap_key);
1688	if (ret)
1689		return ret;
1690
1691	osd_sec_sign_cdb(&or->cdb, cap_key);
1692
1693	or->request->cmd = or->cdb.buff;
1694	or->request->cmd_len = _osd_req_cdb_len(or);
1695
1696	return 0;
1697}
1698EXPORT_SYMBOL(osd_finalize_request);
1699
1700static bool _is_osd_security_code(int code)
1701{
1702	return	(code == osd_security_audit_value_frozen) ||
1703		(code == osd_security_working_key_frozen) ||
1704		(code == osd_nonce_not_unique) ||
1705		(code == osd_nonce_timestamp_out_of_range) ||
1706		(code == osd_invalid_dataout_buffer_integrity_check_value);
1707}
1708
1709#define OSD_SENSE_PRINT1(fmt, a...) \
1710	do { \
1711		if (__cur_sense_need_output) \
1712			OSD_ERR(fmt, ##a); \
1713	} while (0)
1714
1715#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1("    " fmt, ##a)
1716
1717int osd_req_decode_sense_full(struct osd_request *or,
1718	struct osd_sense_info *osi, bool silent,
1719	struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
1720	struct osd_attr *bad_attr_list, int max_attr)
1721{
1722	int sense_len, original_sense_len;
1723	struct osd_sense_info local_osi;
1724	struct scsi_sense_descriptor_based *ssdb;
1725	void *cur_descriptor;
1726#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
1727	const bool __cur_sense_need_output = false;
1728#else
1729	bool __cur_sense_need_output = !silent;
1730#endif
1731	int ret;
1732
1733	if (likely(!or->req_errors))
1734		return 0;
1735
1736	osi = osi ? : &local_osi;
1737	memset(osi, 0, sizeof(*osi));
1738
1739	ssdb = (typeof(ssdb))or->sense;
1740	sense_len = or->sense_len;
1741	if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1742		OSD_ERR("Block-layer returned error(0x%x) but "
1743			"sense_len(%u) || key(%d) is empty\n",
1744			or->req_errors, sense_len, ssdb->sense_key);
1745		goto analyze;
1746	}
1747
1748	if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
1749		OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
1750			ssdb->response_code, sense_len);
1751		goto analyze;
1752	}
1753
1754	osi->key = ssdb->sense_key;
1755	osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
1756	original_sense_len = ssdb->additional_sense_length + 8;
1757
1758#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
1759	if (__cur_sense_need_output)
1760		__cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
1761#endif
1762	OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
1763			"additional_code=0x%x async_error=%d errors=0x%x\n",
1764			osi->key, original_sense_len, sense_len,
1765			osi->additional_code, or->async_error,
1766			or->req_errors);
1767
1768	if (original_sense_len < sense_len)
1769		sense_len = original_sense_len;
1770
1771	cur_descriptor = ssdb->ssd;
1772	sense_len -= sizeof(*ssdb);
1773	while (sense_len > 0) {
1774		struct scsi_sense_descriptor *ssd = cur_descriptor;
1775		int cur_len = ssd->additional_length + 2;
1776
1777		sense_len -= cur_len;
1778
1779		if (sense_len < 0)
1780			break; /* sense was truncated */
1781
1782		switch (ssd->descriptor_type) {
1783		case scsi_sense_information:
1784		case scsi_sense_command_specific_information:
1785		{
1786			struct scsi_sense_command_specific_data_descriptor
1787				*sscd = cur_descriptor;
1788
1789			osi->command_info =
 1790				get_unaligned_be64(&sscd->information);
1791			OSD_SENSE_PRINT2(
1792				"command_specific_information 0x%llx \n",
1793				_LLU(osi->command_info));
1794			break;
1795		}
1796		case scsi_sense_key_specific:
1797		{
1798			struct scsi_sense_key_specific_data_descriptor
1799				*ssks = cur_descriptor;
1800
1801			osi->sense_info = get_unaligned_be16(&ssks->value);
1802			OSD_SENSE_PRINT2(
1803				"sense_key_specific_information %u "
1804				"sksv_cd_bpv_bp (0x%x)\n",
1805				osi->sense_info, ssks->sksv_cd_bpv_bp);
1806			break;
1807		}
1808		case osd_sense_object_identification:
1809		{ /* FIXME: Keep the first, not the last; store in an array */
1810			struct osd_sense_identification_data_descriptor
1811				*osidd = cur_descriptor;
1812
1813			osi->not_initiated_command_functions =
1814				le32_to_cpu(osidd->not_initiated_functions);
1815			osi->completed_command_functions =
1816				le32_to_cpu(osidd->completed_functions);
1817			osi->obj.partition = be64_to_cpu(osidd->partition_id);
1818			osi->obj.id = be64_to_cpu(osidd->object_id);
1819			OSD_SENSE_PRINT2(
1820				"object_identification pid=0x%llx oid=0x%llx\n",
1821				_LLU(osi->obj.partition), _LLU(osi->obj.id));
1822			OSD_SENSE_PRINT2(
1823				"not_initiated_bits(%x) "
1824				"completed_command_bits(%x)\n",
1825				osi->not_initiated_command_functions,
1826				osi->completed_command_functions);
1827			break;
1828		}
1829		case osd_sense_response_integrity_check:
1830		{
1831			struct osd_sense_response_integrity_check_descriptor
1832				*osricd = cur_descriptor;
1833			const unsigned len =
1834					  sizeof(osricd->integrity_check_value);
1835			char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
1836
1837			hex_dump_to_buffer(osricd->integrity_check_value, len,
1838				       32, 1, key_dump, sizeof(key_dump), true);
1839			OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
			break;
1840		}
1841		case osd_sense_attribute_identification:
1842		{
1843			struct osd_sense_attributes_data_descriptor
1844				*osadd = cur_descriptor;
1845			unsigned len = min(cur_len, sense_len);
1846			struct osd_sense_attr *pattr = osadd->sense_attrs;
1847
1848			while (len >= sizeof(*pattr)) {
1849				u32 attr_page = be32_to_cpu(pattr->attr_page);
1850				u32 attr_id = be32_to_cpu(pattr->attr_id);
1851
1852				if (!osi->attr.attr_page) {
1853					osi->attr.attr_page = attr_page;
1854					osi->attr.attr_id = attr_id;
1855				}
1856
1857				if (bad_attr_list && max_attr) {
1858					bad_attr_list->attr_page = attr_page;
1859					bad_attr_list->attr_id = attr_id;
1860					bad_attr_list++;
1861					max_attr--;
1862				}
1863
1864				len -= sizeof(*pattr);
1865				OSD_SENSE_PRINT2(
1866					"osd_sense_attribute_identification "
1867					"attr_page=0x%x attr_id=0x%x\n",
1868					attr_page, attr_id);
1869			}
			break;
1870		}
1871		/* These are not legal for OSD */
1872		case scsi_sense_field_replaceable_unit:
1873			OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
1874			break;
1875		case scsi_sense_stream_commands:
1876			OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
1877			break;
1878		case scsi_sense_block_commands:
1879			OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
1880			break;
1881		case scsi_sense_ata_return:
1882			OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
1883			break;
1884		default:
1885			if (ssd->descriptor_type <= scsi_sense_Reserved_last)
1886				OSD_SENSE_PRINT2(
1887					"scsi_sense Reserved descriptor (0x%x)\n",
1888					ssd->descriptor_type);
1889			else
1890				OSD_SENSE_PRINT2(
1891					"scsi_sense Vendor descriptor (0x%x)\n",
1892					ssd->descriptor_type);
1893		}
1894
1895		cur_descriptor += cur_len;
1896	}
1897
1898analyze:
1899	if (!osi->key) {
1900		/* scsi sense is empty, the request was never issued to the
1901		 * target; the linux return code might tell us what happened.
1902		 */
1903		if (or->async_error == -ENOMEM)
1904			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
1905		else
1906			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
1907		ret = or->async_error;
1908	} else if (osi->key <= scsi_sk_recovered_error) {
1909		osi->osd_err_pri = 0;
1910		ret = 0;
1911	} else if (osi->additional_code == scsi_invalid_field_in_cdb) {
1912		if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
1913			osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
1914			ret = -EFAULT; /* caller should recover from this */
1915		} else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
1916			osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
1917			ret = -ENOENT;
1918		} else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
1919			osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
1920			ret = -EACCES;
1921		} else {
1922			osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1923			ret = -EINVAL;
1924		}
1925	} else if (osi->additional_code == osd_quota_error) {
1926		osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
1927		ret = -ENOSPC;
1928	} else if (_is_osd_security_code(osi->additional_code)) {
1929		osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1930		ret = -EINVAL;
1931	} else {
1932		osi->osd_err_pri = OSD_ERR_PRI_EIO;
1933		ret = -EIO;
1934	}
1935
1936	if (!or->out.residual)
1937		or->out.residual = or->out.total_bytes;
1938	if (!or->in.residual)
1939		or->in.residual = or->in.total_bytes;
1940
1941	return ret;
1942}
1943EXPORT_SYMBOL(osd_req_decode_sense_full);
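
/*
 * A minimal usage sketch (hypothetical helper, not part of the exported
 * API): decoding the sense data of a completed request and acting on the
 * osd_err_pri classification filled in above.  The retry hint below is an
 * assumption made for the example only.
 */
static int __unused _example_decode_sense(struct osd_request *or)
{
	struct osd_sense_info osi;
	int ret = osd_req_decode_sense_full(or, &osi, true /* silent */,
					    NULL, 0, NULL, 0);

	if (!ret)
		return 0;	/* success or a recovered error */

	/* OSD_ERR_PRI_RESOURCE maps -ENOMEM above; a caller might retry
	 * later.  Everything else is returned as classified by the decoder.
	 */
	if (osi.osd_err_pri == OSD_ERR_PRI_RESOURCE)
		OSD_DEBUG("transient resource failure, caller may retry\n");

	return ret;
}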
1944
1945/*
1946 * Implementation of osd_sec.h API
1947 * TODO: Move to a separate osd_sec.c file at a later stage.
1948 */
1949
1950enum { OSD_SEC_CAP_V1_ALL_CAPS =
1951	OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE   |
1952	OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
1953	OSD_SEC_CAP_WRITE  | OSD_SEC_CAP_READ     | OSD_SEC_CAP_POL_SEC  |
1954	OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
1955};
1956
1957enum { OSD_SEC_CAP_V2_ALL_CAPS =
1958	OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
1959};
1960
1961void osd_sec_init_nosec_doall_caps(void *caps,
1962	const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
1963{
1964	struct osd_capability *cap = caps;
1965	u8 type;
1966	u8 descriptor_type;
1967
1968	if (likely(obj->id)) {
1969		if (unlikely(is_collection)) {
1970			type = OSD_SEC_OBJ_COLLECTION;
1971			descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
1972						  OSD_SEC_OBJ_DESC_COL;
1973		} else {
1974			type = OSD_SEC_OBJ_USER;
1975			descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
1976		}
1977		WARN_ON(!obj->partition);
1978	} else {
1979		type = obj->partition ? OSD_SEC_OBJ_PARTITION :
1980					OSD_SEC_OBJ_ROOT;
1981		descriptor_type = OSD_SEC_OBJ_DESC_PAR;
1982	}
1983
1984	memset(cap, 0, sizeof(*cap));
1985
1986	cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
1987	cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
1988	cap->h.security_method = OSD_SEC_NOSEC;
1989/*	cap->expiration_time;
1990	cap->AUDIT[30-10];
1991	cap->discriminator[42-30];
1992	cap->object_created_time; */
1993	cap->h.object_type = type;
1994	osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
1995	cap->h.object_descriptor_type = descriptor_type;
1996	cap->od.obj_desc.policy_access_tag = 0;
1997	cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
1998	cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
1999}
2000EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
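
/*
 * A minimal usage sketch (hypothetical helper and made-up object ids, for
 * illustration only): formatting an all-capabilities, no-security
 * capability for a user object.  The resulting buffer is what a caller
 * would pass to osd_finalize_request() as 'caps'.
 */
static void __unused _example_build_nosec_caps(u8 caps[OSD_CAP_LEN])
{
	struct osd_obj_id obj = {
		.partition = 0x10000,	/* made-up partition id */
		.id        = 0x10003,	/* made-up user-object id */
	};

	/* not a collection, OSDv1 capability layout */
	osd_sec_init_nosec_doall_caps(caps, &obj, false, true);
}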
2001
2002/* FIXME: Extract the version from the caps pointer.
2003 *        Also, Pete's target only supports OSDv1 caps for now.
2004 */
2005void osd_set_caps(struct osd_cdb *cdb, const void *caps)
2006{
2007	bool is_ver1 = true;
2008	/* NOTE: They start at same address */
2009	memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
2010}
2011
2012bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
2013{
2014	return false;
2015}
2016
2017void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
2018{
2019}
2020
2021void osd_sec_sign_data(void *data_integ __unused,
2022		       struct bio *bio __unused, const u8 *cap_key __unused)
2023{
2024}
2025
2026/*
2027 * Declared in osd_protocol.h
2028 * 4.12.5 Data-In and Data-Out buffer offsets
2029 * byte offset = mantissa * (2^(exponent+8))
2030 * Returns the smallest allowed encoded offset that contains the given
2031 * @offset. The actual byte offset encoded is @offset + *@padding.
2032 */
2033osd_cdb_offset __osd_encode_offset(
2034	u64 offset, unsigned *padding, int min_shift, int max_shift)
2035{
2036	u64 try_offset = -1, mod, align;
2037	osd_cdb_offset be32_offset;
2038	int shift;
2039
2040	*padding = 0;
2041	if (!offset)
2042		return 0;
2043
2044	for (shift = min_shift; shift < max_shift; ++shift) {
2045		try_offset = offset >> shift;
2046		if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
2047			break;
2048	}
2049
2050	BUG_ON(shift == max_shift);
2051
2052	align = 1 << shift;
2053	mod = offset & (align - 1);
2054	if (mod) {
2055		*padding = align - mod;
2056		try_offset += 1;
2057	}
2058
2059	try_offset |= ((shift - 8) & 0xf) << 28;
2060	be32_offset = cpu_to_be32((u32)try_offset);
2061
2062	OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
2063		 _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
2064		 be32_offset, *padding);
2065	return be32_offset;
2066}
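
/*
 * Worked example for the encoding above (offset and shift limits are
 * illustrative; min_shift=8 and max_shift=16 are assumed here):
 *
 *   __osd_encode_offset(0x30001, &pad, 8, 16)
 *     shift=8:  0x30001 >> 8 = 0x300, which is < 2^28, so shift 8 is used
 *     mod = 0x30001 & 0xff = 1  =>  *pad = 0xff, mantissa = 0x300 + 1
 *     encoded (cpu order) = 0x301 | (((8 - 8) & 0xf) << 28) = 0x00000301
 *
 *   Decoding per the formula above: 0x301 * 2^(0+8) = 0x30100, which is
 *   exactly 0x30001 + 0xff, i.e. the original offset plus the padding.
 */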