   1/*
   2 * osd_initiator - Main body of the osd initiator library.
   3 *
   4 * Note: The file does not contain the advanced security functionality which
   5 * is only needed by the security_manager's initiators.
   6 *
   7 * Copyright (C) 2008 Panasas Inc.  All rights reserved.
   8 *
   9 * Authors:
  10 *   Boaz Harrosh <bharrosh@panasas.com>
  11 *   Benny Halevy <bhalevy@panasas.com>
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License version 2
  15 *
  16 * Redistribution and use in source and binary forms, with or without
  17 * modification, are permitted provided that the following conditions
  18 * are met:
  19 *
  20 *  1. Redistributions of source code must retain the above copyright
  21 *     notice, this list of conditions and the following disclaimer.
  22 *  2. Redistributions in binary form must reproduce the above copyright
  23 *     notice, this list of conditions and the following disclaimer in the
  24 *     documentation and/or other materials provided with the distribution.
  25 *  3. Neither the name of the Panasas company nor the names of its
  26 *     contributors may be used to endorse or promote products derived
  27 *     from this software without specific prior written permission.
  28 *
  29 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  30 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  31 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  32 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  37 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  38 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  39 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  40 */
  41
  42#include <linux/slab.h>
  43#include <linux/module.h>
  44
  45#include <scsi/osd_initiator.h>
  46#include <scsi/osd_sec.h>
  47#include <scsi/osd_attributes.h>
  48#include <scsi/osd_sense.h>
  49
  50#include <scsi/scsi_device.h>
  51
  52#include "osd_debug.h"
  53
  54#ifndef __unused
  55#    define __unused			__attribute__((unused))
  56#endif
  57
  58enum { OSD_REQ_RETRIES = 1 };
  59
  60MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
  61MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
  62MODULE_LICENSE("GPL");
  63
  64static inline void build_test(void)
  65{
  66	/* structures were not packed */
  67	BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
  68	BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
  69	BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
  70}
  71
  72static const char *_osd_ver_desc(struct osd_request *or)
  73{
  74	return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
  75}
  76
  77#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
  78
  79static int _osd_get_print_system_info(struct osd_dev *od,
  80	void *caps, struct osd_dev_info *odi)
  81{
  82	struct osd_request *or;
  83	struct osd_attr get_attrs[] = {
  84		ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
  85		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
  86		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
  87		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
  88		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
  89		ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
  90		ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
  91		ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
  92		ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
  93		ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
  94		/* IBM-OSD-SIM has a bug with this one; put it last */
  95		ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
  96	};
  97	void *iter = NULL, *pFirst;
  98	int nelem = ARRAY_SIZE(get_attrs), a = 0;
  99	int ret;
 100
 101	or = osd_start_request(od, GFP_KERNEL);
 102	if (!or)
 103		return -ENOMEM;
 104
 105	/* get attrs */
 106	osd_req_get_attributes(or, &osd_root_object);
 107	osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
 108
 109	ret = osd_finalize_request(or, 0, caps, NULL);
 110	if (ret)
 111		goto out;
 112
 113	ret = osd_execute_request(or);
 114	if (ret) {
 115		OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
 116		goto out;
 117	}
 118
 119	osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
 120
 121	OSD_INFO("Detected %s device\n",
 122		_osd_ver_desc(or));
 123
 124	pFirst = get_attrs[a++].val_ptr;
 125	OSD_INFO("VENDOR_IDENTIFICATION  [%s]\n",
 126		(char *)pFirst);
 127
 128	pFirst = get_attrs[a++].val_ptr;
 129	OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
 130		(char *)pFirst);
 131
 132	pFirst = get_attrs[a++].val_ptr;
 133	OSD_INFO("PRODUCT_MODEL          [%s]\n",
 134		(char *)pFirst);
 135
 136	pFirst = get_attrs[a++].val_ptr;
 137	OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
 138		pFirst ? get_unaligned_be32(pFirst) : ~0U);
 139
 140	pFirst = get_attrs[a++].val_ptr;
 141	OSD_INFO("PRODUCT_SERIAL_NUMBER  [%s]\n",
 142		(char *)pFirst);
 143
 144	odi->osdname_len = get_attrs[a].len;
 145	/* Avoid NULL; for the memcmp optimization a 0-length buffer is good enough */
 146	odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
 147	if (odi->osdname_len)
 148		memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
 149	OSD_INFO("OSD_NAME               [%s]\n", odi->osdname);
 150	a++;
 151
 152	pFirst = get_attrs[a++].val_ptr;
 153	OSD_INFO("TOTAL_CAPACITY         [0x%llx]\n",
 154		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 155
 156	pFirst = get_attrs[a++].val_ptr;
 157	OSD_INFO("USED_CAPACITY          [0x%llx]\n",
 158		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 159
 160	pFirst = get_attrs[a++].val_ptr;
 161	OSD_INFO("NUMBER_OF_PARTITIONS   [%llu]\n",
 162		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 163
 164	if (a >= nelem)
 165		goto out;
 166
 167	/* FIXME: Where are the time utilities */
 168	pFirst = get_attrs[a++].val_ptr;
 169	OSD_INFO("CLOCK                  [0x%02x%02x%02x%02x%02x%02x]\n",
 170		((char *)pFirst)[0], ((char *)pFirst)[1],
 171		((char *)pFirst)[2], ((char *)pFirst)[3],
 172		((char *)pFirst)[4], ((char *)pFirst)[5]);
 173
 174	if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
 175		unsigned len = get_attrs[a].len;
 176		char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
 177
 178		hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
 179				   sid_dump, sizeof(sid_dump), true);
 180		OSD_INFO("OSD_SYSTEM_ID(%d)\n"
 181			 "        [%s]\n", len, sid_dump);
 182
 183		if (unlikely(len > sizeof(odi->systemid))) {
 184			OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
 185				"device identification might not work\n", len);
 186			len = sizeof(odi->systemid);
 187		}
 188		odi->systemid_len = len;
 189		memcpy(odi->systemid, get_attrs[a].val_ptr, len);
 190		a++;
 191	}
 192out:
 193	osd_end_request(or);
 194	return ret;
 195}
 196
 197int osd_auto_detect_ver(struct osd_dev *od,
 198	void *caps, struct osd_dev_info *odi)
 199{
 200	int ret;
 201
 202	/* Auto-detect the osd version */
 203	ret = _osd_get_print_system_info(od, caps, odi);
 204	if (ret) {
 205		osd_dev_set_ver(od, OSD_VER1);
 206		OSD_DEBUG("converting to OSD1\n");
 207		ret = _osd_get_print_system_info(od, caps, odi);
 208	}
 209
 210	return ret;
 211}
 212EXPORT_SYMBOL(osd_auto_detect_ver);
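/*
 * Editorial example (not part of the original file): a hedged sketch of how a
 * ULD might drive osd_auto_detect_ver() at probe time.  The probe wrapper and
 * the use of the no-security capability helper from osd_sec.h are assumptions
 * for illustration only.
 *
 *	static int example_detect(struct osd_dev *od)
 *	{
 *		u8 caps[OSD_CAP_LEN];
 *		struct osd_dev_info odi = {};
 *		int ret;
 *
 *		osd_sec_init_nosec_doall_caps(caps, &osd_root_object,
 *					      false, true);
 *		ret = osd_auto_detect_ver(od, caps, &odi);
 *		if (ret)
 *			return ret;
 *
 *		// odi.systemid/odi.osdname now describe the target; the
 *		// osdname buffer was kzalloc'ed above and is owned by the
 *		// caller from here on.
 *		kfree(odi.osdname);
 *		return 0;
 *	}
 */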
 213
 214static unsigned _osd_req_cdb_len(struct osd_request *or)
 215{
 216	return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
 217}
 218
 219static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
 220{
 221	return osd_req_is_ver1(or) ?
 222		osdv1_attr_list_elem_size(len) :
 223		osdv2_attr_list_elem_size(len);
 224}
 225
 226static void _osd_req_alist_elem_encode(struct osd_request *or,
 227	void *attr_last, const struct osd_attr *oa)
 228{
 229	if (osd_req_is_ver1(or)) {
 230		struct osdv1_attributes_list_element *attr = attr_last;
 231
 232		attr->attr_page = cpu_to_be32(oa->attr_page);
 233		attr->attr_id = cpu_to_be32(oa->attr_id);
 234		attr->attr_bytes = cpu_to_be16(oa->len);
 235		memcpy(attr->attr_val, oa->val_ptr, oa->len);
 236	} else {
 237		struct osdv2_attributes_list_element *attr = attr_last;
 238
 239		attr->attr_page = cpu_to_be32(oa->attr_page);
 240		attr->attr_id = cpu_to_be32(oa->attr_id);
 241		attr->attr_bytes = cpu_to_be16(oa->len);
 242		memcpy(attr->attr_val, oa->val_ptr, oa->len);
 243	}
 244}
 245
 246static int _osd_req_alist_elem_decode(struct osd_request *or,
 247	void *cur_p, struct osd_attr *oa, unsigned max_bytes)
 248{
 249	unsigned inc;
 250	if (osd_req_is_ver1(or)) {
 251		struct osdv1_attributes_list_element *attr = cur_p;
 252
 253		if (max_bytes < sizeof(*attr))
 254			return -1;
 255
 256		oa->len = be16_to_cpu(attr->attr_bytes);
 257		inc = _osd_req_alist_elem_size(or, oa->len);
 258		if (inc > max_bytes)
 259			return -1;
 260
 261		oa->attr_page = be32_to_cpu(attr->attr_page);
 262		oa->attr_id = be32_to_cpu(attr->attr_id);
 263
 264		/* OSD1: On empty attributes we return a pointer to 2 bytes
 265		 * of zeros. This keeps similar behaviour with OSD2.
 266		 * (See below)
 267		 */
 268		oa->val_ptr = likely(oa->len) ? attr->attr_val :
 269						(u8 *)&attr->attr_bytes;
 270	} else {
 271		struct osdv2_attributes_list_element *attr = cur_p;
 272
 273		if (max_bytes < sizeof(*attr))
 274			return -1;
 275
 276		oa->len = be16_to_cpu(attr->attr_bytes);
 277		inc = _osd_req_alist_elem_size(or, oa->len);
 278		if (inc > max_bytes)
 279			return -1;
 280
 281		oa->attr_page = be32_to_cpu(attr->attr_page);
 282		oa->attr_id = be32_to_cpu(attr->attr_id);
 283
 284		/* OSD2: For convenience, on empty attributes, we return 8 bytes
 285		 * of zeros here. This keeps the same behaviour with OSD2r04,
 286		 * and is nice with null terminating ASCII fields.
 287		 * oa->val_ptr == NULL marks the end-of-list, or error.
 288		 */
 289		oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
 290	}
 291	return inc;
 292}
 293
 294static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
 295{
 296	return osd_req_is_ver1(or) ?
 297		osdv1_list_size(list_head) :
 298		osdv2_list_size(list_head);
 299}
 300
 301static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
 302{
 303	return osd_req_is_ver1(or) ?
 304		sizeof(struct osdv1_attributes_list_header) :
 305		sizeof(struct osdv2_attributes_list_header);
 306}
 307
 308static void _osd_req_set_alist_type(struct osd_request *or,
 309	void *list, int list_type)
 310{
 311	if (osd_req_is_ver1(or)) {
 312		struct osdv1_attributes_list_header *attr_list = list;
 313
 314		memset(attr_list, 0, sizeof(*attr_list));
 315		attr_list->type = list_type;
 316	} else {
 317		struct osdv2_attributes_list_header *attr_list = list;
 318
 319		memset(attr_list, 0, sizeof(*attr_list));
 320		attr_list->type = list_type;
 321	}
 322}
 323
 324static bool _osd_req_is_alist_type(struct osd_request *or,
 325	void *list, int list_type)
 326{
 327	if (!list)
 328		return false;
 329
 330	if (osd_req_is_ver1(or)) {
 331		struct osdv1_attributes_list_header *attr_list = list;
 332
 333		return attr_list->type == list_type;
 334	} else {
 335		struct osdv2_attributes_list_header *attr_list = list;
 336
 337		return attr_list->type == list_type;
 338	}
 339}
 340
 341/* This is for List-objects not Attributes-Lists */
 342static void _osd_req_encode_olist(struct osd_request *or,
 343	struct osd_obj_id_list *list)
 344{
 345	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
 346
 347	if (osd_req_is_ver1(or)) {
 348		cdbh->v1.list_identifier = list->list_identifier;
 349		cdbh->v1.start_address = list->continuation_id;
 350	} else {
 351		cdbh->v2.list_identifier = list->list_identifier;
 352		cdbh->v2.start_address = list->continuation_id;
 353	}
 354}
 355
 356static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
 357	u64 offset, unsigned *padding)
 358{
 359	return __osd_encode_offset(offset, padding,
 360			osd_req_is_ver1(or) ?
 361				OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
 362			OSD_OFFSET_MAX_SHIFT);
 363}
 364
 365static struct osd_security_parameters *
 366_osd_req_sec_params(struct osd_request *or)
 367{
 368	struct osd_cdb *ocdb = &or->cdb;
 369
 370	if (osd_req_is_ver1(or))
 371		return (struct osd_security_parameters *)&ocdb->v1.sec_params;
 372	else
 373		return (struct osd_security_parameters *)&ocdb->v2.sec_params;
 374}
 375
 376void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
 377{
 378	memset(osdd, 0, sizeof(*osdd));
 379	osdd->scsi_device = scsi_device;
 380	osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
 381#ifdef OSD_VER1_SUPPORT
 382	osdd->version = OSD_VER2;
 383#endif
 384	/* TODO: Allocate pools for osd_request attributes ... */
 385}
 386EXPORT_SYMBOL(osd_dev_init);
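/*
 * Editorial note (assumption, not original text): osd_dev_init() and
 * osd_dev_fini() are expected to bracket the osd_dev lifetime, typically from
 * a ULD probe/remove pair:
 *
 *	osd_dev_init(&od, scsi_device);	// scsi_device from the SCSI midlayer
 *	...				// osd_start_request() based I/O
 *	osd_dev_fini(&od);
 */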
 387
 388void osd_dev_fini(struct osd_dev *osdd)
 389{
 390	/* TODO: De-allocate pools */
 391
 392	osdd->scsi_device = NULL;
 393}
 394EXPORT_SYMBOL(osd_dev_fini);
 395
 396static struct osd_request *_osd_request_alloc(gfp_t gfp)
 397{
 398	struct osd_request *or;
 399
 400	/* TODO: Use mempool with one saved request */
 401	or = kzalloc(sizeof(*or), gfp);
 402	return or;
 403}
 404
 405static void _osd_request_free(struct osd_request *or)
 406{
 407	kfree(or);
 408}
 409
 410struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
 411{
 412	struct osd_request *or;
 413
 414	or = _osd_request_alloc(gfp);
 415	if (!or)
 416		return NULL;
 417
 418	or->osd_dev = dev;
 419	or->alloc_flags = gfp;
 420	or->timeout = dev->def_timeout;
 421	or->retries = OSD_REQ_RETRIES;
 422
 423	return or;
 424}
 425EXPORT_SYMBOL(osd_start_request);
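/*
 * Editorial sketch (hedged, not part of the original file) of the synchronous
 * request life cycle.  The object id and caps buffer are assumed to be
 * prepared by the caller; error handling is intentionally minimal.
 *
 *	static int example_create_object(struct osd_dev *od,
 *					 struct osd_obj_id *obj, void *caps)
 *	{
 *		struct osd_request *or;
 *		int ret;
 *
 *		or = osd_start_request(od, GFP_KERNEL);
 *		if (!or)
 *			return -ENOMEM;
 *
 *		osd_req_create_object(or, obj);
 *
 *		ret = osd_finalize_request(or, 0, caps, NULL);
 *		if (!ret)
 *			ret = osd_execute_request(or);
 *
 *		osd_end_request(or);
 *		return ret;
 *	}
 */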
 426
 427static void _osd_free_seg(struct osd_request *or __unused,
 428	struct _osd_req_data_segment *seg)
 429{
 430	if (!seg->buff || !seg->alloc_size)
 431		return;
 432
 433	kfree(seg->buff);
 434	seg->buff = NULL;
 435	seg->alloc_size = 0;
 436}
 437
 438static void _put_request(struct request *rq)
 439{
 440	/*
 441	 * If osd_finalize_request() was called but the request was not
 442	 * executed through the block layer, then we must release BIOs.
 443	 * TODO: Keep error code in or->async_error. Need to audit all
 444	 *       code paths.
 445	 */
 446	if (unlikely(rq->bio))
 447		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
 448	else
 449		blk_put_request(rq);
 450}
 451
 452void osd_end_request(struct osd_request *or)
 453{
 454	struct request *rq = or->request;
 455
 456	if (rq) {
 457		if (rq->next_rq) {
 458			_put_request(rq->next_rq);
 459			rq->next_rq = NULL;
 460		}
 461
 462		_put_request(rq);
 463	}
 464
 465	_osd_free_seg(or, &or->get_attr);
 466	_osd_free_seg(or, &or->enc_get_attr);
 467	_osd_free_seg(or, &or->set_attr);
 468	_osd_free_seg(or, &or->cdb_cont);
 469
 470	_osd_request_free(or);
 471}
 472EXPORT_SYMBOL(osd_end_request);
 473
 474static void _set_error_resid(struct osd_request *or, struct request *req,
 475			     int error)
 476{
 477	or->async_error = error;
 478	or->req_errors = req->errors ? : error;
 479	or->sense_len = req->sense_len;
 480	if (or->out.req)
 481		or->out.residual = or->out.req->resid_len;
 482	if (or->in.req)
 483		or->in.residual = or->in.req->resid_len;
 484}
 485
 486int osd_execute_request(struct osd_request *or)
 487{
 488	int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
 489
 490	_set_error_resid(or, or->request, error);
 491	return error;
 492}
 493EXPORT_SYMBOL(osd_execute_request);
 494
 495static void osd_request_async_done(struct request *req, int error)
 496{
 497	struct osd_request *or = req->end_io_data;
 498
 499	_set_error_resid(or, req, error);
 500	if (req->next_rq) {
 501		__blk_put_request(req->q, req->next_rq);
 502		req->next_rq = NULL;
 503	}
 504
 505	__blk_put_request(req->q, req);
 506	or->request = NULL;
 507	or->in.req = NULL;
 508	or->out.req = NULL;
 509
 510	if (or->async_done)
 511		or->async_done(or, or->async_private);
 512	else
 513		osd_end_request(or);
 514}
 515
 516int osd_execute_request_async(struct osd_request *or,
 517	osd_req_done_fn *done, void *private)
 518{
 519	or->request->end_io_data = or;
 520	or->async_private = private;
 521	or->async_done = done;
 522
 523	blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
 524			      osd_request_async_done);
 525	return 0;
 526}
 527EXPORT_SYMBOL(osd_execute_request_async);
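/*
 * Editorial sketch of the asynchronous path (the completion callback and its
 * private cookie are illustrative assumptions).  Note that when a done
 * callback is registered, osd_request_async_done() does not free the request,
 * so the callback must call osd_end_request() itself.
 *
 *	static void example_done(struct osd_request *or, void *private)
 *	{
 *		struct completion *waiter = private;
 *
 *		// inspect or->async_error / decode sense here if needed
 *		osd_end_request(or);
 *		complete(waiter);
 *	}
 *
 *	// ... after osd_finalize_request() succeeded:
 *	//	ret = osd_execute_request_async(or, example_done, &wait);
 */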
 528
 529u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
 530u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
 531
 532static int _osd_realloc_seg(struct osd_request *or,
 533	struct _osd_req_data_segment *seg, unsigned max_bytes)
 534{
 535	void *buff;
 536
 537	if (seg->alloc_size >= max_bytes)
 538		return 0;
 539
 540	buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
 541	if (!buff) {
 542		OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
 543			seg->alloc_size);
 544		return -ENOMEM;
 545	}
 546
 547	memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
 548	seg->buff = buff;
 549	seg->alloc_size = max_bytes;
 550	return 0;
 551}
 552
 553static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
 554{
 555	OSD_DEBUG("total_bytes=%d\n", total_bytes);
 556	return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
 557}
 558
 559static int _alloc_set_attr_list(struct osd_request *or,
 560	const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
 561{
 562	unsigned total_bytes = add_bytes;
 563
 564	for (; nelem; --nelem, ++oa)
 565		total_bytes += _osd_req_alist_elem_size(or, oa->len);
 566
 567	OSD_DEBUG("total_bytes=%d\n", total_bytes);
 568	return _osd_realloc_seg(or, &or->set_attr, total_bytes);
 569}
 570
 571static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
 572{
 573	OSD_DEBUG("total_bytes=%d\n", max_bytes);
 574	return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
 575}
 576
 577static int _alloc_get_attr_list(struct osd_request *or)
 578{
 579	OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
 580	return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
 581}
 582
 583/*
 584 * Common to all OSD commands
 585 */
 586
 587static void _osdv1_req_encode_common(struct osd_request *or,
 588	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
 589{
 590	struct osdv1_cdb *ocdb = &or->cdb.v1;
 591
 592	/*
 593	 * For speed, the commands
 594	 *	OSD_ACT_PERFORM_SCSI_COMMAND	, V1 0x8F7E, V2 0x8F7C
 595	 *	OSD_ACT_SCSI_TASK_MANAGEMENT	, V1 0x8F7F, V2 0x8F7D
 596	 * are not supported here. Pass zero and set the action code after the call.
 597	 */
 598	act &= cpu_to_be16(~0x0080); /* V1 action code */
 599
 600	OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
 601
 602	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
 603	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
 604	ocdb->h.varlen_cdb.service_action = act;
 605
 606	ocdb->h.partition = cpu_to_be64(obj->partition);
 607	ocdb->h.object = cpu_to_be64(obj->id);
 608	ocdb->h.v1.length = cpu_to_be64(len);
 609	ocdb->h.v1.start_address = cpu_to_be64(offset);
 610}
 611
 612static void _osdv2_req_encode_common(struct osd_request *or,
 613	 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
 614{
 615	struct osdv2_cdb *ocdb = &or->cdb.v2;
 616
 617	OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
 618
 619	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
 620	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
 621	ocdb->h.varlen_cdb.service_action = act;
 622
 623	ocdb->h.partition = cpu_to_be64(obj->partition);
 624	ocdb->h.object = cpu_to_be64(obj->id);
 625	ocdb->h.v2.length = cpu_to_be64(len);
 626	ocdb->h.v2.start_address = cpu_to_be64(offset);
 627}
 628
 629static void _osd_req_encode_common(struct osd_request *or,
 630	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
 631{
 632	if (osd_req_is_ver1(or))
 633		_osdv1_req_encode_common(or, act, obj, offset, len);
 634	else
 635		_osdv2_req_encode_common(or, act, obj, offset, len);
 636}
 637
 638/*
 639 * Device commands
 640 */
 641/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
 642/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
 643
 644void osd_req_format(struct osd_request *or, u64 tot_capacity)
 645{
 646	_osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
 647				tot_capacity);
 648}
 649EXPORT_SYMBOL(osd_req_format);
 650
 651int osd_req_list_dev_partitions(struct osd_request *or,
 652	osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
 653{
 654	return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
 655}
 656EXPORT_SYMBOL(osd_req_list_dev_partitions);
 657
 658static void _osd_req_encode_flush(struct osd_request *or,
 659	enum osd_options_flush_scope_values op)
 660{
 661	struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
 662
 663	ocdb->command_specific_options = op;
 664}
 665
 666void osd_req_flush_obsd(struct osd_request *or,
 667	enum osd_options_flush_scope_values op)
 668{
 669	_osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
 670	_osd_req_encode_flush(or, op);
 671}
 672EXPORT_SYMBOL(osd_req_flush_obsd);
 673
 674/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
 675	const u8 *cdb, ...); */
 676/*TODO: void osd_req_task_management(struct osd_request *, ...); */
 677
 678/*
 679 * Partition commands
 680 */
 681static void _osd_req_encode_partition(struct osd_request *or,
 682	__be16 act, osd_id partition)
 683{
 684	struct osd_obj_id par = {
 685		.partition = partition,
 686		.id = 0,
 687	};
 688
 689	_osd_req_encode_common(or, act, &par, 0, 0);
 690}
 691
 692void osd_req_create_partition(struct osd_request *or, osd_id partition)
 693{
 694	_osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
 695}
 696EXPORT_SYMBOL(osd_req_create_partition);
 697
 698void osd_req_remove_partition(struct osd_request *or, osd_id partition)
 699{
 700	_osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
 701}
 702EXPORT_SYMBOL(osd_req_remove_partition);
 703
 704/*TODO: void osd_req_set_partition_key(struct osd_request *,
 705	osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
 706	u8 seed[OSD_CRYPTO_SEED_SIZE]); */
 707
 708static int _osd_req_list_objects(struct osd_request *or,
 709	__be16 action, const struct osd_obj_id *obj, osd_id initial_id,
 710	struct osd_obj_id_list *list, unsigned nelem)
 711{
 712	struct request_queue *q = osd_request_queue(or->osd_dev);
 713	u64 len = nelem * sizeof(osd_id) + sizeof(*list);
 714	struct bio *bio;
 715
 716	_osd_req_encode_common(or, action, obj, (u64)initial_id, len);
 717
 718	if (list->list_identifier)
 719		_osd_req_encode_olist(or, list);
 720
 721	WARN_ON(or->in.bio);
 722	bio = bio_map_kern(q, list, len, or->alloc_flags);
 723	if (IS_ERR(bio)) {
 724		OSD_ERR("!!! Failed to allocate list_objects BIO\n");
 725		return PTR_ERR(bio);
 726	}
 727
 728	bio->bi_rw &= ~REQ_WRITE;
 729	or->in.bio = bio;
 730	or->in.total_bytes = bio->bi_size;
 731	return 0;
 732}
 733
 734int osd_req_list_partition_collections(struct osd_request *or,
 735	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
 736	unsigned nelem)
 737{
 738	struct osd_obj_id par = {
 739		.partition = partition,
 740		.id = 0,
 741	};
 742
 743	return osd_req_list_collection_objects(or, &par, initial_id, list,
 744					       nelem);
 745}
 746EXPORT_SYMBOL(osd_req_list_partition_collections);
 747
 748int osd_req_list_partition_objects(struct osd_request *or,
 749	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
 750	unsigned nelem)
 751{
 752	struct osd_obj_id par = {
 753		.partition = partition,
 754		.id = 0,
 755	};
 756
 757	return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
 758				     nelem);
 759}
 760EXPORT_SYMBOL(osd_req_list_partition_objects);
 761
 762void osd_req_flush_partition(struct osd_request *or,
 763	osd_id partition, enum osd_options_flush_scope_values op)
 764{
 765	_osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
 766	_osd_req_encode_flush(or, op);
 767}
 768EXPORT_SYMBOL(osd_req_flush_partition);
 769
 770/*
 771 * Collection commands
 772 */
 773/*TODO: void osd_req_create_collection(struct osd_request *,
 774	const struct osd_obj_id *); */
 775/*TODO: void osd_req_remove_collection(struct osd_request *,
 776	const struct osd_obj_id *); */
 777
 778int osd_req_list_collection_objects(struct osd_request *or,
 779	const struct osd_obj_id *obj, osd_id initial_id,
 780	struct osd_obj_id_list *list, unsigned nelem)
 781{
 782	return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
 783				     initial_id, list, nelem);
 784}
 785EXPORT_SYMBOL(osd_req_list_collection_objects);
 786
 787/*TODO: void query(struct osd_request *, ...); V2 */
 788
 789void osd_req_flush_collection(struct osd_request *or,
 790	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
 791{
 792	_osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
 793	_osd_req_encode_flush(or, op);
 794}
 795EXPORT_SYMBOL(osd_req_flush_collection);
 796
 797/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
 798/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
 799
 800/*
 801 * Object commands
 802 */
 803void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
 804{
 805	_osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
 806}
 807EXPORT_SYMBOL(osd_req_create_object);
 808
 809void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
 810{
 811	_osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
 812}
 813EXPORT_SYMBOL(osd_req_remove_object);
 814
 815
 816/*TODO: void osd_req_create_multi(struct osd_request *or,
 817	struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
 818*/
 819
 820void osd_req_write(struct osd_request *or,
 821	const struct osd_obj_id *obj, u64 offset,
 822	struct bio *bio, u64 len)
 823{
 824	_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
 825	WARN_ON(or->out.bio || or->out.total_bytes);
 826	WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
 827	or->out.bio = bio;
 828	or->out.total_bytes = len;
 829}
 830EXPORT_SYMBOL(osd_req_write);
 831
 832int osd_req_write_kern(struct osd_request *or,
 833	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 834{
 835	struct request_queue *req_q = osd_request_queue(or->osd_dev);
 836	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 837
 838	if (IS_ERR(bio))
 839		return PTR_ERR(bio);
 840
 841	bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
 842	osd_req_write(or, obj, offset, bio, len);
 843	return 0;
 844}
 845EXPORT_SYMBOL(osd_req_write_kern);
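/*
 * Editorial example (hedged): writing a kmalloc'ed buffer to an existing
 * object.  @buff must be suitable for bio_map_kern(); the object id and caps
 * buffer are assumptions prepared by the caller.
 *
 *	or = osd_start_request(od, GFP_KERNEL);
 *	if (!or)
 *		return -ENOMEM;
 *	ret = osd_req_write_kern(or, obj, 0, buff, len);
 *	if (!ret)
 *		ret = osd_finalize_request(or, 0, caps, NULL);
 *	if (!ret)
 *		ret = osd_execute_request(or);
 *	osd_end_request(or);
 */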
 846
 847/*TODO: void osd_req_append(struct osd_request *,
 848	const struct osd_obj_id *, struct bio *data_out); */
 849/*TODO: void osd_req_create_write(struct osd_request *,
 850	const struct osd_obj_id *, struct bio *data_out, u64 offset); */
 851/*TODO: void osd_req_clear(struct osd_request *,
 852	const struct osd_obj_id *, u64 offset, u64 len); */
 853/*TODO: void osd_req_punch(struct osd_request *,
 854	const struct osd_obj_id *, u64 offset, u64 len); V2 */
 855
 856void osd_req_flush_object(struct osd_request *or,
 857	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
 858	/*V2*/ u64 offset, /*V2*/ u64 len)
 859{
 860	if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
 861		OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
 862		offset = 0;
 863		len = 0;
 864	}
 865
 866	_osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
 867	_osd_req_encode_flush(or, op);
 868}
 869EXPORT_SYMBOL(osd_req_flush_object);
 870
 871void osd_req_read(struct osd_request *or,
 872	const struct osd_obj_id *obj, u64 offset,
 873	struct bio *bio, u64 len)
 874{
 875	_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
 876	WARN_ON(or->in.bio || or->in.total_bytes);
 877	WARN_ON(bio->bi_rw & REQ_WRITE);
 878	or->in.bio = bio;
 879	or->in.total_bytes = len;
 880}
 881EXPORT_SYMBOL(osd_req_read);
 882
 883int osd_req_read_kern(struct osd_request *or,
 884	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 885{
 886	struct request_queue *req_q = osd_request_queue(or->osd_dev);
 887	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 888
 889	if (IS_ERR(bio))
 890		return PTR_ERR(bio);
 891
 892	osd_req_read(or, obj, offset, bio, len);
 893	return 0;
 894}
 895EXPORT_SYMBOL(osd_req_read_kern);
 896
 897static int _add_sg_continuation_descriptor(struct osd_request *or,
 898	const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
 899{
 900	struct osd_sg_continuation_descriptor *oscd;
 901	u32 oscd_size;
 902	unsigned i;
 903	int ret;
 904
 905	oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
 906
 907	if (!or->cdb_cont.total_bytes) {
 908		/* First time, jump over the header, we will write to:
 909		 *	cdb_cont.buff + cdb_cont.total_bytes
 910		 */
 911		or->cdb_cont.total_bytes =
 912				sizeof(struct osd_continuation_segment_header);
 913	}
 914
 915	ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
 916	if (unlikely(ret))
 917		return ret;
 918
 919	oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
 920	oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
 921	oscd->hdr.pad_length = 0;
 922	oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
 923
 924	*len = 0;
 925	/* copy the sg entries and convert to network byte order */
 926	for (i = 0; i < numentries; i++) {
 927		oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
 928		oscd->entries[i].len    = cpu_to_be64(sglist[i].len);
 929		*len += sglist[i].len;
 930	}
 931
 932	or->cdb_cont.total_bytes += oscd_size;
 933	OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
 934		  or->cdb_cont.total_bytes, oscd_size, numentries);
 935	return 0;
 936}
 937
 938static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
 939{
 940	struct request_queue *req_q = osd_request_queue(or->osd_dev);
 941	struct bio *bio;
 942	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
 943	struct osd_continuation_segment_header *cont_seg_hdr;
 944
 945	if (!or->cdb_cont.total_bytes)
 946		return 0;
 947
 948	cont_seg_hdr = or->cdb_cont.buff;
 949	cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
 950	cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
 951
 952	/* create a bio for continuation segment */
 953	bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
 954			   GFP_KERNEL);
 955	if (IS_ERR(bio))
 956		return PTR_ERR(bio);
 957
 958	bio->bi_rw |= REQ_WRITE;
 959
 960	/* integrity check the continuation before the bio is linked
 961	 * with the other data segments since the continuation
 962	 * integrity is separate from the other data segments.
 963	 */
 964	osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
 965
 966	cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
 967
 968	/* we can't use _req_append_segment, because we need to link in the
 969	 * continuation bio to the head of the bio list - the
 970	 * continuation segment (if it exists) is always the first segment in
 971	 * the out data buffer.
 972	 */
 973	bio->bi_next = or->out.bio;
 974	or->out.bio = bio;
 975	or->out.total_bytes += or->cdb_cont.total_bytes;
 976
 977	return 0;
 978}
 979
 980/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
 981 * @sglist that has the scatter gather entries. Scatter-gather enables a write
  982 * of multiple non-contiguous areas of an object in a single call. The extents
  983 * may overlap and/or be in any order. The only constraint is that:
 984 *	total_bytes(sglist) >= total_bytes(bio)
 985 */
 986int osd_req_write_sg(struct osd_request *or,
 987	const struct osd_obj_id *obj, struct bio *bio,
 988	const struct osd_sg_entry *sglist, unsigned numentries)
 989{
 990	u64 len;
 991	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
 992
 993	if (ret)
 994		return ret;
 995	osd_req_write(or, obj, 0, bio, len);
 996
 997	return 0;
 998}
 999EXPORT_SYMBOL(osd_req_write_sg);
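/*
 * Editorial sketch of a scatter-gather write (the two-extent layout is an
 * arbitrary assumption): @bio carries the data in stream order, while
 * @sglist states where each extent lands inside the object.
 *
 *	struct osd_sg_entry sglist[2] = {
 *		{ .offset = 0,		.len = PAGE_SIZE },
 *		{ .offset = 1 << 20,	.len = PAGE_SIZE },
 *	};
 *
 *	// bio maps 2 * PAGE_SIZE of data, so
 *	// total_bytes(sglist) >= total_bytes(bio) holds
 *	ret = osd_req_write_sg(or, obj, bio, sglist, ARRAY_SIZE(sglist));
 */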
1000
1001/* osd_req_read_sg: Read multiple extents of an object into @bio
1002 * See osd_req_write_sg
1003 */
1004int osd_req_read_sg(struct osd_request *or,
1005	const struct osd_obj_id *obj, struct bio *bio,
1006	const struct osd_sg_entry *sglist, unsigned numentries)
1007{
1008	u64 len;
1009	u64 off;
1010	int ret;
1011
1012	if (numentries > 1) {
1013		off = 0;
1014		ret = _add_sg_continuation_descriptor(or, sglist, numentries,
1015						      &len);
1016		if (ret)
1017			return ret;
1018	} else {
1019		/* Optimize the case of single segment, read_sg is a
1020		 * bidi operation.
1021		 */
1022		len = sglist->len;
1023		off = sglist->offset;
1024	}
1025	osd_req_read(or, obj, off, bio, len);
1026
1027	return 0;
1028}
1029EXPORT_SYMBOL(osd_req_read_sg);
1030
1031/* SG-list write/read Kern API
1032 *
1033 * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
1034 * of sg_entries. @numentries indicates how many pointers and sg_entries there
 1035 * are. Requiring an array of buff pointers allows a caller to do a
1036 * single write/read and scatter into multiple buffers.
1037 * NOTE: Each buffer + len should not cross a page boundary.
1038 */
1039static struct bio *_create_sg_bios(struct osd_request *or,
1040	void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
1041{
1042	struct request_queue *q = osd_request_queue(or->osd_dev);
1043	struct bio *bio;
1044	unsigned i;
1045
1046	bio = bio_kmalloc(GFP_KERNEL, numentries);
1047	if (unlikely(!bio)) {
 1048		OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
1049		return ERR_PTR(-ENOMEM);
1050	}
1051
1052	for (i = 0; i < numentries; i++) {
1053		unsigned offset = offset_in_page(buff[i]);
1054		struct page *page = virt_to_page(buff[i]);
1055		unsigned len = sglist[i].len;
1056		unsigned added_len;
1057
1058		BUG_ON(offset + len > PAGE_SIZE);
1059		added_len = bio_add_pc_page(q, bio, page, len, offset);
1060		if (unlikely(len != added_len)) {
1061			OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
1062				  len, added_len);
1063			bio_put(bio);
1064			return ERR_PTR(-ENOMEM);
1065		}
1066	}
1067
1068	return bio;
1069}
1070
1071int osd_req_write_sg_kern(struct osd_request *or,
1072	const struct osd_obj_id *obj, void **buff,
1073	const struct osd_sg_entry *sglist, unsigned numentries)
1074{
1075	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1076	if (IS_ERR(bio))
1077		return PTR_ERR(bio);
1078
1079	bio->bi_rw |= REQ_WRITE;
1080	osd_req_write_sg(or, obj, bio, sglist, numentries);
1081
1082	return 0;
1083}
1084EXPORT_SYMBOL(osd_req_write_sg_kern);
1085
1086int osd_req_read_sg_kern(struct osd_request *or,
1087	const struct osd_obj_id *obj, void **buff,
1088	const struct osd_sg_entry *sglist, unsigned numentries)
1089{
1090	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1091	if (IS_ERR(bio))
1092		return PTR_ERR(bio);
1093
1094	osd_req_read_sg(or, obj, bio, sglist, numentries);
1095
1096	return 0;
1097}
1098EXPORT_SYMBOL(osd_req_read_sg_kern);
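/*
 * Editorial example for the *_sg_kern variants (buffer names are placeholder
 * assumptions): buff[i] pairs with sglist[i], and buff[i] + sglist[i].len must
 * not cross a page boundary, as noted above _create_sg_bios().
 *
 *	void *buff[2] = { page_address(page0), page_address(page1) };
 *	struct osd_sg_entry sglist[2] = {
 *		{ .offset = 0,		.len = PAGE_SIZE },
 *		{ .offset = 1 << 20,	.len = PAGE_SIZE },
 *	};
 *
 *	ret = osd_req_read_sg_kern(or, obj, buff, sglist, ARRAY_SIZE(sglist));
 */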
1099
1100
1101
1102void osd_req_get_attributes(struct osd_request *or,
1103	const struct osd_obj_id *obj)
1104{
1105	_osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
1106}
1107EXPORT_SYMBOL(osd_req_get_attributes);
1108
1109void osd_req_set_attributes(struct osd_request *or,
1110	const struct osd_obj_id *obj)
1111{
1112	_osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
1113}
1114EXPORT_SYMBOL(osd_req_set_attributes);
1115
1116/*
1117 * Attributes List-mode
1118 */
1119
1120int osd_req_add_set_attr_list(struct osd_request *or,
1121	const struct osd_attr *oa, unsigned nelem)
1122{
1123	unsigned total_bytes = or->set_attr.total_bytes;
1124	void *attr_last;
1125	int ret;
1126
1127	if (or->attributes_mode &&
1128	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1129		WARN_ON(1);
1130		return -EINVAL;
1131	}
1132	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1133
1134	if (!total_bytes) { /* first-time: allocate and put list header */
1135		total_bytes = _osd_req_sizeof_alist_header(or);
1136		ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1137		if (ret)
1138			return ret;
1139		_osd_req_set_alist_type(or, or->set_attr.buff,
1140					OSD_ATTR_LIST_SET_RETRIEVE);
1141	}
1142	attr_last = or->set_attr.buff + total_bytes;
1143
1144	for (; nelem; --nelem) {
1145		unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
1146
1147		total_bytes += elem_size;
1148		if (unlikely(or->set_attr.alloc_size < total_bytes)) {
1149			or->set_attr.total_bytes = total_bytes - elem_size;
1150			ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1151			if (ret)
1152				return ret;
1153			attr_last =
1154				or->set_attr.buff + or->set_attr.total_bytes;
1155		}
1156
1157		_osd_req_alist_elem_encode(or, attr_last, oa);
1158
1159		attr_last += elem_size;
1160		++oa;
1161	}
1162
1163	or->set_attr.total_bytes = total_bytes;
1164	return 0;
1165}
1166EXPORT_SYMBOL(osd_req_add_set_attr_list);
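/*
 * Editorial sketch (the attribute page/id values are placeholders, not real
 * assignments): adding one set-attribute in list mode on top of whatever
 * command was already encoded into @or.
 *
 *	__be64 be_val = cpu_to_be64(val);
 *	struct osd_attr attr = ATTR_DEF(OSD_APAGE_APP_DEFINED_FIRST, 1,
 *					sizeof(be_val));
 *
 *	attr.val_ptr = &be_val;
 *	ret = osd_req_add_set_attr_list(or, &attr, 1);
 */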
1167
1168static int _req_append_segment(struct osd_request *or,
1169	unsigned padding, struct _osd_req_data_segment *seg,
1170	struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
1171{
1172	void *pad_buff;
1173	int ret;
1174
1175	if (padding) {
1176		/* check if we can just add it to last buffer */
1177		if (last_seg &&
1178		    (padding <= last_seg->alloc_size - last_seg->total_bytes))
1179			pad_buff = last_seg->buff + last_seg->total_bytes;
1180		else
1181			pad_buff = io->pad_buff;
1182
1183		ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
1184				       or->alloc_flags);
1185		if (ret)
1186			return ret;
1187		io->total_bytes += padding;
1188	}
1189
1190	ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
1191			       or->alloc_flags);
1192	if (ret)
1193		return ret;
1194
1195	io->total_bytes += seg->total_bytes;
1196	OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
1197		  seg->total_bytes);
1198	return 0;
1199}
1200
1201static int _osd_req_finalize_set_attr_list(struct osd_request *or)
1202{
1203	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1204	unsigned padding;
1205	int ret;
1206
1207	if (!or->set_attr.total_bytes) {
1208		cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
1209		return 0;
1210	}
1211
1212	cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
1213	cdbh->attrs_list.set_attr_offset =
1214		osd_req_encode_offset(or, or->out.total_bytes, &padding);
1215
1216	ret = _req_append_segment(or, padding, &or->set_attr,
1217				  or->out.last_seg, &or->out);
1218	if (ret)
1219		return ret;
1220
1221	or->out.last_seg = &or->set_attr;
1222	return 0;
1223}
1224
1225int osd_req_add_get_attr_list(struct osd_request *or,
1226	const struct osd_attr *oa, unsigned nelem)
1227{
1228	unsigned total_bytes = or->enc_get_attr.total_bytes;
1229	void *attr_last;
1230	int ret;
1231
1232	if (or->attributes_mode &&
1233	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1234		WARN_ON(1);
1235		return -EINVAL;
1236	}
1237	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1238
1239	/* first time calc data-in list header size */
1240	if (!or->get_attr.total_bytes)
1241		or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
1242
1243	/* calc data-out info */
1244	if (!total_bytes) { /* first-time: allocate and put list header */
1245		unsigned max_bytes;
1246
1247		total_bytes = _osd_req_sizeof_alist_header(or);
1248		max_bytes = total_bytes +
1249			nelem * sizeof(struct osd_attributes_list_attrid);
1250		ret = _alloc_get_attr_desc(or, max_bytes);
1251		if (ret)
1252			return ret;
1253
1254		_osd_req_set_alist_type(or, or->enc_get_attr.buff,
1255					OSD_ATTR_LIST_GET);
1256	}
1257	attr_last = or->enc_get_attr.buff + total_bytes;
1258
1259	for (; nelem; --nelem) {
1260		struct osd_attributes_list_attrid *attrid;
1261		const unsigned cur_size = sizeof(*attrid);
1262
1263		total_bytes += cur_size;
1264		if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
1265			or->enc_get_attr.total_bytes = total_bytes - cur_size;
1266			ret = _alloc_get_attr_desc(or,
1267					total_bytes + nelem * sizeof(*attrid));
1268			if (ret)
1269				return ret;
1270			attr_last = or->enc_get_attr.buff +
1271				or->enc_get_attr.total_bytes;
1272		}
1273
1274		attrid = attr_last;
1275		attrid->attr_page = cpu_to_be32(oa->attr_page);
1276		attrid->attr_id = cpu_to_be32(oa->attr_id);
1277
1278		attr_last += cur_size;
1279
1280		/* calc data-in size */
1281		or->get_attr.total_bytes +=
1282			_osd_req_alist_elem_size(or, oa->len);
1283		++oa;
1284	}
1285
1286	or->enc_get_attr.total_bytes = total_bytes;
1287
1288	OSD_DEBUG(
1289	       "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
1290	       or->get_attr.total_bytes,
1291	       or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
1292	       or->enc_get_attr.total_bytes,
1293	       (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
1294			/ sizeof(struct osd_attributes_list_attrid));
1295
1296	return 0;
1297}
1298EXPORT_SYMBOL(osd_req_add_get_attr_list);
1299
1300static int _osd_req_finalize_get_attr_list(struct osd_request *or)
1301{
1302	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1303	unsigned out_padding;
1304	unsigned in_padding;
1305	int ret;
1306
1307	if (!or->enc_get_attr.total_bytes) {
1308		cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
1309		cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
1310		return 0;
1311	}
1312
1313	ret = _alloc_get_attr_list(or);
1314	if (ret)
1315		return ret;
1316
1317	/* The out-going buffer info update */
1318	OSD_DEBUG("out-going\n");
1319	cdbh->attrs_list.get_attr_desc_bytes =
1320		cpu_to_be32(or->enc_get_attr.total_bytes);
1321
1322	cdbh->attrs_list.get_attr_desc_offset =
1323		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1324
1325	ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
1326				  or->out.last_seg, &or->out);
1327	if (ret)
1328		return ret;
1329	or->out.last_seg = &or->enc_get_attr;
1330
1331	/* The incoming buffer info update */
1332	OSD_DEBUG("in-coming\n");
1333	cdbh->attrs_list.get_attr_alloc_length =
1334		cpu_to_be32(or->get_attr.total_bytes);
1335
1336	cdbh->attrs_list.get_attr_offset =
1337		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1338
1339	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1340				  &or->in);
1341	if (ret)
1342		return ret;
1343	or->in.last_seg = &or->get_attr;
1344
1345	return 0;
1346}
1347
1348int osd_req_decode_get_attr_list(struct osd_request *or,
1349	struct osd_attr *oa, int *nelem, void **iterator)
1350{
1351	unsigned cur_bytes, returned_bytes;
1352	int n;
1353	const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
1354	void *cur_p;
1355
1356	if (!_osd_req_is_alist_type(or, or->get_attr.buff,
1357				    OSD_ATTR_LIST_SET_RETRIEVE)) {
1358		oa->attr_page = 0;
1359		oa->attr_id = 0;
1360		oa->val_ptr = NULL;
1361		oa->len = 0;
1362		*iterator = NULL;
1363		return 0;
1364	}
1365
1366	if (*iterator) {
1367		BUG_ON((*iterator < or->get_attr.buff) ||
1368		     (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
1369		cur_p = *iterator;
1370		cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
1371		returned_bytes = or->get_attr.total_bytes;
1372	} else { /* first time decode the list header */
1373		cur_bytes = sizeof_attr_list;
1374		returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
1375					sizeof_attr_list;
1376
1377		cur_p = or->get_attr.buff + sizeof_attr_list;
1378
1379		if (returned_bytes > or->get_attr.alloc_size) {
1380			OSD_DEBUG("target report: space was not big enough! "
1381				  "Allocate=%u Needed=%u\n",
1382				  or->get_attr.alloc_size,
1383				  returned_bytes + sizeof_attr_list);
1384
1385			returned_bytes =
1386				or->get_attr.alloc_size - sizeof_attr_list;
1387		}
1388		or->get_attr.total_bytes = returned_bytes;
1389	}
1390
1391	for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
1392		int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
1393						 returned_bytes - cur_bytes);
1394
1395		if (inc < 0) {
1396			OSD_ERR("BAD FOOD from target. list not valid!"
1397				"c=%d r=%d n=%d\n",
1398				cur_bytes, returned_bytes, n);
1399			oa->val_ptr = NULL;
1400			cur_bytes = returned_bytes; /* break the caller loop */
1401			break;
1402		}
1403
1404		cur_bytes += inc;
1405		cur_p += inc;
1406		++oa;
1407	}
1408
1409	*iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
1410	*nelem = n;
1411	return returned_bytes - cur_bytes;
1412}
1413EXPORT_SYMBOL(osd_req_decode_get_attr_list);
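/*
 * Editorial sketch of the list-mode get/decode round trip, modelled on
 * _osd_get_print_system_info() above:
 *
 *	struct osd_attr attrs[] = {
 *		ATTR_DEF(OSD_APAGE_ROOT_INFORMATION,
 *			 OSD_ATTR_RI_USED_CAPACITY, 8),
 *	};
 *	int nelem = ARRAY_SIZE(attrs);
 *	void *iter = NULL;
 *
 *	osd_req_get_attributes(or, &osd_root_object);
 *	osd_req_add_get_attr_list(or, attrs, ARRAY_SIZE(attrs));
 *	// ... osd_finalize_request() + osd_execute_request() ...
 *	osd_req_decode_get_attr_list(or, attrs, &nelem, &iter);
 *	// attrs[0].val_ptr now points into or->get_attr.buff and stays valid
 *	// only until osd_end_request(); attrs[0].len is the returned length.
 */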
1414
1415/*
1416 * Attributes Page-mode
1417 */
1418
1419int osd_req_add_get_attr_page(struct osd_request *or,
1420	u32 page_id, void *attar_page, unsigned max_page_len,
1421	const struct osd_attr *set_one_attr)
1422{
1423	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1424
1425	if (or->attributes_mode &&
1426	    or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1427		WARN_ON(1);
1428		return -EINVAL;
1429	}
1430	or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
1431
1432	or->get_attr.buff = attar_page;
1433	or->get_attr.total_bytes = max_page_len;
1434
1435	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
1436	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
1437
1438	if (!set_one_attr || !set_one_attr->attr_page)
1439		return 0; /* The set is optional */
1440
1441	or->set_attr.buff = set_one_attr->val_ptr;
1442	or->set_attr.total_bytes = set_one_attr->len;
1443
1444	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
1445	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
1446	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
1447	return 0;
1448}
1449EXPORT_SYMBOL(osd_req_add_get_attr_page);
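/*
 * Editorial sketch of page mode (the buffer size is an arbitrary assumption):
 * the raw attribute page is returned into a caller buffer, and at most one
 * attribute may be set in the same command (pass NULL to skip the set).
 *
 *	void *page_buf = kzalloc(256, GFP_KERNEL);
 *
 *	osd_req_get_attributes(or, obj);
 *	ret = osd_req_add_get_attr_page(or, OSD_APAGE_ROOT_INFORMATION,
 *					page_buf, 256, NULL);
 */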
1450
1451static int _osd_req_finalize_attr_page(struct osd_request *or)
1452{
1453	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1454	unsigned in_padding, out_padding;
1455	int ret;
1456
1457	/* returned page */
1458	cdbh->attrs_page.get_attr_offset =
1459		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1460
1461	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1462				  &or->in);
1463	if (ret)
1464		return ret;
1465
1466	if (or->set_attr.total_bytes == 0)
1467		return 0;
1468
1469	/* set one value */
1470	cdbh->attrs_page.set_attr_offset =
1471		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1472
1473	ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
1474				  &or->out);
1475	return ret;
1476}
1477
1478static inline void osd_sec_parms_set_out_offset(bool is_v1,
1479	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1480{
1481	if (is_v1)
1482		sec_parms->v1.data_out_integrity_check_offset = offset;
1483	else
1484		sec_parms->v2.data_out_integrity_check_offset = offset;
1485}
1486
1487static inline void osd_sec_parms_set_in_offset(bool is_v1,
1488	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1489{
1490	if (is_v1)
1491		sec_parms->v1.data_in_integrity_check_offset = offset;
1492	else
1493		sec_parms->v2.data_in_integrity_check_offset = offset;
1494}
1495
1496static int _osd_req_finalize_data_integrity(struct osd_request *or,
1497	bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
1498	const u8 *cap_key)
1499{
1500	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1501	int ret;
1502
1503	if (!osd_is_sec_alldata(sec_parms))
1504		return 0;
1505
1506	if (has_out) {
1507		struct _osd_req_data_segment seg = {
1508			.buff = &or->out_data_integ,
1509			.total_bytes = sizeof(or->out_data_integ),
1510		};
1511		unsigned pad;
1512
1513		or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1514		or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1515			or->set_attr.total_bytes);
1516		or->out_data_integ.get_attributes_bytes = cpu_to_be64(
1517			or->enc_get_attr.total_bytes);
1518
1519		osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
1520			osd_req_encode_offset(or, or->out.total_bytes, &pad));
1521
1522		ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
1523					  &or->out);
1524		if (ret)
1525			return ret;
1526		or->out.last_seg = NULL;
1527
1528		/* they are now all chained to request sign them all together */
1529		osd_sec_sign_data(&or->out_data_integ, out_data_bio,
1530				  cap_key);
1531	}
1532
1533	if (has_in) {
1534		struct _osd_req_data_segment seg = {
1535			.buff = &or->in_data_integ,
1536			.total_bytes = sizeof(or->in_data_integ),
1537		};
1538		unsigned pad;
1539
1540		osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
1541			osd_req_encode_offset(or, or->in.total_bytes, &pad));
1542
1543		ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
1544					  &or->in);
1545		if (ret)
1546			return ret;
1547
1548		or->in.last_seg = NULL;
1549	}
1550
1551	return 0;
1552}
1553
1554/*
1555 * osd_finalize_request and helpers
1556 */
1557static struct request *_make_request(struct request_queue *q, bool has_write,
1558			      struct _osd_io_info *oii, gfp_t flags)
1559{
1560	if (oii->bio)
1561		return blk_make_request(q, oii->bio, flags);
1562	else {
1563		struct request *req;
1564
1565		req = blk_get_request(q, has_write ? WRITE : READ, flags);
1566		if (unlikely(!req))
1567			return ERR_PTR(-ENOMEM);
1568
1569		return req;
1570	}
1571}
1572
1573static int _init_blk_request(struct osd_request *or,
1574	bool has_in, bool has_out)
1575{
1576	gfp_t flags = or->alloc_flags;
1577	struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1578	struct request_queue *q = scsi_device->request_queue;
1579	struct request *req;
1580	int ret;
1581
1582	req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1583	if (IS_ERR(req)) {
1584		ret = PTR_ERR(req);
1585		goto out;
1586	}
1587
1588	or->request = req;
1589	req->cmd_type = REQ_TYPE_BLOCK_PC;
1590	req->cmd_flags |= REQ_QUIET;
1591
1592	req->timeout = or->timeout;
1593	req->retries = or->retries;
1594	req->sense = or->sense;
1595	req->sense_len = 0;
1596
1597	if (has_out) {
1598		or->out.req = req;
1599		if (has_in) {
1600			/* allocate bidi request */
1601			req = _make_request(q, false, &or->in, flags);
1602			if (IS_ERR(req)) {
1603				OSD_DEBUG("blk_get_request for bidi failed\n");
1604				ret = PTR_ERR(req);
1605				goto out;
1606			}
1607			req->cmd_type = REQ_TYPE_BLOCK_PC;
1608			or->in.req = or->request->next_rq = req;
1609		}
1610	} else if (has_in)
1611		or->in.req = req;
1612
1613	ret = 0;
1614out:
1615	OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
1616			or, has_in, has_out, ret, or->request);
1617	return ret;
1618}
1619
1620int osd_finalize_request(struct osd_request *or,
1621	u8 options, const void *cap, const u8 *cap_key)
1622{
1623	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1624	bool has_in, has_out;
1625	 /* Save for data_integrity without the cdb_continuation */
1626	struct bio *out_data_bio = or->out.bio;
1627	u64 out_data_bytes = or->out.total_bytes;
1628	int ret;
1629
1630	if (options & OSD_REQ_FUA)
1631		cdbh->options |= OSD_CDB_FUA;
1632
1633	if (options & OSD_REQ_DPO)
1634		cdbh->options |= OSD_CDB_DPO;
1635
1636	if (options & OSD_REQ_BYPASS_TIMESTAMPS)
1637		cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
1638
1639	osd_set_caps(&or->cdb, cap);
1640
1641	has_in = or->in.bio || or->get_attr.total_bytes;
1642	has_out = or->out.bio || or->cdb_cont.total_bytes ||
1643		or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
1644
1645	ret = _osd_req_finalize_cdb_cont(or, cap_key);
1646	if (ret) {
1647		OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
1648		return ret;
1649	}
1650	ret = _init_blk_request(or, has_in, has_out);
1651	if (ret) {
1652		OSD_DEBUG("_init_blk_request failed\n");
1653		return ret;
1654	}
1655
1656	or->out.pad_buff = sg_out_pad_buffer;
1657	or->in.pad_buff = sg_in_pad_buffer;
1658
1659	if (!or->attributes_mode)
1660		or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1661	cdbh->command_specific_options |= or->attributes_mode;
1662	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1663		ret = _osd_req_finalize_attr_page(or);
1664		if (ret) {
1665			OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
1666			return ret;
1667		}
1668	} else {
1669		/* TODO: I think that for the GET_ATTR command these 2 should
 1670		 * be reversed to keep them in execution order (for embedded
1671		 * targets with low memory footprint)
1672		 */
1673		ret = _osd_req_finalize_set_attr_list(or);
1674		if (ret) {
1675			OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
1676			return ret;
1677		}
1678
1679		ret = _osd_req_finalize_get_attr_list(or);
1680		if (ret) {
1681			OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
1682			return ret;
1683		}
1684	}
1685
1686	ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1687					       out_data_bio, out_data_bytes,
1688					       cap_key);
1689	if (ret)
1690		return ret;
1691
1692	osd_sec_sign_cdb(&or->cdb, cap_key);
1693
1694	or->request->cmd = or->cdb.buff;
1695	or->request->cmd_len = _osd_req_cdb_len(or);
1696
1697	return 0;
1698}
1699EXPORT_SYMBOL(osd_finalize_request);
1700
1701static bool _is_osd_security_code(int code)
1702{
1703	return	(code == osd_security_audit_value_frozen) ||
1704		(code == osd_security_working_key_frozen) ||
1705		(code == osd_nonce_not_unique) ||
1706		(code == osd_nonce_timestamp_out_of_range) ||
1707		(code == osd_invalid_dataout_buffer_integrity_check_value);
1708}
1709
1710#define OSD_SENSE_PRINT1(fmt, a...) \
1711	do { \
1712		if (__cur_sense_need_output) \
1713			OSD_ERR(fmt, ##a); \
1714	} while (0)
1715
1716#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1("    " fmt, ##a)
1717
1718int osd_req_decode_sense_full(struct osd_request *or,
1719	struct osd_sense_info *osi, bool silent,
1720	struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
1721	struct osd_attr *bad_attr_list, int max_attr)
1722{
1723	int sense_len, original_sense_len;
1724	struct osd_sense_info local_osi;
1725	struct scsi_sense_descriptor_based *ssdb;
1726	void *cur_descriptor;
1727#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
1728	const bool __cur_sense_need_output = false;
1729#else
1730	bool __cur_sense_need_output = !silent;
1731#endif
1732	int ret;
1733
1734	if (likely(!or->req_errors))
1735		return 0;
1736
1737	osi = osi ? : &local_osi;
1738	memset(osi, 0, sizeof(*osi));
1739
1740	ssdb = (typeof(ssdb))or->sense;
1741	sense_len = or->sense_len;
1742	if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1743		OSD_ERR("Block-layer returned error(0x%x) but "
1744			"sense_len(%u) || key(%d) is empty\n",
1745			or->req_errors, sense_len, ssdb->sense_key);
1746		goto analyze;
1747	}
1748
1749	if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
1750		OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
1751			ssdb->response_code, sense_len);
1752		goto analyze;
1753	}
1754
1755	osi->key = ssdb->sense_key;
1756	osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
1757	original_sense_len = ssdb->additional_sense_length + 8;
1758
1759#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
1760	if (__cur_sense_need_output)
1761		__cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
1762#endif
1763	OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
1764			"additional_code=0x%x async_error=%d errors=0x%x\n",
1765			osi->key, original_sense_len, sense_len,
1766			osi->additional_code, or->async_error,
1767			or->req_errors);
1768
1769	if (original_sense_len < sense_len)
1770		sense_len = original_sense_len;
1771
1772	cur_descriptor = ssdb->ssd;
1773	sense_len -= sizeof(*ssdb);
1774	while (sense_len > 0) {
1775		struct scsi_sense_descriptor *ssd = cur_descriptor;
1776		int cur_len = ssd->additional_length + 2;
1777
1778		sense_len -= cur_len;
1779
1780		if (sense_len < 0)
1781			break; /* sense was truncated */
1782
1783		switch (ssd->descriptor_type) {
1784		case scsi_sense_information:
1785		case scsi_sense_command_specific_information:
1786		{
1787			struct scsi_sense_command_specific_data_descriptor
1788				*sscd = cur_descriptor;
1789
1790			osi->command_info =
1791				get_unaligned_be64(&sscd->information) ;
1792			OSD_SENSE_PRINT2(
1793				"command_specific_information 0x%llx \n",
1794				_LLU(osi->command_info));
1795			break;
1796		}
1797		case scsi_sense_key_specific:
1798		{
1799			struct scsi_sense_key_specific_data_descriptor
1800				*ssks = cur_descriptor;
1801
1802			osi->sense_info = get_unaligned_be16(&ssks->value);
1803			OSD_SENSE_PRINT2(
1804				"sense_key_specific_information %u "
1805				"sksv_cd_bpv_bp (0x%x)\n",
1806				osi->sense_info, ssks->sksv_cd_bpv_bp);
1807			break;
1808		}
1809		case osd_sense_object_identification:
1810		{ /* FIXME: keep the first, not the last; store them in an array */
1811			struct osd_sense_identification_data_descriptor
1812				*osidd = cur_descriptor;
1813
1814			osi->not_initiated_command_functions =
1815				le32_to_cpu(osidd->not_initiated_functions);
1816			osi->completed_command_functions =
1817				le32_to_cpu(osidd->completed_functions);
1818			osi->obj.partition = be64_to_cpu(osidd->partition_id);
1819			osi->obj.id = be64_to_cpu(osidd->object_id);
1820			OSD_SENSE_PRINT2(
1821				"object_identification pid=0x%llx oid=0x%llx\n",
1822				_LLU(osi->obj.partition), _LLU(osi->obj.id));
1823			OSD_SENSE_PRINT2(
1824				"not_initiated_bits(%x) "
1825				"completed_command_bits(%x)\n",
1826				osi->not_initiated_command_functions,
1827				osi->completed_command_functions);
1828			break;
1829		}
1830		case osd_sense_response_integrity_check:
1831		{
1832			struct osd_sense_response_integrity_check_descriptor
1833				*osricd = cur_descriptor;
1834			const unsigned len =
1835					  sizeof(osricd->integrity_check_value);
1836			char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
1837
1838			hex_dump_to_buffer(osricd->integrity_check_value, len,
1839				       32, 1, key_dump, sizeof(key_dump), true);
1840			OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
			break;
1841		}
1842		case osd_sense_attribute_identification:
1843		{
1844			struct osd_sense_attributes_data_descriptor
1845				*osadd = cur_descriptor;
1846			unsigned len = min(cur_len, sense_len);
1847			struct osd_sense_attr *pattr = osadd->sense_attrs;
1848
1849			while (len >= sizeof(*pattr)) {
1850				u32 attr_page = be32_to_cpu(pattr->attr_page);
1851				u32 attr_id = be32_to_cpu(pattr->attr_id);
1852
1853				if (!osi->attr.attr_page) {
1854					osi->attr.attr_page = attr_page;
1855					osi->attr.attr_id = attr_id;
1856				}
1857
1858				if (bad_attr_list && max_attr) {
1859					bad_attr_list->attr_page = attr_page;
1860					bad_attr_list->attr_id = attr_id;
1861					bad_attr_list++;
1862					max_attr--;
1863				}
1864
1865				len -= sizeof(*pattr);
1866				OSD_SENSE_PRINT2(
1867					"osd_sense_attribute_identification "
1868					"attr_page=0x%x attr_id=0x%x\n",
1869					attr_page, attr_id);
				pattr++;
1870			}
			break;
1871		}
1872		/*These are not legal for OSD*/
1873		case scsi_sense_field_replaceable_unit:
1874			OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
1875			break;
1876		case scsi_sense_stream_commands:
1877			OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
1878			break;
1879		case scsi_sense_block_commands:
1880			OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
1881			break;
1882		case scsi_sense_ata_return:
1883			OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
1884			break;
1885		default:
1886			if (ssd->descriptor_type <= scsi_sense_Reserved_last)
1887				OSD_SENSE_PRINT2(
1888					"scsi_sense Reserved descriptor (0x%x)\n",
1889					ssd->descriptor_type);
1890			else
1891				OSD_SENSE_PRINT2(
1892					"scsi_sense Vendor descriptor (0x%x)\n",
1893					ssd->descriptor_type);
1894		}
1895
1896		cur_descriptor += cur_len;
1897	}
1898
1899analyze:
1900	if (!osi->key) {
1901		/* The SCSI sense is empty: the request was never issued to the
1902		 * target; the Linux return code might tell us what happened.
1903		 */
1904		if (or->async_error == -ENOMEM)
1905			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
1906		else
1907			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
1908		ret = or->async_error;
1909	} else if (osi->key <= scsi_sk_recovered_error) {
1910		osi->osd_err_pri = 0;
1911		ret = 0;
1912	} else if (osi->additional_code == scsi_invalid_field_in_cdb) {
1913		if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
1914			osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
1915			ret = -EFAULT; /* caller should recover from this */
1916		} else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
1917			osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
1918			ret = -ENOENT;
1919		} else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
1920			osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
1921			ret = -EACCES;
1922		} else {
1923			osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1924			ret = -EINVAL;
1925		}
1926	} else if (osi->additional_code == osd_quota_error) {
1927		osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
1928		ret = -ENOSPC;
1929	} else if (_is_osd_security_code(osi->additional_code)) {
1930		osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1931		ret = -EINVAL;
1932	} else {
1933		osi->osd_err_pri = OSD_ERR_PRI_EIO;
1934		ret = -EIO;
1935	}
1936
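	/* Residuals of zero mean "not reported"; assume nothing was
	 * transferred, since this path is only reached on errors.
	 */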
1937	if (!or->out.residual)
1938		or->out.residual = or->out.total_bytes;
1939	if (!or->in.residual)
1940		or->in.residual = or->in.total_bytes;
1941
1942	return ret;
1943}
1944EXPORT_SYMBOL(osd_req_decode_sense_full);
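/*
 * Example caller (illustrative sketch only): a completion path can decode
 * the sense and branch on the derived priority or return code, e.g.:
 *
 *	struct osd_sense_info osi;
 *	int error = osd_req_decode_sense_full(or, &osi, true, NULL, 0,
 *					      NULL, 0);
 *
 *	if (error == -ENOENT)
 *		handle_missing_object();	(OSD_ERR_PRI_NOT_FOUND)
 *	else if (osi.osd_err_pri == OSD_ERR_PRI_NO_SPACE)
 *		handle_quota_error();		(error is -ENOSPC)
 *
 * handle_missing_object() and handle_quota_error() are hypothetical helpers.
 */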
1945
1946/*
1947 * Implementation of osd_sec.h API
1948 * TODO: Move to a separate osd_sec.c file at a later stage.
1949 */
1950
1951enum { OSD_SEC_CAP_V1_ALL_CAPS =
1952	OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE   |
1953	OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
1954	OSD_SEC_CAP_WRITE  | OSD_SEC_CAP_READ     | OSD_SEC_CAP_POL_SEC  |
1955	OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
1956};
1957
1958enum { OSD_SEC_CAP_V2_ALL_CAPS =
1959	OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
1960};
1961
1962void osd_sec_init_nosec_doall_caps(void *caps,
1963	const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
1964{
1965	struct osd_capability *cap = caps;
1966	u8 type;
1967	u8 descriptor_type;
1968
1969	if (likely(obj->id)) {
1970		if (unlikely(is_collection)) {
1971			type = OSD_SEC_OBJ_COLLECTION;
1972			descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
1973						  OSD_SEC_OBJ_DESC_COL;
1974		} else {
1975			type = OSD_SEC_OBJ_USER;
1976			descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
1977		}
1978		WARN_ON(!obj->partition);
1979	} else {
1980		type = obj->partition ? OSD_SEC_OBJ_PARTITION :
1981					OSD_SEC_OBJ_ROOT;
1982		descriptor_type = OSD_SEC_OBJ_DESC_PAR;
1983	}
1984
1985	memset(cap, 0, sizeof(*cap));
1986
1987	cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
1988	cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
1989	cap->h.security_method = OSD_SEC_NOSEC;
1990/*	cap->expiration_time;
1991	cap->AUDIT[30-10];
1992	cap->discriminator[42-30];
1993	cap->object_created_time; */
1994	cap->h.object_type = type;
1995	osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
1996	cap->h.object_descriptor_type = descriptor_type;
1997	cap->od.obj_desc.policy_access_tag = 0;
1998	cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
1999	cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
2000}
2001EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
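/*
 * Example (illustrative sketch; the partition and object ids are made up):
 * build an all-capabilities, no-security capability for a v1 user object:
 *
 *	u8 caps[OSD_CAP_LEN];
 *	struct osd_obj_id obj = {.partition = 0x10017, .id = 0x10083};
 *
 *	osd_sec_init_nosec_doall_caps(caps, &obj, false, true);
 *
 * A capability built like this is what osd_set_caps() below copies into
 * the CDB.
 */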
2002
2003/* FIXME: Extract version from caps pointer.
2004 *        Also Pete's target only supports caps from OSDv1 for now
2005 */
2006void osd_set_caps(struct osd_cdb *cdb, const void *caps)
2007{
2008	bool is_ver1 = true;
2009	/* NOTE: They start at same address */
2010	memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
2011}
2012
2013bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
2014{
2015	return false;
2016}
2017
2018void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
2019{
2020}
2021
2022void osd_sec_sign_data(void *data_integ __unused,
2023		       struct bio *bio __unused, const u8 *cap_key __unused)
2024{
2025}
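/*
 * Note: osd_sec_sign_cdb() and osd_sec_sign_data() are intentionally empty;
 * with the OSD_SEC_NOSEC capabilities built above there is nothing to sign.
 * A security-enabled implementation would compute the integrity check
 * values here.
 */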
2026
2027/*
2028 * Declared in osd_protocol.h
2029 * 4.12.5 Data-In and Data-Out buffer offsets
2030 * byte offset = mantissa * (2^(exponent+8))
2031 * Returns the smallest allowed encoded offset that contains the given @offset.
2032 * The actual byte offset encoded is @offset + *@padding.
2033 */
2034osd_cdb_offset __osd_encode_offset(
2035	u64 offset, unsigned *padding, int min_shift, int max_shift)
2036{
2037	u64 try_offset = -1, mod, align;
2038	osd_cdb_offset be32_offset;
2039	int shift;
2040
2041	*padding = 0;
2042	if (!offset)
2043		return 0;
2044
2045	for (shift = min_shift; shift < max_shift; ++shift) {
2046		try_offset = offset >> shift;
2047		if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
2048			break;
2049	}
2050
2051	BUG_ON(shift == max_shift);
2052
2053	align = 1 << shift;
2054	mod = offset & (align - 1);
2055	if (mod) {
2056		*padding = align - mod;
2057		try_offset += 1;
2058	}
2059
2060	try_offset |= ((shift - 8) & 0xf) << 28;
2061	be32_offset = cpu_to_be32((u32)try_offset);
2062
2063	OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
2064		 _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
2065		 be32_offset, *padding);
2066	return be32_offset;
2067}
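/*
 * Worked example (illustrative, assuming min_shift == 8): for @offset == 4100
 * the mantissa becomes 17, the exponent nibble 0 and *@padding == 252, so the
 * encoded value addresses byte 17 * 2^8 == 4352 == 4100 + 252.  An already
 * aligned @offset such as 4096 encodes as mantissa 16 with no padding.
 */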