// SPDX-License-Identifier: GPL-2.0-only
/* QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include "qedi_hsi.h"
#include <linux/qed/qed_if.h>

#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"

#define SCSI_NUM_SGES_IN_CACHE 0x4

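/*
 * An SGL is treated as "slow" when it has more than
 * SCSI_NUM_SGES_SLOW_SGL_THR entries and contains a small middle SGE;
 * callers then program the work queue entry for the slow-I/O path.
 */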
static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
	return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
}

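/*
 * Copy an SGL description into the task context: SGL base address,
 * total length and SGE count, plus up to SCSI_NUM_SGES_IN_CACHE cached
 * SGEs, all converted to little-endian firmware format.
 */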
static
void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
			   struct scsi_cached_sges *ctx_data_desc,
			   struct scsi_sgl_task_params *sgl_task_params)
{
	u8 sge_index;
	u8 num_sges;
	u32 val;

	num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
			     SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;

	/* sgl params */
	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
	ctx_sgl_params->sgl_addr.lo = val;
	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
	ctx_sgl_params->sgl_addr.hi = val;
	val = cpu_to_le32(sgl_task_params->total_buffer_size);
	ctx_sgl_params->sgl_total_length = val;
	ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);

	for (sge_index = 0; sge_index < num_sges; sge_index++) {
		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
		ctx_data_desc->sge[sge_index].sge_addr.lo = val;
		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
		ctx_data_desc->sge[sge_index].sge_addr.hi = val;
		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
		ctx_data_desc->sge[sge_index].sge_len = val;
	}
}

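/*
 * I/O size of a read/write task: the TX size for initiator writes and
 * target reads, the RX size otherwise.  When DIF runs on the network
 * side, the protected buffer size (total_buffer_size) is used instead.
 */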
static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
			     enum iscsi_task_type task_type,
			     struct scsi_sgl_task_params *sgl_task_params,
			     struct scsi_dif_task_params *dif_task_params)
{
	u32 io_size;

	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
	    task_type == ISCSI_TASK_TYPE_TARGET_READ)
		io_size = task_params->tx_io_size;
	else
		io_size = task_params->rx_io_size;

	if (!io_size)
		return 0;

	if (!dif_task_params)
		return io_size;

	return !dif_task_params->dif_on_network ?
	       io_size : sgl_task_params->total_buffer_size;
}

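/*
 * Program the DIF flags (protection interval size, DIF towards the peer,
 * DIF on the host interface) when DIF parameters were supplied.
 */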
static void
init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
		       struct scsi_dif_task_params *dif_task_params)
{
	if (!dif_task_params)
		return;

	SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
		  dif_task_params->dif_block_size_log);
	SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
		  dif_task_params->dif_on_network ? 1 : 0);
	SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
		  dif_task_params->dif_on_host ? 1 : 0);
}

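/*
 * Build the submission queue entry for a task: WQE type, SGE count,
 * continuation length and CDB size.  Cleanup tasks only get a
 * TASK_CLEANUP WQE.  For login and mid-path PDUs the ISCSI_WQE_RESPONSE
 * bit is driven by whether the PDU is expected to advance StatSN
 * (advance_statsn).
 */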
static void init_sqe(struct iscsi_task_params *task_params,
		     struct scsi_sgl_task_params *sgl_task_params,
		     struct scsi_dif_task_params *dif_task_params,
		     struct iscsi_common_hdr *pdu_header,
		     struct scsi_initiator_cmd_params *cmd_params,
		     enum iscsi_task_type task_type,
		     bool is_cleanup)
{
	if (!task_params->sqe)
		return;

	memset(task_params->sqe, 0, sizeof(*task_params->sqe));
	task_params->sqe->task_id = cpu_to_le16(task_params->itid);
	if (is_cleanup) {
		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_TASK_CLEANUP);
		return;
	}

	switch (task_type) {
	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
	{
		u32 buf_size = 0;
		u32 num_sges = 0;

		init_dif_context_flags(&task_params->sqe->prot_flags,
				       dif_task_params);

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_NORMAL);

		if (task_params->tx_io_size) {
			buf_size = calc_rw_task_size(task_params, task_type,
						     sgl_task_params,
						     dif_task_params);

			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
					     sgl_task_params->small_mid_sge))
				num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
			else
				num_sges = min(sgl_task_params->num_sges,
					       (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
		}

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
			  num_sges);
		SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
			  buf_size);

		if (GET_FIELD(pdu_header->hdr_second_dword,
			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CDB_SIZE,
				  cmd_params->extended_cdb_sge.sge_len);
	}
		break;
	case ISCSI_TASK_TYPE_INITIATOR_READ:
		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_NORMAL);

		if (GET_FIELD(pdu_header->hdr_second_dword,
			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CDB_SIZE,
				  cmd_params->extended_cdb_sge.sge_len);
		break;
	case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
	case ISCSI_TASK_TYPE_MIDPATH:
	{
		bool advance_statsn = true;

		if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
				  ISCSI_WQE_TYPE_LOGIN);
		else
			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
				  ISCSI_WQE_TYPE_MIDDLE_PATH);

		if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
			u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
					      ISCSI_COMMON_HDR_OPCODE);

			if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
			    (opcode != ISCSI_OPCODE_NOP_IN ||
			    pdu_header->itt == ISCSI_TTT_ALL_ONES))
				advance_statsn = false;
		}

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
			  advance_statsn ? 1 : 0);

		if (task_params->tx_io_size) {
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CONT_LEN, task_params->tx_io_size);

			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
					     sgl_task_params->small_mid_sge))
				SET_FIELD(task_params->sqe->flags,
					  ISCSI_WQE_NUM_SGES,
					  ISCSI_WQE_NUM_SGES_SLOWIO);
			else
				SET_FIELD(task_params->sqe->flags,
					  ISCSI_WQE_NUM_SGES,
					  min(sgl_task_params->num_sges,
					      (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
		}
	}
		break;
	default:
		break;
	}
}

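/*
 * Common task-context initialisation: clear the context (preserving the
 * CDU validation byte), copy the PDU header into the Ystorm context and
 * set the task type, connection icid and CQ/RSS number.
 */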
static void init_default_iscsi_task(struct iscsi_task_params *task_params,
				    struct data_hdr *pdu_header,
				    enum iscsi_task_type task_type)
{
	struct iscsi_task_context *context;
	u32 val;
	u16 index;
	u8 val_byte;

	context = task_params->context;
	val_byte = context->mstorm_ag_context.cdu_validation;
	memset(context, 0, sizeof(*context));
	context->mstorm_ag_context.cdu_validation = val_byte;

	for (index = 0; index <
	     ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
	     index++) {
		val = cpu_to_le32(pdu_header->data[index]);
		context->ystorm_st_context.pdu_hdr.data.data[index] = val;
	}

	context->mstorm_st_context.task_type = task_type;
	context->mstorm_ag_context.task_cid =
					    cpu_to_le16(task_params->conn_icid);

	SET_FIELD(context->ustorm_ag_context.flags1,
		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);

	context->ustorm_st_context.task_type = task_type;
	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
	context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
}

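/*
 * For commands carrying an extended CDB, record the CDB size and the SGE
 * (address and length) that holds it in the Ystorm PDU header.
 */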
static
void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
					  struct scsi_initiator_cmd_params *cmd)
{
	union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
	u32 val;

	if (!cmd->extended_cdb_sge.sge_len)
		return;

	SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
		  ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
		  cmd->extended_cdb_sge.sge_len);
	val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
	ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
	val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
	ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
	val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
	ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
}

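/*
 * Ustorm storm/aggregation context: remaining receive length, expected
 * data transfer length, SGE count and the DIF error completion enable.
 */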
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
			struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
			u32 remaining_recv_len, u32 expected_data_transfer_len,
			u8 num_sges, bool tx_dif_conn_err_en)
{
	u32 val;

	ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
	ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
	val = cpu_to_le32(expected_data_transfer_len);
	ustorm_st_cxt->exp_data_transfer_len = val;
	SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
	SET_FIELD(ustorm_ag_cxt->flags2,
		  USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
		  tx_dif_conn_err_en ? 1 : 0);
}

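/*
 * Set exp_data_acked / exp_cont_len per task type.  For initiator writes
 * the unsolicited data limit is derived from the connection's
 * initial_r2t, immediate_data, first_burst_length and
 * max_send_pdu_length parameters; AHS bytes are accounted for when
 * present.
 */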
static
void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
					struct iscsi_conn_params  *conn_params,
					enum iscsi_task_type task_type,
					u32 task_size,
					u32 exp_data_transfer_len,
					u8 total_ahs_length)
{
	u32 max_unsolicited_data = 0, val;

	if (total_ahs_length &&
	    (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
	     task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
		SET_FIELD(context->ustorm_st_context.flags2,
			  USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);

	switch (task_type) {
	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
		if (!conn_params->initial_r2t)
			max_unsolicited_data = conn_params->first_burst_length;
		else if (conn_params->immediate_data)
			max_unsolicited_data =
					  min(conn_params->first_burst_length,
					      conn_params->max_send_pdu_length);

		context->ustorm_ag_context.exp_data_acked =
				   cpu_to_le32(total_ahs_length == 0 ?
						min(exp_data_transfer_len,
						    max_unsolicited_data) :
						((u32)(total_ahs_length +
						       ISCSI_AHS_CNTL_SIZE)));
		break;
	case ISCSI_TASK_TYPE_TARGET_READ:
		val = cpu_to_le32(exp_data_transfer_len);
		context->ustorm_ag_context.exp_data_acked = val;
		break;
	case ISCSI_TASK_TYPE_INITIATOR_READ:
		context->ustorm_ag_context.exp_data_acked =
					cpu_to_le32((total_ahs_length == 0 ? 0 :
						     total_ahs_length +
						     ISCSI_AHS_CNTL_SIZE));
		break;
	case ISCSI_TASK_TYPE_TARGET_WRITE:
		val = cpu_to_le32(task_size);
		context->ustorm_ag_context.exp_cont_len = val;
		break;
	default:
		break;
	}
}

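/*
 * Program the RDIF context for tasks that receive data (target write /
 * initiator read) and the TDIF context for tasks that transmit data
 * (target read / initiator write): tags, masks, guard and protection
 * type, forwarding and validation flags, and the protection interval
 * size.  Nothing is done unless DIF is enabled on both interfaces.
 */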
static
void init_rtdif_task_context(struct rdif_task_context *rdif_context,
			     struct tdif_task_context *tdif_context,
			     struct scsi_dif_task_params *dif_task_params,
			     enum iscsi_task_type task_type)
{
	u32 val;

	if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
		return;

	if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
	    task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
		rdif_context->app_tag_value =
				  cpu_to_le16(dif_task_params->application_tag);
		rdif_context->partial_crc_value = cpu_to_le16(0xffff);
		val = cpu_to_le32(dif_task_params->initial_ref_tag);
		rdif_context->initial_ref_tag = val;
		rdif_context->app_tag_mask =
			     cpu_to_le16(dif_task_params->application_tag_mask);
		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
			  dif_task_params->crc_seed ? 1 : 0);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
			  dif_task_params->host_guard_type);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_PROTECTION_TYPE,
			  dif_task_params->protection_type);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
			  dif_task_params->keep_ref_tag_const ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
			  (dif_task_params->validate_app_tag &&
			  dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_GUARD,
			  (dif_task_params->validate_guard &&
			  dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
			  (dif_task_params->validate_ref_tag &&
			  dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_HOST_INTERFACE,
			  dif_task_params->dif_on_host ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
			  dif_task_params->dif_on_network ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_GUARD,
			  dif_task_params->forward_guard ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
			  dif_task_params->forward_app_tag ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
			  dif_task_params->forward_ref_tag ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_INTERVAL_SIZE,
			  dif_task_params->dif_block_size_log - 9);
		SET_FIELD(rdif_context->state,
			  RDIF_TASK_CONTEXT_REF_TAG_MASK,
			  dif_task_params->ref_tag_mask);
		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
			  dif_task_params->ignore_app_tag);
	}

	if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
	    task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
		tdif_context->app_tag_value =
				  cpu_to_le16(dif_task_params->application_tag);
		tdif_context->partial_crc_value_b =
		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
		tdif_context->partial_crc_value_a =
		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
		SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
			  dif_task_params->crc_seed ? 1 : 0);

		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
			  dif_task_params->tx_dif_conn_err_en ? 1 : 0);
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
			  dif_task_params->forward_guard   ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
			  dif_task_params->forward_app_tag ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
			  dif_task_params->forward_ref_tag ? 1 : 0);
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
			  dif_task_params->dif_block_size_log - 9);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_HOST_INTERFACE,
			  dif_task_params->dif_on_host    ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
			  dif_task_params->dif_on_network ? 1 : 0);
		val = cpu_to_le32(dif_task_params->initial_ref_tag);
		tdif_context->initial_ref_tag = val;
		tdif_context->app_tag_mask =
			     cpu_to_le16(dif_task_params->application_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
			  dif_task_params->host_guard_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_PROTECTION_TYPE,
			  dif_task_params->protection_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
			  dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
			  dif_task_params->keep_ref_tag_const ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_GUARD,
			  (dif_task_params->validate_guard &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
			  (dif_task_params->validate_app_tag &&
			  dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
			  (dif_task_params->validate_ref_tag &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_REF_TAG_MASK,
			  dif_task_params->ref_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
			  dif_task_params->ignore_app_tag ? 1 : 0);
	}
}

static void set_local_completion_context(struct iscsi_task_context *context)
{
	SET_FIELD(context->ystorm_st_context.state.flags,
		  YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
	SET_FIELD(context->ustorm_st_context.flags,
		  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
}

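/*
 * Build the full task context for a read/write task: default context,
 * per-task-type Ystorm/Mstorm setup (local completion, R2T length and
 * expected ITT, or CDB and sense buffer), SGL and DIF programming on the
 * active data path, Ustorm lengths, expected unsolicited data, and
 * finally the SQE.
 */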
static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
			      enum iscsi_task_type task_type,
			      struct iscsi_conn_params *conn_params,
			      struct iscsi_common_hdr *pdu_header,
			      struct scsi_sgl_task_params *sgl_task_params,
			      struct scsi_initiator_cmd_params *cmd_params,
			      struct scsi_dif_task_params *dif_task_params)
{
	u32 exp_data_transfer_len = conn_params->max_burst_length;
	struct iscsi_task_context *cxt;
	bool slow_io = false;
	u32 task_size, val;
	u8 num_sges = 0;

	task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
				      dif_task_params);

	init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
				task_type);

	cxt = task_params->context;

	if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
		set_local_completion_context(cxt);
	} else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
		val = cpu_to_le32(task_size +
			   ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
		cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
		cxt->mstorm_st_context.expected_itt =
						   cpu_to_le32(pdu_header->itt);
	} else {
		val = cpu_to_le32(task_size);
		cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
									    val;
		init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
						     cmd_params);
		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
		cxt->mstorm_st_context.sense_db.lo = val;

		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
		cxt->mstorm_st_context.sense_db.hi = val;
	}

	if (task_params->tx_io_size) {
		init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
				       dif_task_params);
		init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
				       dif_task_params);
		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
				      &cxt->ystorm_st_context.state.data_desc,
				      sgl_task_params);

		slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
					   sgl_task_params->small_mid_sge);

		num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
					    (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
				      ISCSI_WQE_NUM_SGES_SLOWIO;

		if (slow_io) {
			SET_FIELD(cxt->ystorm_st_context.state.flags,
				  YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
		}
	} else if (task_params->rx_io_size) {
		init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
				       dif_task_params);
		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
				      &cxt->mstorm_st_context.data_desc,
				      sgl_task_params);
		num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
				sgl_task_params->small_mid_sge) ?
				min_t(u16, sgl_task_params->num_sges,
				      (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
				ISCSI_WQE_NUM_SGES_SLOWIO;
		cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
	}

	if (exp_data_transfer_len > task_size ||
	    task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
		exp_data_transfer_len = task_size;

	init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
				  &task_params->context->ustorm_ag_context,
				  task_size, exp_data_transfer_len, num_sges,
				  dif_task_params ?
				  dif_task_params->tx_dif_conn_err_en : false);

	set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
					   task_type, task_size,
					   exp_data_transfer_len,
					GET_FIELD(pdu_header->hdr_second_dword,
						  ISCSI_CMD_HDR_TOTAL_AHS_LEN));

	if (dif_task_params)
		init_rtdif_task_context(&task_params->context->rdif_context,
					&task_params->context->tdif_context,
					dif_task_params, task_type);

	init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
		 cmd_params, task_type, false);

	return 0;
}

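/*
 * Entry point for initiator SCSI commands: dispatch on the R/W flags in
 * the command PDU.  A command with neither flag set and no data is
 * initialised as a zero-length read; anything else is rejected.
 */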
int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
				 struct iscsi_conn_params *conn_params,
				 struct scsi_initiator_cmd_params *cmd_params,
				 struct iscsi_cmd_hdr *cmd_header,
				 struct scsi_sgl_task_params *tx_sgl_params,
				 struct scsi_sgl_task_params *rx_sgl_params,
				 struct scsi_dif_task_params *dif_task_params)
{
	if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
		return init_rw_iscsi_task(task_params,
					  ISCSI_TASK_TYPE_INITIATOR_WRITE,
					  conn_params,
					  (struct iscsi_common_hdr *)cmd_header,
					  tx_sgl_params, cmd_params,
					  dif_task_params);
	else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) ||
		 (task_params->rx_io_size == 0 && task_params->tx_io_size == 0))
		return init_rw_iscsi_task(task_params,
					  ISCSI_TASK_TYPE_INITIATOR_READ,
					  conn_params,
					  (struct iscsi_common_hdr *)cmd_header,
					  rx_sgl_params, cmd_params,
					  dif_task_params);
	else
		return -1;
}

int init_initiator_login_request_task(struct iscsi_task_params *task_params,
				      struct iscsi_login_req_hdr  *login_header,
				      struct scsi_sgl_task_params *tx_params,
				      struct scsi_sgl_task_params *rx_params)
{
	struct iscsi_task_context *cxt;

	cxt = task_params->context;

	init_default_iscsi_task(task_params,
				(struct data_hdr *)login_header,
				ISCSI_TASK_TYPE_MIDPATH);

	init_ustorm_task_contexts(&cxt->ustorm_st_context,
				  &cxt->ustorm_ag_context,
				  task_params->rx_io_size ?
				  rx_params->total_buffer_size : 0,
				  task_params->tx_io_size ?
				  tx_params->total_buffer_size : 0, 0,
				  0);

	if (task_params->tx_io_size)
		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
				      &cxt->ystorm_st_context.state.data_desc,
				      tx_params);

	if (task_params->rx_io_size)
		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
				      &cxt->mstorm_st_context.data_desc,
				      rx_params);

	cxt->mstorm_st_context.rem_task_size =
			cpu_to_le32(task_params->rx_io_size ?
				    rx_params->total_buffer_size : 0);

	init_sqe(task_params, tx_params, NULL,
		 (struct iscsi_common_hdr *)login_header, NULL,
		 ISCSI_TASK_TYPE_MIDPATH, false);

	return 0;
}

int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
				struct iscsi_nop_out_hdr *nop_out_pdu_header,
				struct scsi_sgl_task_params *tx_sgl_task_params,
				struct scsi_sgl_task_params *rx_sgl_task_params)
{
	struct iscsi_task_context *cxt;

	cxt = task_params->context;

	init_default_iscsi_task(task_params,
				(struct data_hdr *)nop_out_pdu_header,
				ISCSI_TASK_TYPE_MIDPATH);

	if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
		set_local_completion_context(task_params->context);

	if (task_params->tx_io_size)
		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
				      &cxt->ystorm_st_context.state.data_desc,
				      tx_sgl_task_params);

	if (task_params->rx_io_size)
		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
				      &cxt->mstorm_st_context.data_desc,
				      rx_sgl_task_params);

	init_ustorm_task_contexts(&cxt->ustorm_st_context,
				  &cxt->ustorm_ag_context,
				  task_params->rx_io_size ?
				  rx_sgl_task_params->total_buffer_size : 0,
				  task_params->tx_io_size ?
				  tx_sgl_task_params->total_buffer_size : 0,
				  0, 0);

	cxt->mstorm_st_context.rem_task_size =
				cpu_to_le32(task_params->rx_io_size ?
					rx_sgl_task_params->total_buffer_size :
					0);

	init_sqe(task_params, tx_sgl_task_params, NULL,
		 (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
		 ISCSI_TASK_TYPE_MIDPATH, false);

	return 0;
}

int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
				       struct iscsi_logout_req_hdr *logout_hdr,
				       struct scsi_sgl_task_params *tx_params,
				       struct scsi_sgl_task_params *rx_params)
{
	struct iscsi_task_context *cxt;

	cxt = task_params->context;

	init_default_iscsi_task(task_params,
				(struct data_hdr *)logout_hdr,
				ISCSI_TASK_TYPE_MIDPATH);

	if (task_params->tx_io_size)
		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
				      &cxt->ystorm_st_context.state.data_desc,
				      tx_params);

	if (task_params->rx_io_size)
		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
				      &cxt->mstorm_st_context.data_desc,
				      rx_params);

	init_ustorm_task_contexts(&cxt->ustorm_st_context,
				  &cxt->ustorm_ag_context,
				  task_params->rx_io_size ?
				  rx_params->total_buffer_size : 0,
				  task_params->tx_io_size ?
				  tx_params->total_buffer_size : 0,
				  0, 0);

	cxt->mstorm_st_context.rem_task_size =
					cpu_to_le32(task_params->rx_io_size ?
					rx_params->total_buffer_size : 0);

	init_sqe(task_params, tx_params, NULL,
		 (struct iscsi_common_hdr *)logout_hdr, NULL,
		 ISCSI_TASK_TYPE_MIDPATH, false);

	return 0;
}

int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
				    struct iscsi_tmf_request_hdr *tmf_header)
{
	init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
				ISCSI_TASK_TYPE_MIDPATH);

	init_sqe(task_params, NULL, NULL,
		 (struct iscsi_common_hdr *)tmf_header, NULL,
		 ISCSI_TASK_TYPE_MIDPATH, false);

	return 0;
}

int init_initiator_text_request_task(struct iscsi_task_params *task_params,
				     struct iscsi_text_request_hdr *text_header,
				     struct scsi_sgl_task_params *tx_params,
				     struct scsi_sgl_task_params *rx_params)
{
	struct iscsi_task_context *cxt;

	cxt = task_params->context;

	init_default_iscsi_task(task_params,
				(struct data_hdr *)text_header,
				ISCSI_TASK_TYPE_MIDPATH);

	if (task_params->tx_io_size)
		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
				      &cxt->ystorm_st_context.state.data_desc,
				      tx_params);

	if (task_params->rx_io_size)
		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
				      &cxt->mstorm_st_context.data_desc,
				      rx_params);

	cxt->mstorm_st_context.rem_task_size =
				cpu_to_le32(task_params->rx_io_size ?
					rx_params->total_buffer_size : 0);

	init_ustorm_task_contexts(&cxt->ustorm_st_context,
				  &cxt->ustorm_ag_context,
				  task_params->rx_io_size ?
				  rx_params->total_buffer_size : 0,
				  task_params->tx_io_size ?
				  tx_params->total_buffer_size : 0, 0, 0);

	init_sqe(task_params, tx_params, NULL,
		 (struct iscsi_common_hdr *)text_header, NULL,
		 ISCSI_TASK_TYPE_MIDPATH, false);

	return 0;
}

int init_cleanup_task(struct iscsi_task_params *task_params)
{
	init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
		 true);
	return 0;
}