/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.broadcom.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 ********************************************************************/

#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
#define LPFC_NVMET_RQE_DEF_COUNT	512
#define LPFC_NVMET_SUCCESS_LEN		12

#define LPFC_NVMET_MRQ_OFF		0xffff
#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16
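
/*
 * Hedged illustration (not part of the driver): one plausible way the
 * MRQ sentinels above could be applied when validating a requested
 * multi-RQ count.  LPFC_NVMET_MRQ_AUTO is taken to mean "let the driver
 * pick", LPFC_NVMET_MRQ_OFF to mean "NVMET receive queues disabled",
 * and explicit requests are clamped to LPFC_NVMET_MRQ_MAX.  The helper
 * name and the auto-sizing policy are hypothetical, not the driver's
 * actual configuration logic.
 */
static inline uint32_t
lpfc_example_normalize_mrq(uint32_t requested, uint32_t num_cpus)
{
	if (requested == LPFC_NVMET_MRQ_OFF)	/* target mode not in use */
		return 0;
	if (requested == LPFC_NVMET_MRQ_AUTO)	/* size from CPU count */
		requested = num_cpus;
	if (requested > LPFC_NVMET_MRQ_MAX)	/* never exceed the cap */
		requested = LPFC_NVMET_MRQ_MAX;
	return requested;
}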

/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;
	struct completion tport_unreg_done;

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_abort_cmpl;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_rsp;
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;
	atomic_t rcv_fcp_cmd_defer;
	atomic_t xmt_fcp_release;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
	atomic_t xmt_abort_unsol;
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - defer IO */
	atomic_t defer_ctx;
	atomic_t defer_fod;
	atomic_t defer_wqfull;
};
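
/*
 * Hedged illustration (not part of the driver): the counters above are
 * atomic_t so that hot completion/receive paths can bump them without a
 * lock while a debugfs reader snapshots them concurrently.  The function
 * below is a hypothetical sketch of that pattern; atomic_inc() and
 * atomic_read() are real kernel APIs, the helper and its drop semantics
 * are assumptions for illustration only.
 */
static inline void
lpfc_example_count_fcp_cmd(struct lpfc_nvmet_tgtport *tgtp, bool dropped)
{
	atomic_inc(&tgtp->rcv_fcp_cmd_in);	/* every FCP command received */
	if (dropped)
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	else
		atomic_inc(&tgtp->rcv_fcp_cmd_out);	/* handed up to the NVMET layer */
	/* A stats dump would use e.g. atomic_read(&tgtp->rcv_fcp_cmd_in). */
}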

struct lpfc_nvmet_ctx_info {
	struct list_head	nvmet_ctx_list;
	spinlock_t		nvmet_ctx_list_lock; /* lock per CPU */
	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
	uint16_t	nvmet_ctx_list_cnt;
	char pad[16];  /* pad to a cache-line */
};

/* This retrieves the context info associated with the specified cpu / mrq */
#define lpfc_get_ctx_list(phba, cpu, mrq)  \
	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))

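/*
 * Hedged illustration (not part of the driver): lpfc_get_ctx_list()
 * treats sli4_hba.nvmet_ctx_info as a flattened 2-D array indexed by
 * [cpu][mrq], with phba->cfg_nvmet_mrq entries per CPU "row".  The
 * hypothetical walker below shows that layout by summing the free
 * context counts across every cpu/mrq pair; for_each_present_cpu()
 * and the spinlock calls are real kernel APIs, the function itself
 * is only a sketch.
 */
static inline uint32_t
lpfc_example_total_ctx_count(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t total = 0;
	int cpu, mrq;

	for_each_present_cpu(cpu) {
		for (mrq = 0; mrq < phba->cfg_nvmet_mrq; mrq++) {
			infop = lpfc_get_ctx_list(phba, cpu, mrq);
			spin_lock(&infop->nvmet_ctx_list_lock);
			total += infop->nvmet_ctx_list_cnt;
			spin_unlock(&infop->nvmet_ctx_list_lock);
		}
	}
	return total;
}
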
struct lpfc_nvmet_rcv_ctx {
	union {
		struct nvmefc_tgt_ls_req ls_req;
		struct nvmefc_tgt_fcp_req fcp_req;
	} ctx;
	struct list_head list;
	struct lpfc_hba *phba;
	struct lpfc_iocbq *wqeq;
	struct lpfc_iocbq *abort_wqeq;
	dma_addr_t txrdy_phys;
	spinlock_t ctxlock; /* protect flag access */
	uint32_t *txrdy;
	uint32_t sid;
	uint32_t offset;
	uint16_t oxid;
	uint16_t size;
	uint16_t entry_cnt;
	uint16_t cpu;
	uint16_t idx;
	uint16_t state;
	/* States */
#define LPFC_NVMET_STE_LS_RCV		1
#define LPFC_NVMET_STE_LS_ABORT		2
#define LPFC_NVMET_STE_LS_RSP		3
#define LPFC_NVMET_STE_RCV		4
#define LPFC_NVMET_STE_DATA		5
#define LPFC_NVMET_STE_ABORT		6
#define LPFC_NVMET_STE_DONE		7
#define LPFC_NVMET_STE_FREE		0xff
	uint16_t flag;
#define LPFC_NVMET_IO_INP		0x1  /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP		0x2  /* Abort WQE issued on exchange */
#define LPFC_NVMET_XBUSY		0x4  /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV		0x10  /* ABTS received on exchange */
#define LPFC_NVMET_DEFER_WQFULL		0x40  /* Waiting on a free WQE */
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t ts_isr_cmd;
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
#endif
};
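
/*
 * Hedged illustration (not part of the driver): the comment on "ctxlock"
 * above says it protects flag access, so a test-and-set of bits such as
 * LPFC_NVMET_ABORT_OP or LPFC_NVMET_CTX_RLS would be performed with the
 * lock held.  The helper below is a hypothetical sketch of that
 * convention; spin_lock_irqsave()/spin_unlock_irqrestore() are real
 * kernel APIs, the function name is not from the driver.
 */
static inline bool
lpfc_example_mark_abort(struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;
	bool first = false;

	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	if (!(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
		ctxp->flag |= LPFC_NVMET_ABORT_OP;	/* only the first caller wins */
		first = true;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
	return first;
}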