/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};

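/* Build and post the FCoE "function start" ramrod on the slow path queue.
 * Validates the requested number of CQs against the CQs reserved for FCoE,
 * acquires a dummy CID for the timer context and fills the function/queue
 * parameters (BDQ PBLs, XON/XOFF thresholds, SB indices) before posting.
 */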
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct e4_fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	/* Sanity */
	if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
		DP_ERR(p_hwfn,
		       "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
		       fcoe_pf_params->num_cqs,
		       p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
		return -EINVAL;
	}

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		return rc;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		return rc;
	}
	p_cxt = cxt_info.p_cxt;
	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		u16 igu_sb_id;

		igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
		tmp = cpu_to_le16(igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

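/* Post a connection-offload ramrod for an already-acquired connection:
 * copies the SQ/XFERQ/CONFQ page addresses, MAC addresses, FC addresses
 * and timer values from the qed_fcoe_conn into the ramrod data.
 */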
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

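/* Post a connection-terminate ramrod; the caller supplies the DMA address
 * of the terminate parameters via p_conn->terminate_params.
 */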
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

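/* Post the "function destroy" ramrod and disable the FCoE task segment
 * in the timers block.
 */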
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

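/* Return a connection from the free list if one is available; otherwise
 * allocate a new qed_fcoe_conn together with its XFERQ/CONFQ pages and
 * the PBLs that point at them.
 */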
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}

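/* Release all DMA memory owned by a connection and free the structure. */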
static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	    qed_db_addr(cid, DQ_DEMS_LEGACY);
}

static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		    GTT_BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								QED_BDQ),
						    bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		    GTT_BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								QED_BDQ),
						    bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

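/* Per-hwfn FCoE init/teardown: qed_fcoe_alloc() allocates the bookkeeping
 * structure, qed_fcoe_setup() initializes the task contexts, and
 * qed_fcoe_free() drains the free list and releases it.
 */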
int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate the FCoE info struct */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);

	p_hwfn->p_fcoe_info = p_fcoe_info;
	return 0;
}

void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct e4_fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_hwfn->p_fcoe_info)
		return;

	while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_hwfn->p_fcoe_info);
	p_hwfn->p_fcoe_info = NULL;
}

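/* Acquire a CID for a connection; reuse the caller-supplied connection
 * if one is passed in, otherwise allocate a fresh one.
 */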
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

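/* Entries of the cdev->connections hash table, keyed by the connection
 * icid, which is also the handle exposed to the upper-layer driver.
 */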
struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	info->wwpn = hwfn->mcp_info->func_info.wwn_port;
	info->wwnn = hwfn->mcp_info->func_info.wwn_node;

	info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}

static int qed_fcoe_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt)
		return -EAGAIN;

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	return rc;
}

static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
				    QED_SPQ_MODE_EBLOCK, NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

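/* Upper-layer (qedf) facing wrapper: acquires a connection, inserts it
 * into the connection hash and returns the handle, fw_cid and doorbell
 * address to the caller.
 */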
static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
}

void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
			 proto_stats.fcoe_rx_xfer_pkt_cnt +
			 proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
			 proto_stats.fcoe_tx_xfer_pkt_cnt +
			 proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/types.h>
34#include <asm/byteorder.h>
35#include <asm/param.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/interrupt.h>
39#include <linux/kernel.h>
40#include <linux/log2.h>
41#include <linux/module.h>
42#include <linux/pci.h>
43#include <linux/slab.h>
44#include <linux/stddef.h>
45#include <linux/string.h>
46#include <linux/workqueue.h>
47#include <linux/errno.h>
48#include <linux/list.h>
49#include <linux/spinlock.h>
50#define __PREVENT_DUMP_MEM_ARR__
51#define __PREVENT_PXP_GLOBAL_WIN__
52#include "qed.h"
53#include "qed_cxt.h"
54#include "qed_dev_api.h"
55#include "qed_fcoe.h"
56#include "qed_hsi.h"
57#include "qed_hw.h"
58#include "qed_int.h"
59#include "qed_ll2.h"
60#include "qed_mcp.h"
61#include "qed_reg_addr.h"
62#include "qed_sp.h"
63#include "qed_sriov.h"
64#include <linux/qed/qed_fcoe_if.h>
65
66struct qed_fcoe_conn {
67 struct list_head list_entry;
68 bool free_on_delete;
69
70 u16 conn_id;
71 u32 icid;
72 u32 fw_cid;
73 u8 layer_code;
74
75 dma_addr_t sq_pbl_addr;
76 dma_addr_t sq_curr_page_addr;
77 dma_addr_t sq_next_page_addr;
78 dma_addr_t xferq_pbl_addr;
79 void *xferq_pbl_addr_virt_addr;
80 dma_addr_t xferq_addr[4];
81 void *xferq_addr_virt_addr[4];
82 dma_addr_t confq_pbl_addr;
83 void *confq_pbl_addr_virt_addr;
84 dma_addr_t confq_addr[2];
85 void *confq_addr_virt_addr[2];
86
87 dma_addr_t terminate_params;
88
89 u16 dst_mac_addr_lo;
90 u16 dst_mac_addr_mid;
91 u16 dst_mac_addr_hi;
92 u16 src_mac_addr_lo;
93 u16 src_mac_addr_mid;
94 u16 src_mac_addr_hi;
95
96 u16 tx_max_fc_pay_len;
97 u16 e_d_tov_timer_val;
98 u16 rec_tov_timer_val;
99 u16 rx_max_fc_pay_len;
100 u16 vlan_tag;
101 u16 physical_q0;
102
103 struct fc_addr_nw s_id;
104 u8 max_conc_seqs_c3;
105 struct fc_addr_nw d_id;
106 u8 flags;
107 u8 def_q_idx;
108};
109
110static int
111qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
112 enum spq_mode comp_mode,
113 struct qed_spq_comp_cb *p_comp_addr)
114{
115 struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
116 struct fcoe_init_ramrod_params *p_ramrod = NULL;
117 struct fcoe_init_func_ramrod_data *p_data;
118 struct e4_fcoe_conn_context *p_cxt = NULL;
119 struct qed_spq_entry *p_ent = NULL;
120 struct qed_sp_init_data init_data;
121 struct qed_cxt_info cxt_info;
122 u32 dummy_cid;
123 int rc = 0;
124 u16 tmp;
125 u8 i;
126
127 /* Get SPQ entry */
128 memset(&init_data, 0, sizeof(init_data));
129 init_data.cid = qed_spq_get_cid(p_hwfn);
130 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
131 init_data.comp_mode = comp_mode;
132 init_data.p_comp_data = p_comp_addr;
133
134 rc = qed_sp_init_request(p_hwfn, &p_ent,
135 FCOE_RAMROD_CMD_ID_INIT_FUNC,
136 PROTOCOLID_FCOE, &init_data);
137 if (rc)
138 return rc;
139
140 p_ramrod = &p_ent->ramrod.fcoe_init;
141 p_data = &p_ramrod->init_ramrod_data;
142 fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
143
144 /* Sanity */
145 if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
146 DP_ERR(p_hwfn,
147 "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
148 fcoe_pf_params->num_cqs,
149 p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
150 rc = -EINVAL;
151 goto err;
152 }
153
154 p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
155 tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
156 p_data->sq_num_pages_in_pbl = tmp;
157
158 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
159 if (rc)
160 goto err;
161
162 cxt_info.iid = dummy_cid;
163 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
164 if (rc) {
165 DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
166 dummy_cid);
167 goto err;
168 }
169 p_cxt = cxt_info.p_cxt;
170 SET_FIELD(p_cxt->tstorm_ag_context.flags3,
171 E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
172
173 fcoe_pf_params->dummy_icid = (u16)dummy_cid;
174
175 tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
176 p_data->func_params.num_tasks = tmp;
177 p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
178 p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;
179
180 DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
181 fcoe_pf_params->glbl_q_params_addr);
182
183 tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
184 p_data->q_params.cq_num_entries = tmp;
185
186 tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
187 p_data->q_params.cmdq_num_entries = tmp;
188
189 tmp = fcoe_pf_params->num_cqs;
190 p_data->q_params.num_queues = (u8)tmp;
191
192 tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
193 p_data->q_params.queue_relative_offset = (u8)tmp;
194
195 for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
196 u16 igu_sb_id;
197
198 igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
199 tmp = cpu_to_le16(igu_sb_id);
200 p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
201 }
202
203 p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
204 p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
205
206 p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
207
208 DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
209 fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
210 p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
211 fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
212 tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
213 p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
214 tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
215 p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
216
217 DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
218 fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
219 p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
220 fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
221 tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
222 p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
223 tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
224 p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
225 tmp = fcoe_pf_params->rq_buffer_size;
226 p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);
227
228 if (fcoe_pf_params->is_target) {
229 SET_FIELD(p_data->q_params.q_validity,
230 SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
231 if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
232 SET_FIELD(p_data->q_params.q_validity,
233 SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
234 SET_FIELD(p_data->q_params.q_validity,
235 SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
236 } else {
237 SET_FIELD(p_data->q_params.q_validity,
238 SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
239 }
240
241 rc = qed_spq_post(p_hwfn, p_ent, NULL);
242
243 return rc;
244
245err:
246 qed_sp_destroy_request(p_hwfn, p_ent);
247 return rc;
248}
249
250static int
251qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
252 struct qed_fcoe_conn *p_conn,
253 enum spq_mode comp_mode,
254 struct qed_spq_comp_cb *p_comp_addr)
255{
256 struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
257 struct fcoe_conn_offload_ramrod_data *p_data;
258 struct qed_spq_entry *p_ent = NULL;
259 struct qed_sp_init_data init_data;
260 u16 physical_q0, tmp;
261 int rc;
262
263 /* Get SPQ entry */
264 memset(&init_data, 0, sizeof(init_data));
265 init_data.cid = p_conn->icid;
266 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
267 init_data.comp_mode = comp_mode;
268 init_data.p_comp_data = p_comp_addr;
269
270 rc = qed_sp_init_request(p_hwfn, &p_ent,
271 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
272 PROTOCOLID_FCOE, &init_data);
273 if (rc)
274 return rc;
275
276 p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
277 p_data = &p_ramrod->offload_ramrod_data;
278
279 /* Transmission PQ is the first of the PF */
280 physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
281 p_conn->physical_q0 = cpu_to_le16(physical_q0);
282 p_data->physical_q0 = cpu_to_le16(physical_q0);
283
284 p_data->conn_id = cpu_to_le16(p_conn->conn_id);
285 DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
286 DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
287 DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
288 DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
289 DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
290 DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);
291
292 DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
293 DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
294 DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);
295
296 p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
297 p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
298 p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
299 p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
300 p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
301 p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);
302
303 tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
304 p_data->tx_max_fc_pay_len = tmp;
305 tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
306 p_data->e_d_tov_timer_val = tmp;
307 tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
308 p_data->rec_rr_tov_timer_val = tmp;
309 tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
310 p_data->rx_max_fc_pay_len = tmp;
311
312 p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
313 p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
314 p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
315 p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
316 p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
317 p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
318 p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
319 p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
320 p_data->flags = p_conn->flags;
321 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
322 SET_FIELD(p_data->flags,
323 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
324 p_data->def_q_idx = p_conn->def_q_idx;
325
326 return qed_spq_post(p_hwfn, p_ent, NULL);
327}
328
329static int
330qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
331 struct qed_fcoe_conn *p_conn,
332 enum spq_mode comp_mode,
333 struct qed_spq_comp_cb *p_comp_addr)
334{
335 struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
336 struct qed_spq_entry *p_ent = NULL;
337 struct qed_sp_init_data init_data;
338 int rc = 0;
339
340 /* Get SPQ entry */
341 memset(&init_data, 0, sizeof(init_data));
342 init_data.cid = p_conn->icid;
343 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
344 init_data.comp_mode = comp_mode;
345 init_data.p_comp_data = p_comp_addr;
346
347 rc = qed_sp_init_request(p_hwfn, &p_ent,
348 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
349 PROTOCOLID_FCOE, &init_data);
350 if (rc)
351 return rc;
352
353 p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
354 DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
355 p_conn->terminate_params);
356
357 return qed_spq_post(p_hwfn, p_ent, NULL);
358}
359
360static int
361qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
362 struct qed_ptt *p_ptt,
363 enum spq_mode comp_mode,
364 struct qed_spq_comp_cb *p_comp_addr)
365{
366 struct qed_spq_entry *p_ent = NULL;
367 struct qed_sp_init_data init_data;
368 u32 active_segs = 0;
369 int rc = 0;
370
371 /* Get SPQ entry */
372 memset(&init_data, 0, sizeof(init_data));
373 init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
374 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
375 init_data.comp_mode = comp_mode;
376 init_data.p_comp_data = p_comp_addr;
377
378 rc = qed_sp_init_request(p_hwfn, &p_ent,
379 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
380 PROTOCOLID_FCOE, &init_data);
381 if (rc)
382 return rc;
383
384 active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
385 active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
386 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);
387
388 return qed_spq_post(p_hwfn, p_ent, NULL);
389}
390
391static int
392qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
393 struct qed_fcoe_conn **p_out_conn)
394{
395 struct qed_fcoe_conn *p_conn = NULL;
396 void *p_addr;
397 u32 i;
398
399 spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
400 if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
401 p_conn =
402 list_first_entry(&p_hwfn->p_fcoe_info->free_list,
403 struct qed_fcoe_conn, list_entry);
404 if (p_conn) {
405 list_del(&p_conn->list_entry);
406 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
407 *p_out_conn = p_conn;
408 return 0;
409 }
410 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
411
412 p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
413 if (!p_conn)
414 return -ENOMEM;
415
416 p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
417 QED_CHAIN_PAGE_SIZE,
418 &p_conn->xferq_pbl_addr, GFP_KERNEL);
419 if (!p_addr)
420 goto nomem_pbl_xferq;
421 p_conn->xferq_pbl_addr_virt_addr = p_addr;
422
423 for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
424 p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
425 QED_CHAIN_PAGE_SIZE,
426 &p_conn->xferq_addr[i], GFP_KERNEL);
427 if (!p_addr)
428 goto nomem_xferq;
429 p_conn->xferq_addr_virt_addr[i] = p_addr;
430
431 p_addr = p_conn->xferq_pbl_addr_virt_addr;
432 ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
433 }
434
435 p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
436 QED_CHAIN_PAGE_SIZE,
437 &p_conn->confq_pbl_addr, GFP_KERNEL);
438 if (!p_addr)
439 goto nomem_xferq;
440 p_conn->confq_pbl_addr_virt_addr = p_addr;
441
442 for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
443 p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
444 QED_CHAIN_PAGE_SIZE,
445 &p_conn->confq_addr[i], GFP_KERNEL);
446 if (!p_addr)
447 goto nomem_confq;
448 p_conn->confq_addr_virt_addr[i] = p_addr;
449
450 p_addr = p_conn->confq_pbl_addr_virt_addr;
451 ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
452 }
453
454 p_conn->free_on_delete = true;
455 *p_out_conn = p_conn;
456 return 0;
457
458nomem_confq:
459 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
460 QED_CHAIN_PAGE_SIZE,
461 p_conn->confq_pbl_addr_virt_addr,
462 p_conn->confq_pbl_addr);
463 for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
464 if (p_conn->confq_addr_virt_addr[i])
465 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
466 QED_CHAIN_PAGE_SIZE,
467 p_conn->confq_addr_virt_addr[i],
468 p_conn->confq_addr[i]);
469nomem_xferq:
470 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
471 QED_CHAIN_PAGE_SIZE,
472 p_conn->xferq_pbl_addr_virt_addr,
473 p_conn->xferq_pbl_addr);
474 for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
475 if (p_conn->xferq_addr_virt_addr[i])
476 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
477 QED_CHAIN_PAGE_SIZE,
478 p_conn->xferq_addr_virt_addr[i],
479 p_conn->xferq_addr[i]);
480nomem_pbl_xferq:
481 kfree(p_conn);
482 return -ENOMEM;
483}
484
485static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
486 struct qed_fcoe_conn *p_conn)
487{
488 u32 i;
489
490 if (!p_conn)
491 return;
492
493 if (p_conn->confq_pbl_addr_virt_addr)
494 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
495 QED_CHAIN_PAGE_SIZE,
496 p_conn->confq_pbl_addr_virt_addr,
497 p_conn->confq_pbl_addr);
498
499 for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
500 if (!p_conn->confq_addr_virt_addr[i])
501 continue;
502 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
503 QED_CHAIN_PAGE_SIZE,
504 p_conn->confq_addr_virt_addr[i],
505 p_conn->confq_addr[i]);
506 }
507
508 if (p_conn->xferq_pbl_addr_virt_addr)
509 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
510 QED_CHAIN_PAGE_SIZE,
511 p_conn->xferq_pbl_addr_virt_addr,
512 p_conn->xferq_pbl_addr);
513
514 for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
515 if (!p_conn->xferq_addr_virt_addr[i])
516 continue;
517 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
518 QED_CHAIN_PAGE_SIZE,
519 p_conn->xferq_addr_virt_addr[i],
520 p_conn->xferq_addr[i]);
521 }
522 kfree(p_conn);
523}
524
525static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
526{
527 return (u8 __iomem *)p_hwfn->doorbells +
528 qed_db_addr(cid, DQ_DEMS_LEGACY);
529}
530
531static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
532 u8 bdq_id)
533{
534 if (RESC_NUM(p_hwfn, QED_BDQ)) {
535 return (u8 __iomem *)p_hwfn->regview +
536 GTT_BAR0_MAP_REG_MSDM_RAM +
537 MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
538 QED_BDQ),
539 bdq_id);
540 } else {
541 DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
542 return NULL;
543 }
544}
545
546static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
547 u8 bdq_id)
548{
549 if (RESC_NUM(p_hwfn, QED_BDQ)) {
550 return (u8 __iomem *)p_hwfn->regview +
551 GTT_BAR0_MAP_REG_TSDM_RAM +
552 TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
553 QED_BDQ),
554 bdq_id);
555 } else {
556 DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
557 return NULL;
558 }
559}
560
561int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
562{
563 struct qed_fcoe_info *p_fcoe_info;
564
565 /* Allocate LL2's set struct */
566 p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
567 if (!p_fcoe_info) {
568 DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
569 return -ENOMEM;
570 }
571 INIT_LIST_HEAD(&p_fcoe_info->free_list);
572
573 p_hwfn->p_fcoe_info = p_fcoe_info;
574 return 0;
575}
576
577void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
578{
579 struct e4_fcoe_task_context *p_task_ctx = NULL;
580 int rc;
581 u32 i;
582
583 spin_lock_init(&p_hwfn->p_fcoe_info->lock);
584 for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
585 rc = qed_cxt_get_task_ctx(p_hwfn, i,
586 QED_CTX_WORKING_MEM,
587 (void **)&p_task_ctx);
588 if (rc)
589 continue;
590
591 memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
592 SET_FIELD(p_task_ctx->timer_context.logical_client_0,
593 TIMERS_CONTEXT_VALIDLC0, 1);
594 SET_FIELD(p_task_ctx->timer_context.logical_client_1,
595 TIMERS_CONTEXT_VALIDLC1, 1);
596 SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
597 E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
598 }
599}
600
601void qed_fcoe_free(struct qed_hwfn *p_hwfn)
602{
603 struct qed_fcoe_conn *p_conn = NULL;
604
605 if (!p_hwfn->p_fcoe_info)
606 return;
607
608 while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
609 p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
610 struct qed_fcoe_conn, list_entry);
611 if (!p_conn)
612 break;
613 list_del(&p_conn->list_entry);
614 qed_fcoe_free_connection(p_hwfn, p_conn);
615 }
616
617 kfree(p_hwfn->p_fcoe_info);
618 p_hwfn->p_fcoe_info = NULL;
619}
620
621static int
622qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
623 struct qed_fcoe_conn *p_in_conn,
624 struct qed_fcoe_conn **p_out_conn)
625{
626 struct qed_fcoe_conn *p_conn = NULL;
627 int rc = 0;
628 u32 icid;
629
630 spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
631 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
632 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
633 if (rc)
634 return rc;
635
636 /* Use input connection [if provided] or allocate a new one */
637 if (p_in_conn) {
638 p_conn = p_in_conn;
639 } else {
640 rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
641 if (rc) {
642 spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
643 qed_cxt_release_cid(p_hwfn, icid);
644 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
645 return rc;
646 }
647 }
648
649 p_conn->icid = icid;
650 p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
651 *p_out_conn = p_conn;
652
653 return rc;
654}
655
656static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
657 struct qed_fcoe_conn *p_conn)
658{
659 spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
660 list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
661 qed_cxt_release_cid(p_hwfn, p_conn->icid);
662 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
663}
664
665static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
666 struct qed_ptt *p_ptt,
667 struct qed_fcoe_stats *p_stats)
668{
669 struct fcoe_rx_stat tstats;
670 u32 tstats_addr;
671
672 memset(&tstats, 0, sizeof(tstats));
673 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
674 TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
675 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
676
677 p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
678 p_stats->fcoe_rx_data_pkt_cnt =
679 HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
680 p_stats->fcoe_rx_xfer_pkt_cnt =
681 HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
682 p_stats->fcoe_rx_other_pkt_cnt =
683 HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);
684
685 p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
686 le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
687 p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
688 le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
689 p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
690 le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
691 p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
692 le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
693 p_stats->fcoe_silent_drop_total_pkt_cnt =
694 le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
695}
696
697static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
698 struct qed_ptt *p_ptt,
699 struct qed_fcoe_stats *p_stats)
700{
701 struct fcoe_tx_stat pstats;
702 u32 pstats_addr;
703
704 memset(&pstats, 0, sizeof(pstats));
705 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
706 PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
707 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
708
709 p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
710 p_stats->fcoe_tx_data_pkt_cnt =
711 HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
712 p_stats->fcoe_tx_xfer_pkt_cnt =
713 HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
714 p_stats->fcoe_tx_other_pkt_cnt =
715 HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
716}
717
718static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
719 struct qed_fcoe_stats *p_stats)
720{
721 struct qed_ptt *p_ptt;
722
723 memset(p_stats, 0, sizeof(*p_stats));
724
725 p_ptt = qed_ptt_acquire(p_hwfn);
726
727 if (!p_ptt) {
728 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
729 return -EINVAL;
730 }
731
732 _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
733 _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);
734
735 qed_ptt_release(p_hwfn, p_ptt);
736
737 return 0;
738}
739
740struct qed_hash_fcoe_con {
741 struct hlist_node node;
742 struct qed_fcoe_conn *con;
743};
744
745static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
746 struct qed_dev_fcoe_info *info)
747{
748 struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
749 int rc;
750
751 memset(info, 0, sizeof(*info));
752 rc = qed_fill_dev_info(cdev, &info->common);
753
754 info->primary_dbq_rq_addr =
755 qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
756 info->secondary_bdq_rq_addr =
757 qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
758
759 info->wwpn = hwfn->mcp_info->func_info.wwn_port;
760 info->wwnn = hwfn->mcp_info->func_info.wwn_node;
761
762 info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);
763
764 return rc;
765}
766
static void qed_register_fcoe_ops(struct qed_dev *cdev,
                                  struct qed_fcoe_cb_ops *ops, void *cookie)
{
        cdev->protocol_ops.fcoe = ops;
        cdev->ops_cookie = cookie;
}

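/* Look up a previously acquired connection by its handle (icid). Returns
 * NULL if storage is not started or no matching connection exists.
 */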
static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
                                                   u32 handle)
{
        struct qed_hash_fcoe_con *hash_con = NULL;

        if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
                return NULL;

        hash_for_each_possible(cdev->connections, hash_con, node, handle) {
                if (hash_con->con->icid == handle)
                        break;
        }

        if (!hash_con || (hash_con->con->icid != handle))
                return NULL;

        return hash_con;
}

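/* Stop the FCoE function. All connections must have been released first,
 * otherwise the request is rejected.
 */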
static int qed_fcoe_stop(struct qed_dev *cdev)
{
        struct qed_ptt *p_ptt;
        int rc;

        if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
                DP_NOTICE(cdev, "fcoe already stopped\n");
                return 0;
        }

        if (!hash_empty(cdev->connections)) {
                DP_NOTICE(cdev,
                          "Can't stop fcoe - not all connections were returned\n");
                return -EINVAL;
        }

        p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev));
        if (!p_ptt)
                return -EAGAIN;

        /* Stop the fcoe */
        rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt,
                                   QED_SPQ_MODE_EBLOCK, NULL);
        cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
        qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt);

        return rc;
}

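/* Start the FCoE function and, when @tasks is provided, report the task
 * (TID) memory layout so the caller can initialize its task contexts.
 */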
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
        int rc;

        if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
                DP_NOTICE(cdev, "fcoe already started\n");
                return 0;
        }

        rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
                                    NULL);
        if (rc) {
                DP_NOTICE(cdev, "Failed to start fcoe\n");
                return rc;
        }

        cdev->flags |= QED_FLAG_STORAGE_STARTED;
        hash_init(cdev->connections);

        if (tasks) {
                struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
                                                       GFP_ATOMIC);

                if (!tid_info) {
                        DP_NOTICE(cdev,
                                  "Failed to allocate tasks information\n");
                        qed_fcoe_stop(cdev);
                        return -ENOMEM;
                }

                rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
                if (rc) {
                        DP_NOTICE(cdev, "Failed to gather task information\n");
                        qed_fcoe_stop(cdev);
                        kfree(tid_info);
                        return rc;
                }

                /* Fill task information */
                tasks->size = tid_info->tid_size;
                tasks->num_tids_per_block = tid_info->num_tids_per_block;
                memcpy(tasks->blocks, tid_info->blocks,
                       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

                kfree(tid_info);
        }

        return 0;
}

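/* Acquire a free connection from the per-hwfn pool, add it to the hash
 * table and return its icid, fw_cid and doorbell address to the caller.
 */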
static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
                                 u32 *handle,
                                 u32 *fw_cid, void __iomem **p_doorbell)
{
        struct qed_hash_fcoe_con *hash_con;
        int rc;

        /* Allocate a hashed connection */
        hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
                return -ENOMEM;
        }

        /* Acquire the connection */
        rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
                                         &hash_con->con);
        if (rc) {
                DP_NOTICE(cdev, "Failed to acquire Connection\n");
                kfree(hash_con);
                return rc;
        }

        /* Add the connection to the hash table */
        *handle = hash_con->con->icid;
        *fw_cid = hash_con->con->fw_cid;
        hash_add(cdev->connections, &hash_con->node, *handle);

        if (p_doorbell)
                *p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev),
                                                   *handle);

        return 0;
}

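/* Remove a connection from the hash table and return it to the free pool. */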
static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
        struct qed_hash_fcoe_con *hash_con;

        hash_con = qed_fcoe_get_hash(cdev, handle);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
                          handle);
                return -EINVAL;
        }

        hlist_del(&hash_con->node);
        qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
        kfree(hash_con);

        return 0;
}

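/* Copy the offload parameters supplied by the upper-layer driver into the
 * connection and post the offload ramrod to the firmware.
 */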
static int qed_fcoe_offload_conn(struct qed_dev *cdev,
                                 u32 handle,
                                 struct qed_fcoe_params_offload *conn_info)
{
        struct qed_hash_fcoe_con *hash_con;
        struct qed_fcoe_conn *con;

        hash_con = qed_fcoe_get_hash(cdev, handle);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
                          handle);
                return -EINVAL;
        }

        /* Update the connection with information from the params */
        con = hash_con->con;

        con->sq_pbl_addr = conn_info->sq_pbl_addr;
        con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
        con->sq_next_page_addr = conn_info->sq_next_page_addr;
        con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
        con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
        con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
        con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
        con->vlan_tag = conn_info->vlan_tag;
        con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
        con->flags = conn_info->flags;
        con->def_q_idx = conn_info->def_q_idx;

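        /* Each MAC address is packed into three 16-bit words:
         * lo = bytes 0-1, mid = bytes 2-3, hi = bytes 4-5.
         */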
        con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
            conn_info->src_mac[4];
        con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
            conn_info->src_mac[2];
        con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
            conn_info->src_mac[0];
        con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
            conn_info->dst_mac[4];
        con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
            conn_info->dst_mac[2];
        con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
            conn_info->dst_mac[0];

        con->s_id.addr_hi = conn_info->s_id.addr_hi;
        con->s_id.addr_mid = conn_info->s_id.addr_mid;
        con->s_id.addr_lo = conn_info->s_id.addr_lo;
        con->d_id.addr_hi = conn_info->d_id.addr_hi;
        con->d_id.addr_mid = conn_info->d_id.addr_mid;
        con->d_id.addr_lo = conn_info->d_id.addr_lo;

        return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con,
                                        QED_SPQ_MODE_EBLOCK, NULL);
}

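/* Record the termination parameters and post the connection destroy ramrod
 * to the firmware.
 */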
static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
                                 u32 handle, dma_addr_t terminate_params)
{
        struct qed_hash_fcoe_con *hash_con;
        struct qed_fcoe_conn *con;

        hash_con = qed_fcoe_get_hash(cdev, handle);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
                          handle);
                return -EINVAL;
        }

        /* Update the connection with information from the params */
        con = hash_con->con;
        con->terminate_params = terminate_params;

        return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con,
                                        QED_SPQ_MODE_EBLOCK, NULL);
}

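/* Exported .get_stats callback: collect statistics on the affined hwfn. */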
static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
        return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
}

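/* Aggregate the firmware FCoE counters into the MCP (management FW)
 * statistics format; the protocol driver supplies login-failure counts
 * via its registered callback.
 */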
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
                                 struct qed_mcp_fcoe_stats *stats)
{
        struct qed_fcoe_stats proto_stats;

        /* Retrieve FW statistics */
        memset(&proto_stats, 0, sizeof(proto_stats));
        if (qed_fcoe_stats(cdev, &proto_stats)) {
                DP_VERBOSE(cdev, QED_MSG_STORAGE,
                           "Failed to collect FCoE statistics\n");
                return;
        }

        /* Translate FW statistics into struct */
        stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
            proto_stats.fcoe_rx_xfer_pkt_cnt +
            proto_stats.fcoe_rx_other_pkt_cnt;
        stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
            proto_stats.fcoe_tx_xfer_pkt_cnt +
            proto_stats.fcoe_tx_other_pkt_cnt;
        stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

        /* Request the protocol driver to fill in the rest */
        if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
                struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
                void *cookie = cdev->ops_cookie;

                if (ops->get_login_failures)
                        stats->login_failure = ops->get_login_failures(cookie);
        }
}

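/* Callback table handed to the FCoE upper-layer driver via
 * qed_get_fcoe_ops().
 */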
static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
        .common = &qed_common_ops_pass,
        .ll2 = &qed_ll2_ops_pass,
        .fill_dev_info = &qed_fill_fcoe_dev_info,
        .start = &qed_fcoe_start,
        .stop = &qed_fcoe_stop,
        .register_ops = &qed_register_fcoe_ops,
        .acquire_conn = &qed_fcoe_acquire_conn,
        .release_conn = &qed_fcoe_release_conn,
        .offload_conn = &qed_fcoe_offload_conn,
        .destroy_conn = &qed_fcoe_destroy_conn,
        .get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
        return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

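/* Currently a no-op; provided as the counterpart of qed_get_fcoe_ops(). */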
void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);