v3.1
/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"

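/*
 * Issue a BE2 PCI soft reset: set the reset bit, poll (up to ~6.5s) for the
 * hardware to deassert it, then mark the MPU IRAM online and repeat the
 * reset/poll sequence.
 */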
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
	u32 sreset;
	u8 *pci_reset_offset = 0;
	u8 *pci_online0_offset = 0;
	u8 *pci_online1_offset = 0;
	u32 pconline0 = 0;
	u32 pconline1 = 0;
	u32 i;

	pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
	pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
	pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
	sreset = readl((void *)pci_reset_offset);
	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(100);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}

	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR "Soft Reset did not deassert\n");
		return -EIO;
	}
	pconline1 = BE2_MPU_IRAM_ONLINE;
	writel(pconline0, (void *)pci_online0_offset);
	writel(pconline1, (void *)pci_online1_offset);

	sreset = BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(1);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}
	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR "MPU Online Soft Reset did not deassert\n");
		return -EIO;
	}
	return 0;
}

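/*
 * Poll the MPU endpoint semaphore until firmware POST completes (low word
 * 0xC000) or the error bit (bit 31) is set; gives up after roughly a minute.
 */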
int be_chk_reset_complete(struct beiscsi_hba *phba)
{
	unsigned int num_loop;
	u8 *mpu_sem = 0;
	u32 status;

	num_loop = 1000;
	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
	msleep(5000);

	while (num_loop) {
		status = readl((void *)mpu_sem);

		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
			break;
		msleep(60);
		num_loop--;
	}

	if ((status & 0x80000000) || (!num_loop)) {
		printk(KERN_ERR "Failed in be_chk_reset_complete "
		       "status = 0x%x\n", status);
		return -EIO;
	}

	return 0;
}

void be_mcc_notify(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

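/*
 * Take the next free MCC command tag from the circular tag array;
 * returns 0 when no tag is available.
 */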
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
	unsigned int tag = 0;

	if (phba->ctrl.mcc_tag_available) {
		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
		phba->ctrl.mcc_numtag[tag] = 0;
	}
	if (tag) {
		phba->ctrl.mcc_tag_available--;
		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
			phba->ctrl.mcc_alloc_index = 0;
		else
			phba->ctrl.mcc_alloc_index++;
	}
	return tag;
}

void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
	spin_lock(&ctrl->mbox_lock);
	tag = tag & 0x000000FF;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	spin_unlock(&ctrl->mbox_lock);
}

bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
		  ASYNC_EVENT_CODE_LINK_STATE);
}

static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else
		return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
						CQE_STATUS_EXTD_MASK;
		dev_err(&ctrl->pdev->dev,
			"error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -EBUSY;
	}
	return 0;
}

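/*
 * Completion seen in interrupt context: pack the valid bit, WRB index,
 * extended status and completion status into mcc_numtag[tag] and wake
 * the waiter sleeping on this tag.
 */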
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
				    struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	unsigned short tag;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	/* The ctrl.mcc_numtag[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	tag = (compl->tag0 & 0x000000FF);
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;

	ctrl->mcc_numtag[tag]  = 0x80000000;
	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}

static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
		struct be_async_event_link_state *evt)
{
	switch (evt->port_link_status) {
	case ASYNC_EVENT_LINK_DOWN:
		SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
				     evt->physical_port);
		phba->state |= BE_ADAPTER_LINK_DOWN;
		iscsi_host_for_each_session(phba->shost,
					    be2iscsi_fail_session);
		break;
	case ASYNC_EVENT_LINK_UP:
		phba->state = BE_ADAPTER_UP;
		SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
						evt->physical_port);
		break;
	default:
		SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on "
				    "Physical Port %d\n",
				     evt->port_link_status,
				     evt->physical_port);
	}
}

static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
		       u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

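/*
 * Drain the MCC completion queue: dispatch async link-state events, process
 * command completions, then re-arm the CQ doorbell with the number of
 * entries consumed.
 */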
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				/* Interpret compl as an async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) compl);
			else
				SE_DEBUG(DBG_LVL_1,
					 " Unsupported Async Event, flags"
					 " = 0x%08x\n", compl->flags);

		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				status = be_mcc_compl_process(ctrl, compl);
				atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
	int i, status;
	for (i = 0; i < mcc_timeout; i++) {
		status = beiscsi_process_mcc(phba);
		if (status)
			return status;

		if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
		return -EBUSY;
	}
	return 0;
}

/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}

static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	int cnt = 0, wait = 5;	/* in usecs */
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 12000000) {
			dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
			return -EBUSY;
		}

		if (cnt > 50) {
			wait = long_delay;
			mdelay(long_delay / 1000);
		} else
			udelay(wait);
		cnt += wait;
	} while (true);
	return 0;
}

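/*
 * Post the bootstrap mailbox (BMBX) to the adapter: write the high and low
 * halves of the mailbox DMA address to the doorbell, waiting for the ready
 * bit after each write, then process the resulting completion.
 */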
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
		return status;
	}
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
		return status;
	}
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
			return status;
		}
	} else {
		dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
		return -EBUSY;
	}
	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
		return -EBUSY;
	}
	return 0;
}

void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
						MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = 120;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
							struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

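/*
 * Convert an EQ delay in microseconds to the hardware delay multiplier
 * (0-1023), derived from the targeted interrupt rate.
 */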
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	return wrb;
}

int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
						PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
					__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
					eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

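/*
 * Post the FW initialization mailbox: an 8-byte signature
 * (0xFF 0x12 0x34 0xFF 0xFF 0x56 0x78 0xFF) written through the
 * endian_check pointer, letting the firmware verify host byte order.
 */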
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	int status;
	u8 *endian_check;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
	if (!q_mem->va)
		SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
		      __ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=0x%08x\n",
			status);
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);

	return status;
}

int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

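/*
 * Create the default PDU queue on which firmware posts unsolicited iSCSI
 * PDUs, binding it to the given completion queue.
 */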
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			SE_DEBUG(DBG_LVL_1,
				 "FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 */

#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"

/* UE Status Low CSR */
static const char * const desc_ue_status_low[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const desc_ue_status_hi[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

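/*
 * Allocate an MCC WRB and its tag under mcc_lock. Returns NULL (and logs)
 * when the queue is full, no tag is available, or a zero tag is found.
 */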
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}

void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;

	spin_lock(&ctrl->mcc_lock);
	tag = tag & MCC_Q_CMD_TAG_MASK;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	mccq->used--;
	spin_unlock(&ctrl->mcc_lock);
}

/*
 * __beiscsi_mcc_compl_status - Return the status of MCC completion
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 */
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}

/*
 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	if (!tag || tag > MAX_MCC_CMD) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : invalid tag %u\n", tag);
		return -EINVAL;
	}

	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
					      phba->ctrl.mcc_tag_status[tag],
					      msecs_to_jiffies(
						BEISCSI_HOST_MBX_TIMEOUT));
	/**
	 * Return EIO if port is being disabled. Associated DMA memory, if any,
	 * is freed by the caller. When port goes offline, MCCQ is cleaned up
	 * so does WRB.
	 */
	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/**
	 * If MBOX cmd timeout expired, tag and resource allocated
	 * for cmd is not freed until FW returns completion.
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/**
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 **/
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
				&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}

/*
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 *
 * return
 * Success: Zero
 * Failure: Non-Zero
 **/
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/**
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
				BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
				"BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/**
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	return compl_status;
}

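/*
 * Update link state from the async event: record port speed, set or clear
 * BEISCSI_HBA_LINK_UP, kick boot work on link up, and fail all sessions on
 * link down.
 */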
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;
	/**
	 * Check logical link status in ASYNC event.
	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
	 **/
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_session_fail);
	}
}

static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};

static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	state = async_sli->event_data1 >>
		 (phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
		 (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			    "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			    phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}

void beiscsi_process_async_event(struct beiscsi_hba *phba,
				struct be_mcc_compl *compl)
{
	char *sev = KERN_INFO;
	u8 evt_code;

	/* interpret flags as an async trailer */
	evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
	evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
	switch (evt_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		beiscsi_process_async_link(phba, compl);
		break;
	case ASYNC_EVENT_CODE_ISCSI:
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		sev = KERN_ERR;
		break;
	case ASYNC_EVENT_CODE_SLI:
		beiscsi_process_async_sli(phba, compl);
		break;
	default:
		/* event not registered */
		sev = KERN_ERR;
	}

	beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
		    evt_code, compl->status, compl->flags);
}

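/*
 * Handle an MCC completion: drop completions for commands never posted or
 * already timed out, record the status in mcc_tag_status[tag], then either
 * run the async callback, free the WRB, or wake the synchronous waiter.
 */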
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/**
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 **/
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
					tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check completion status and free wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}

void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	/* make request available for DMA */
	wmb();
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

/*
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT	30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy wait path is used during init only.
	 * For the commands executed during init, 5s should suffice.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
		/* 1ms sleep is enough in most cases */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
			BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			"BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	return -EBUSY;
}

/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);

	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}

void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
					   MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, u32 cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
							struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
						PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
					__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
					eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

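/*
 * Create a completion queue bound to the given event queue. BE2/BE3-R
 * adapters use the original context format; newer chips use MBX_CMD_VER2
 * with the v2 context.
 */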
 775int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 776			  struct be_queue_info *cq, struct be_queue_info *eq,
 777			  bool sol_evts, bool no_delay, int coalesce_wm)
 778{
 779	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 780	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
 781	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
 782	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 783	struct be_dma_mem *q_mem = &cq->dma_mem;
 784	void *ctxt = &req->context;
 785	int status;
 786
 787	mutex_lock(&ctrl->mbox_lock);
 
 788	memset(wrb, 0, sizeof(*wrb));
 789
 790	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 791
 792	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 793			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
 
 794
 795	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 796	if (is_chip_be2_be3r(phba)) {
 797		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
 798			      ctxt, coalesce_wm);
 799		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
 800		AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
 801			      __ilog2_u32(cq->len / 256));
 802		AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
 803		AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
 804		AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
 805		AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
 806		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 807		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
 808			      PCI_FUNC(ctrl->pdev->devfn));
 809	} else {
 810		req->hdr.version = MBX_CMD_VER2;
 811		req->page_size = 1;
 812		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
 813			      ctxt, coalesce_wm);
 814		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
 815			      ctxt, no_delay);
 816		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
 817			      __ilog2_u32(cq->len / 256));
 818		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
 819		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
 820		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
 821		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
 822	}
 823
 
 
 
 
 
 
 
 
 
 
 
 824	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 825
 826	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 827
 828	status = be_mbox_notify(ctrl);
 829	if (!status) {
 830		cq->id = le16_to_cpu(resp->cq_id);
 831		cq->created = true;
 832	} else
 833		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 834			    "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
 835			    status);
 836
 837	mutex_unlock(&ctrl->mbox_lock);
 838
 839	return status;
 840}
 841
 842static u32 be_encoded_q_len(int q_len)
 843{
 844	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
 845	if (len_encoded == 16)
 846		len_encoded = 0;
 847	return len_encoded;
 848}
 849
 850int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
 851			struct be_queue_info *mccq,
 852			struct be_queue_info *cq)
 853{
 854	struct be_mcc_wrb *wrb;
 855	struct be_cmd_req_mcc_create_ext *req;
 856	struct be_dma_mem *q_mem = &mccq->dma_mem;
 857	struct be_ctrl_info *ctrl;
 858	void *ctxt;
 859	int status;
 860
 861	mutex_lock(&phba->ctrl.mbox_lock);
 862	ctrl = &phba->ctrl;
 863	wrb = wrb_from_mbox(&ctrl->mbox_mem);
 864	memset(wrb, 0, sizeof(*wrb));
 865	req = embedded_payload(wrb);
 866	ctxt = &req->context;
 867
 868	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 869
 870	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 871			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
 872
 873	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
 874	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
 875	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
 876	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
 877
 878	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
 879		      PCI_FUNC(phba->pcidev->devfn));
 880	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
 881	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
 882		be_encoded_q_len(mccq->len));
 883	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
 884
 885	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 886
 887	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 888
 889	status = be_mbox_notify(ctrl);
 890	if (!status) {
 891		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
 892		mccq->id = le16_to_cpu(resp->id);
 893		mccq->created = true;
 894	}
 895	mutex_unlock(&phba->ctrl.mbox_lock);
 896
 897	return status;
 898}
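
/*
 * The _EXT variant of MCC create is used above so that async_evt_bitmap
 * can be passed: the bitmap opts this MCCQ in to the async event codes
 * the driver wants delivered (link state, iSCSI and SLI events).
 */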
 899
 900int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 901			  int queue_type)
 902{
 903	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 904	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
 905	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 906	u8 subsys = 0, opcode = 0;
 907	int status;
 908
 909	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 910		    "BC_%d : In beiscsi_cmd_q_destroy "
 911		    "queue_type : %d\n", queue_type);
 912
 913	mutex_lock(&ctrl->mbox_lock);
 914	memset(wrb, 0, sizeof(*wrb));
 915	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 916
 917	switch (queue_type) {
 918	case QTYPE_EQ:
 919		subsys = CMD_SUBSYSTEM_COMMON;
 920		opcode = OPCODE_COMMON_EQ_DESTROY;
 921		break;
 922	case QTYPE_CQ:
 923		subsys = CMD_SUBSYSTEM_COMMON;
 924		opcode = OPCODE_COMMON_CQ_DESTROY;
 925		break;
 926	case QTYPE_MCCQ:
 927		subsys = CMD_SUBSYSTEM_COMMON;
 928		opcode = OPCODE_COMMON_MCC_DESTROY;
 929		break;
 930	case QTYPE_WRBQ:
 931		subsys = CMD_SUBSYSTEM_ISCSI;
 932		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
 933		break;
 934	case QTYPE_DPDUQ:
 935		subsys = CMD_SUBSYSTEM_ISCSI;
 936		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
 937		break;
 938	case QTYPE_SGL:
 939		subsys = CMD_SUBSYSTEM_ISCSI;
 940		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
 941		break;
 942	default:
 943		mutex_unlock(&ctrl->mbox_lock);
 944		BUG();
 945	}
 946	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
 947	if (queue_type != QTYPE_SGL)
 948		req->id = cpu_to_le16(q->id);
 949
 950	status = be_mbox_notify(ctrl);
 951
 952	mutex_unlock(&ctrl->mbox_lock);
 953	return status;
 954}
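
/*
 * A minimal teardown sketch (hypothetical caller, not part of this
 * file): queues are destroyed in the reverse order of creation, and
 * only QTYPE_SGL is issued without a queue (q == NULL, no ring id):
 *
 *	beiscsi_cmd_q_destroy(ctrl, mccq, QTYPE_MCCQ);
 *	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
 *	beiscsi_cmd_q_destroy(ctrl, eq, QTYPE_EQ);
 *	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
 */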
 955
 956/**
 957 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 958 * @ctrl: ptr to ctrl_info
 959 * @cq: Completion Queue
 960 * @dq: Default Queue
 961 * @length: ring size
 962 * @entry_size: size of each entry in DEFQ
 963 * @is_header: Header or Data DEFQ
 964 * @ulp_num: Bind to which ULP
 965 *
 966 * Create the HDR/Data DEFQ for the passed ULP. Unsolicited PDUs are
 967 * posted on this queue by the FW.
 968 *
 969 * return
 970 *	Success: 0
 971 *	Failure: Non-Zero Value
 972 *
 973 **/
 974int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 975				    struct be_queue_info *cq,
 976				    struct be_queue_info *dq, int length,
 977				    int entry_size, uint8_t is_header,
 978				    uint8_t ulp_num)
 979{
 980	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 981	struct be_defq_create_req *req = embedded_payload(wrb);
 982	struct be_dma_mem *q_mem = &dq->dma_mem;
 983	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 984	void *ctxt = &req->context;
 985	int status;
 986
 987	mutex_lock(&ctrl->mbox_lock);
 988	memset(wrb, 0, sizeof(*wrb));
 989
 990	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 991
 992	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
 993			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 994
 995	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
 996	if (phba->fw_config.dual_ulp_aware) {
 997		req->ulp_num = ulp_num;
 998		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
 999		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
1000	}
1001
1002	if (is_chip_be2_be3r(phba)) {
1003		AMAP_SET_BITS(struct amap_be_default_pdu_context,
1004			      rx_pdid, ctxt, 0);
1005		AMAP_SET_BITS(struct amap_be_default_pdu_context,
1006			      rx_pdid_valid, ctxt, 1);
1007		AMAP_SET_BITS(struct amap_be_default_pdu_context,
1008			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
1009		AMAP_SET_BITS(struct amap_be_default_pdu_context,
1010			      ring_size, ctxt,
1011			      be_encoded_q_len(length /
1012			      sizeof(struct phys_addr)));
1013		AMAP_SET_BITS(struct amap_be_default_pdu_context,
1014			      default_buffer_size, ctxt, entry_size);
1015		AMAP_SET_BITS(struct amap_be_default_pdu_context,
1016			      cq_id_recv, ctxt,	cq->id);
1017	} else {
1018		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1019			      rx_pdid, ctxt, 0);
1020		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1021			      rx_pdid_valid, ctxt, 1);
1022		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1023			      ring_size, ctxt,
1024			      be_encoded_q_len(length /
1025			      sizeof(struct phys_addr)));
1026		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1027			      default_buffer_size, ctxt, entry_size);
1028		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1029			      cq_id_recv, ctxt, cq->id);
1030	}
1031
1032	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1033
1034	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1035
1036	status = be_mbox_notify(ctrl);
1037	if (!status) {
1038		struct be_ring *defq_ring;
1039		struct be_defq_create_resp *resp = embedded_payload(wrb);
1040
1041		dq->id = le16_to_cpu(resp->id);
1042		dq->created = true;
1043		if (is_header)
1044			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
1045		else
1046			defq_ring = &phba->phwi_ctrlr->
1047				    default_pdu_data[ulp_num];
1048
1049		defq_ring->id = dq->id;
1050
1051		if (!phba->fw_config.dual_ulp_aware) {
1052			defq_ring->ulp_num = BEISCSI_ULP0;
1053			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
1054		} else {
1055			defq_ring->ulp_num = resp->ulp_num;
1056			defq_ring->doorbell_offset = resp->doorbell_offset;
1057		}
1058	}
1059	mutex_unlock(&ctrl->mbox_lock);
1060
1061	return status;
1062}
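
/*
 * Illustrative sizing (assuming sizeof(struct phys_addr) == 8): a DEFQ
 * allocated with length == 4096 holds 512 ring entries, so ring_size
 * above is programmed as be_encoded_q_len(512) == fls(512) == 10.
 */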
1063
1064/**
1065 * be_cmd_wrbq_create()- Create WRBQ
1066 * @ctrl: ptr to ctrl_info
1067 * @q_mem: memory details for the queue
1068 * @wrbq: queue info
1069 * @pwrb_context: ptr to wrb_context
1070 * @ulp_num: ULP on which the WRBQ is to be created
1071 *
1072 * Create WRBQ on the passed ULP_NUM.
1073 *
1074 **/
1075int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
1076			struct be_dma_mem *q_mem,
1077			struct be_queue_info *wrbq,
1078			struct hwi_wrb_context *pwrb_context,
1079			uint8_t ulp_num)
1080{
1081	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1082	struct be_wrbq_create_req *req = embedded_payload(wrb);
1083	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
1084	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1085	int status;
1086
1087	mutex_lock(&ctrl->mbox_lock);
1088	memset(wrb, 0, sizeof(*wrb));
1089
1090	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1091
1092	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1093		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
1094	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1095
1096	if (phba->fw_config.dual_ulp_aware) {
1097		req->ulp_num = ulp_num;
1098		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
1099		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
1100	}
1101
1102	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1103
1104	status = be_mbox_notify(ctrl);
1105	if (!status) {
1106		wrbq->id = le16_to_cpu(resp->cid);
1107		wrbq->created = true;
1108
1109		pwrb_context->cid = wrbq->id;
1110		if (!phba->fw_config.dual_ulp_aware) {
1111			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
1112			pwrb_context->ulp_num = BEISCSI_ULP0;
1113		} else {
1114			pwrb_context->ulp_num = resp->ulp_num;
1115			pwrb_context->doorbell_offset = resp->doorbell_offset;
1116		}
1117	}
1118	mutex_unlock(&ctrl->mbox_lock);
1119	return status;
1120}
1121
1122int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
1123				    struct be_dma_mem *q_mem)
1124{
1125	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1126	struct be_post_template_pages_req *req = embedded_payload(wrb);
1127	int status;
1128
1129	mutex_lock(&ctrl->mbox_lock);
1130
1131	memset(wrb, 0, sizeof(*wrb));
1132	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1133	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1134			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
1135			   sizeof(*req));
1136
1137	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1138	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1139	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1140
1141	status = be_mbox_notify(ctrl);
1142	mutex_unlock(&ctrl->mbox_lock);
1143	return status;
1144}
1145
1146int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
1147{
1148	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1149	struct be_remove_template_pages_req *req = embedded_payload(wrb);
1150	int status;
1151
1152	mutex_lock(&ctrl->mbox_lock);
1153
1154	memset(wrb, 0, sizeof(*wrb));
1155	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1156	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1157			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
1158			   sizeof(*req));
1159
1160	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1161
1162	status = be_mbox_notify(ctrl);
1163	mutex_unlock(&ctrl->mbox_lock);
1164	return status;
1165}
1166
1167int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
1168				struct be_dma_mem *q_mem,
1169				u32 page_offset, u32 num_pages)
1170{
1171	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1172	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
1173	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1174	int status;
1175	unsigned int curr_pages;
1176	u32 temp_num_pages = num_pages;
1177
1178	if (num_pages == 0xff)
1179		num_pages = 1;
1180
1181	mutex_lock(&ctrl->mbox_lock);
1182	do {
1183		memset(wrb, 0, sizeof(*wrb));
1184		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1185		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1186				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
1187				   sizeof(*req));
1188		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
1189						pages);
1190		req->num_pages = min(num_pages, curr_pages);
1191		req->page_offset = page_offset;
1192		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
1193		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
1194		page_offset += req->num_pages;
1195		num_pages -= req->num_pages;
1196
1197		if (temp_num_pages == 0xff)
1198			req->num_pages = temp_num_pages;
1199
1200		status = be_mbox_notify(ctrl);
1201		if (status) {
1202			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1203				    "BC_%d : FW CMD to map iscsi frags failed.\n");
1204
1205			goto error;
1206		}
1207	} while (num_pages > 0);
1208error:
1209	mutex_unlock(&ctrl->mbox_lock);
1210	if (status != 0)
1211		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
1212	return status;
1213}
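
/*
 * Illustrative walk-through of the chunking above (pages[] capacity
 * assumed): if the WRB holds 26 page entries and num_pages is 64, the
 * loop issues three mailbox commands covering pages 0-25, 26-51 and
 * 52-63, advancing q_mem->dma and page_offset each round. The special
 * value 0xff is clamped to 1 so the loop runs once, but the value sent
 * to the FW is restored to 0xff via temp_num_pages.
 */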
1214
1215/**
1216 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
1217 * @phba: device priv structure instance
1218 * @vlan_tag: TAG to be set
1219 *
1220 * Set the VLAN_TAG for the adapter or disable VLAN on the adapter
1221 *
1222 * returns
1223 *	TAG for the MBX Cmd, 0 on failure
1224 **/
1225int be_cmd_set_vlan(struct beiscsi_hba *phba,
1226		     uint16_t vlan_tag)
1227{
1228	unsigned int tag;
1229	struct be_mcc_wrb *wrb;
1230	struct be_cmd_set_vlan_req *req;
1231	struct be_ctrl_info *ctrl = &phba->ctrl;
1232
1233	if (mutex_lock_interruptible(&ctrl->mbox_lock))
1234		return 0;
1235	wrb = alloc_mcc_wrb(phba, &tag);
1236	if (!wrb) {
1237		mutex_unlock(&ctrl->mbox_lock);
1238		return 0;
1239	}
1240
1241	req = embedded_payload(wrb);
1242	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1243	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1244			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
1245			   sizeof(*req));
1246
1247	req->interface_hndl = phba->interface_handle;
1248	req->vlan_priority = vlan_tag;
1249
1250	be_mcc_notify(phba, tag);
1251	mutex_unlock(&ctrl->mbox_lock);
1252
1253	return tag;
1254}
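
/*
 * Sketch of a caller (hypothetical; beiscsi_mccq_compl_wait() is the
 * driver's MCC completion helper, assumed here): the returned tag is 0
 * on failure, otherwise it identifies the posted WRB to wait on.
 *
 *	tag = be_cmd_set_vlan(phba, vlan_tag);
 *	if (!tag)
 *		return -EBUSY;
 *	ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
 */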
1255
1256int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1257			       struct beiscsi_hba *phba)
1258{
1259	struct be_dma_mem nonemb_cmd;
1260	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1261	struct be_mgmt_controller_attributes *req;
1262	struct be_sge *sge = nonembedded_sgl(wrb);
1263	int status = 0;
1264
1265	nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
1266				sizeof(struct be_mgmt_controller_attributes),
1267				&nonemb_cmd.dma, GFP_KERNEL);
1268	if (nonemb_cmd.va == NULL) {
1269		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1270			    "BG_%d : dma_alloc_coherent failed in %s\n",
1271			    __func__);
1272		return -ENOMEM;
1273	}
1274	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
1275	req = nonemb_cmd.va;
1276	memset(req, 0, sizeof(*req));
1277	mutex_lock(&ctrl->mbox_lock);
1278	memset(wrb, 0, sizeof(*wrb));
1279	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1280	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1281			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
1282	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
1283	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
1284	sge->len = cpu_to_le32(nonemb_cmd.size);
1285	status = be_mbox_notify(ctrl);
1286	if (!status) {
1287		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
1288
1289		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1290			    "BG_%d : Firmware Version of CMD : %s\n"
1291			    "Firmware Version is : %s\n"
1292			    "Developer Build, not performing version check...\n",
1293			    resp->params.hba_attribs.
1294			    flashrom_version_string,
1295			    resp->params.hba_attribs.
1296			    firmware_version_string);
1297
1298		phba->fw_config.iscsi_features =
1299				resp->params.hba_attribs.iscsi_features;
1300		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1301			    "BM_%d : phba->fw_config.iscsi_features = %d\n",
1302			    phba->fw_config.iscsi_features);
1303		memcpy(phba->fw_ver_str, resp->params.hba_attribs.
1304		       firmware_version_string, BEISCSI_VER_STRLEN);
1305	} else
1306		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1307			    "BG_%d : Failed in beiscsi_check_supported_fw\n");
1308	mutex_unlock(&ctrl->mbox_lock);
1309	if (nonemb_cmd.va)
1310		dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
1311				    nonemb_cmd.va, nonemb_cmd.dma);
1312
1313	return status;
1314}
1315
1316/**
1317 * beiscsi_get_fw_config()- Get the FW config for the function
1318 * @ctrl: ptr to Ctrl Info
1319 * @phba: ptr to the dev priv structure
1320 *
1321 * Get the FW config and resources available for the function.
1322 * The resources are created based on the count received here.
1323 *
1324 * return
1325 *	Success: 0
1326 *	Failure: Non-Zero Value
1327 **/
1328int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
1329			  struct beiscsi_hba *phba)
1330{
1331	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1332	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
1333	uint32_t cid_count, icd_count;
1334	int status = -EINVAL;
1335	uint8_t ulp_num = 0;
1336
1337	mutex_lock(&ctrl->mbox_lock);
1338	memset(wrb, 0, sizeof(*wrb));
1339	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
1340
1341	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
1342			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
1343			   EMBED_MBX_MAX_PAYLOAD_SIZE);
1344
1345	if (be_mbox_notify(ctrl)) {
1346		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1347			    "BG_%d : Failed in beiscsi_get_fw_config\n");
1348		goto fail_init;
1349	}
1350
1351	/* FW response formats depend on port id */
1352	phba->fw_config.phys_port = pfw_cfg->phys_port;
1353	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
1354		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1355			    "BG_%d : invalid physical port id %d\n",
1356			    phba->fw_config.phys_port);
1357		goto fail_init;
1358	}
1359
1360	/* populate and check FW config against min and max values */
1361	if (!is_chip_be2_be3r(phba)) {
1362		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
1363		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
1364		if (phba->fw_config.eqid_count == 0 ||
1365		    phba->fw_config.eqid_count > 2048) {
1366			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1367				    "BG_%d : invalid EQ count %d\n",
1368				    phba->fw_config.eqid_count);
1369			goto fail_init;
1370		}
1371		if (phba->fw_config.cqid_count == 0 ||
1372		    phba->fw_config.cqid_count > 4096) {
1373			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1374				    "BG_%d : invalid CQ count %d\n",
1375				    phba->fw_config.cqid_count);
1376			goto fail_init;
1377		}
1378		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1379			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
1380			    phba->fw_config.eqid_count,
1381			    phba->fw_config.cqid_count);
1382	}
1383
1384	/**
1385	 * Check on which ULPs the iSCSI protocol is loaded.
1386	 * Set the bit for each such ULP; this flag is used
1387	 * throughout the code to check on which ULPs the
1388	 * iSCSI protocol is loaded.
1389	 **/
1390	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
1391		if (pfw_cfg->ulp[ulp_num].ulp_mode &
1392		    BEISCSI_ULP_ISCSI_INI_MODE) {
1393			set_bit(ulp_num, &phba->fw_config.ulp_supported);
1394
1395			/* Get the CID, ICD and Chain count for each ULP */
1396			phba->fw_config.iscsi_cid_start[ulp_num] =
1397				pfw_cfg->ulp[ulp_num].sq_base;
1398			phba->fw_config.iscsi_cid_count[ulp_num] =
1399				pfw_cfg->ulp[ulp_num].sq_count;
1400
1401			phba->fw_config.iscsi_icd_start[ulp_num] =
1402				pfw_cfg->ulp[ulp_num].icd_base;
1403			phba->fw_config.iscsi_icd_count[ulp_num] =
1404				pfw_cfg->ulp[ulp_num].icd_count;
1405
1406			phba->fw_config.iscsi_chain_start[ulp_num] =
1407				pfw_cfg->chain_icd[ulp_num].chain_base;
1408			phba->fw_config.iscsi_chain_count[ulp_num] =
1409				pfw_cfg->chain_icd[ulp_num].chain_count;
1410
1411			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1412				    "BG_%d : Function loaded on ULP : %d\n"
1413				    "\tiscsi_cid_count : %d\n"
1414				    "\tiscsi_cid_start : %d\n"
1415				    "\tiscsi_icd_count : %d\n"
1416				    "\tiscsi_icd_start : %d\n",
1417				    ulp_num,
1418				    phba->fw_config.
1419				    iscsi_cid_count[ulp_num],
1420				    phba->fw_config.
1421				    iscsi_cid_start[ulp_num],
1422				    phba->fw_config.
1423				    iscsi_icd_count[ulp_num],
1424				    phba->fw_config.
1425				    iscsi_icd_start[ulp_num]);
1426		}
1427	}
1428
1429	if (phba->fw_config.ulp_supported == 0) {
1430		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1431			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
1432			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
1433			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
1434		goto fail_init;
1435	}
1436
1437	/**
1438	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
1439	 **/
1440	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
1441		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
1442			break;
1443	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
1444	if (icd_count == 0 || icd_count > 65536) {
1445		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1446			    "BG_%d: invalid ICD count %d\n", icd_count);
1447		goto fail_init;
1448	}
1449
1450	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
1451		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
1452	if (cid_count == 0 || cid_count > 4096) {
1453		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1454			    "BG_%d: invalid CID count %d\n", cid_count);
1455		goto fail_init;
1456	}
1457
1458	/**
1459	 * Check whether the FW is dual-ULP aware, i.e. can handle
1460	 * either of the protocols.
1461	 */
1462	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
1463					  BEISCSI_FUNC_DUA_MODE);
1464
1465	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1466		    "BG_%d : DUA Mode : 0x%x\n",
1467		    phba->fw_config.dual_ulp_aware);
1468
1469	/* all set, continue using this FW config */
1470	status = 0;
1471fail_init:
1472	mutex_unlock(&ctrl->mbox_lock);
1473	return status;
1474}
1475
1476/**
1477 * beiscsi_get_port_name()- Get port name for the function
1478 * @ctrl: ptr to Ctrl Info
1479 * @phba: ptr to the dev priv structure
1480 *
1481 * Get the alphanumeric character for port
1482 *
1483 **/
1484int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
1485{
1486	int ret = 0;
1487	struct be_mcc_wrb *wrb;
1488	struct be_cmd_get_port_name *ioctl;
1489
1490	mutex_lock(&ctrl->mbox_lock);
1491	wrb = wrb_from_mbox(&ctrl->mbox_mem);
1492	memset(wrb, 0, sizeof(*wrb));
1493	ioctl = embedded_payload(wrb);
1494
1495	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1496	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1497			   OPCODE_COMMON_GET_PORT_NAME,
1498			   EMBED_MBX_MAX_PAYLOAD_SIZE);
1499	ret = be_mbox_notify(ctrl);
1500	phba->port_name = 0;
1501	if (!ret) {
1502		phba->port_name = (ioctl->p.resp.port_names >>
1503				   (phba->fw_config.phys_port * 8)) & 0xff;
1504	} else {
1505		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1506			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
1507			    ret, ioctl->h.resp_hdr.status);
1508	}
1509
1510	if (phba->port_name == 0)
1511		phba->port_name = '?';
1512
1513	mutex_unlock(&ctrl->mbox_lock);
1514	return ret;
1515}
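
/*
 * Worked example (values assumed): port_names packs one ASCII name
 * byte per physical port. With port_names == 0x44434241 ("ABCD") and
 * phys_port == 2:
 *
 *	(0x44434241 >> (2 * 8)) & 0xff == 0x43 == 'C'
 */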
1516
1517int beiscsi_set_host_data(struct beiscsi_hba *phba)
1518{
1519	struct be_ctrl_info *ctrl = &phba->ctrl;
1520	struct be_cmd_set_host_data *ioctl;
1521	struct be_mcc_wrb *wrb;
1522	int ret = 0;
1523
1524	if (is_chip_be2_be3r(phba))
1525		return ret;
1526
1527	mutex_lock(&ctrl->mbox_lock);
1528	wrb = wrb_from_mbox(&ctrl->mbox_mem);
1529	memset(wrb, 0, sizeof(*wrb));
1530	ioctl = embedded_payload(wrb);
1531
1532	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1533	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1534			   OPCODE_COMMON_SET_HOST_DATA,
1535			   EMBED_MBX_MAX_PAYLOAD_SIZE);
1536	ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
1537	ioctl->param.req.param_len =
1538		snprintf((char *)ioctl->param.req.param_data,
1539			 sizeof(ioctl->param.req.param_data),
1540			 "Linux iSCSI v%s", BUILD_STR);
1541	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
1542	if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
1543		ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
1544	ret = be_mbox_notify(ctrl);
1545	if (!ret) {
1546		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1547			    "BG_%d : HBA set host driver version\n");
1548	} else {
1549		/**
1550		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
1551		 * Older FW versions return this error.
1552		 */
1553		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
1554				ret == MCC_STATUS_INVALID_LENGTH)
1555			__beiscsi_log(phba, KERN_INFO,
1556				      "BG_%d : HBA failed to set host driver version\n");
1557	}
1558
1559	mutex_unlock(&ctrl->mbox_lock);
1560	return ret;
1561}
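
/*
 * Worked example of the length handling above (version string
 * assumed): for "Linux iSCSI v11.4.0.1", snprintf() returns 21; the
 * +1 covers the NUL and ALIGN(22, 4) rounds the length sent to the FW
 * up to 24 bytes, capped at BE_CMD_MAX_DRV_VERSION.
 */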
1562
1563int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
1564{
1565	struct be_ctrl_info *ctrl = &phba->ctrl;
1566	struct be_cmd_set_features *ioctl;
1567	struct be_mcc_wrb *wrb;
1568	int ret = 0;
1569
1570	mutex_lock(&ctrl->mbox_lock);
1571	wrb = wrb_from_mbox(&ctrl->mbox_mem);
1572	memset(wrb, 0, sizeof(*wrb));
1573	ioctl = embedded_payload(wrb);
1574
1575	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1576	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1577			   OPCODE_COMMON_SET_FEATURES,
1578			   EMBED_MBX_MAX_PAYLOAD_SIZE);
1579	ioctl->feature = BE_CMD_SET_FEATURE_UER;
1580	ioctl->param_len = sizeof(ioctl->param.req);
1581	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
1582	ret = be_mbox_notify(ctrl);
1583	if (!ret) {
1584		phba->ue2rp = ioctl->param.resp.ue2rp;
1585		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
1586		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1587			    "BG_%d : HBA error recovery supported\n");
1588	} else {
1589		/**
1590		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
1591		 * Older FW versions return this error.
1592		 */
1593		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
1594		    ret == MCC_STATUS_INVALID_LENGTH)
1595			__beiscsi_log(phba, KERN_INFO,
1596				      "BG_%d : HBA error recovery not supported\n");
1597	}
1598
1599	mutex_unlock(&ctrl->mbox_lock);
1600	return ret;
1601}
1602
1603static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
1604{
1605	u32 sem;
1606
1607	if (is_chip_be2_be3r(phba))
1608		sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
1609	else
1610		pci_read_config_dword(phba->pcidev,
1611				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
1612	return sem;
1613}
1614
1615int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
1616{
1617	u32 loop, post, rdy = 0;
1618
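	/* up to 1000 polls, 60ms apart: wait roughly a minute for POST */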
1619	loop = 1000;
1620	while (loop--) {
1621		post = beiscsi_get_post_stage(phba);
1622		if (post & POST_ERROR_BIT)
1623			break;
1624		if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
1625			rdy = 1;
1626			break;
1627		}
1628		msleep(60);
1629	}
1630
1631	if (!rdy) {
1632		__beiscsi_log(phba, KERN_ERR,
1633			      "BC_%d : FW not ready 0x%x\n", post);
1634	}
1635
1636	return rdy;
1637}
1638
1639int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
1640{
1641	struct be_ctrl_info *ctrl = &phba->ctrl;
1642	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1643	struct be_post_sgl_pages_req *req;
1644	int status;
1645
1646	mutex_lock(&ctrl->mbox_lock);
1647
1648	req = embedded_payload(wrb);
1649	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1650	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1651			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1652	status = be_mbox_notify(ctrl);
1653
1654	mutex_unlock(&ctrl->mbox_lock);
1655	return status;
1656}
1657
1658int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
1659{
1660	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1661	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1662	u8 *endian_check;
1663	int status;
1664
1665	mutex_lock(&ctrl->mbox_lock);
1666	memset(wrb, 0, sizeof(*wrb));
1667
1668	endian_check = (u8 *) wrb;
1669	if (load) {
1670		/* to start communicating */
1671		*endian_check++ = 0xFF;
1672		*endian_check++ = 0x12;
1673		*endian_check++ = 0x34;
1674		*endian_check++ = 0xFF;
1675		*endian_check++ = 0xFF;
1676		*endian_check++ = 0x56;
1677		*endian_check++ = 0x78;
1678		*endian_check++ = 0xFF;
1679	} else {
1680		/* to stop communicating */
1681		*endian_check++ = 0xFF;
1682		*endian_check++ = 0xAA;
1683		*endian_check++ = 0xBB;
1684		*endian_check++ = 0xFF;
1685		*endian_check++ = 0xFF;
1686		*endian_check++ = 0xCC;
1687		*endian_check++ = 0xDD;
1688		*endian_check = 0xFF;
1689	}
1690	be_dws_cpu_to_le(wrb, sizeof(*wrb));
1691
1692	status = be_mbox_notify(ctrl);
1693	if (status)
1694		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1695			    "BC_%d : special WRB message failed\n");
1696	mutex_unlock(&ctrl->mbox_lock);
1697	return status;
1698}
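
/*
 * Note on the magic bytes above (interpretation hedged): the WRB
 * carries FF 12 34 FF FF 56 78 FF on load and FF AA BB FF FF CC DD FF
 * on unload, then goes through be_dws_cpu_to_le(). The asymmetric
 * payload bytes framed by 0xFF let the FW both recognize the special
 * WRB and infer the host byte order.
 */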
1699
1700int beiscsi_init_sliport(struct beiscsi_hba *phba)
1701{
1702	int status;
1703
1704	/* check POST stage before talking to FW */
1705	status = beiscsi_check_fw_rdy(phba);
1706	if (!status)
1707		return -EIO;
1708
1709	/* clear all error states after checking FW rdy */
1710	phba->state &= ~BEISCSI_HBA_IN_ERR;
1711
1712	/* clear UER support; it is probed again after reset */
1713	phba->state &= ~BEISCSI_HBA_UER_SUPP;
1714
1715	/*
1716	 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
1717	 * It should clean up any stale info in FW for this fn.
1718	 */
1719	status = beiscsi_cmd_function_reset(phba);
1720	if (status) {
1721		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1722			    "BC_%d : SLI Function Reset failed\n");
1723		return status;
1724	}
1725
1726	/* indicate driver is loading */
1727	return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
1728}
1729
1730/**
1731 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
1732 * @phba: pointer to dev priv structure
1733 * @ulp: ULP number.
1734 *
1735 * return
1736 *	Success: 0
1737 *	Failure: Non-Zero Value
1738 **/
1739int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
1740{
1741	struct be_ctrl_info *ctrl = &phba->ctrl;
1742	struct iscsi_cleanup_req_v1 *req_v1;
1743	struct iscsi_cleanup_req *req;
1744	u16 hdr_ring_id, data_ring_id;
1745	struct be_mcc_wrb *wrb;
1746	int status;
1747
1748	mutex_lock(&ctrl->mbox_lock);
1749	wrb = wrb_from_mbox(&ctrl->mbox_mem);
1750
1751	hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
1752	data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
1753	if (is_chip_be2_be3r(phba)) {
1754		req = embedded_payload(wrb);
1755		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1756		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1757				   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
1758		req->chute = (1 << ulp);
1759		/* BE2/BE3 FW creates 8-bit ring id */
1760		req->hdr_ring_id = hdr_ring_id;
1761		req->data_ring_id = data_ring_id;
1762	} else {
1763		req_v1 = embedded_payload(wrb);
1764		be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
1765		be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
1766				   OPCODE_COMMON_ISCSI_CLEANUP,
1767				   sizeof(*req_v1));
1768		req_v1->hdr.version = 1;
1769		req_v1->chute = (1 << ulp);
1770		req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
1771		req_v1->data_ring_id = cpu_to_le16(data_ring_id);
1772	}
1773
1774	status = be_mbox_notify(ctrl);
1775	if (status)
1776		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
1777			    "BG_%d : %s failed for ULP %d\n", __func__, ulp);
1778	mutex_unlock(&ctrl->mbox_lock);
1779	return status;
1780}
1781
1782/*
1783 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
1784 * @phba: Driver priv structure
1785 *
1786 * Read registers linked to UE and check for the UE status
1787 **/
1788int beiscsi_detect_ue(struct beiscsi_hba *phba)
1789{
1790	uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
1791	uint32_t ue_hi = 0, ue_lo = 0;
1792	uint8_t i = 0;
1793	int ret = 0;
1794
1795	pci_read_config_dword(phba->pcidev,
1796			      PCICFG_UE_STATUS_LOW, &ue_lo);
1797	pci_read_config_dword(phba->pcidev,
1798			      PCICFG_UE_STATUS_MASK_LOW,
1799			      &ue_mask_lo);
1800	pci_read_config_dword(phba->pcidev,
1801			      PCICFG_UE_STATUS_HIGH,
1802			      &ue_hi);
1803	pci_read_config_dword(phba->pcidev,
1804			      PCICFG_UE_STATUS_MASK_HI,
1805			      &ue_mask_hi);
1806
1807	ue_lo = (ue_lo & ~ue_mask_lo);
1808	ue_hi = (ue_hi & ~ue_mask_hi);
1809
1811	if (ue_lo || ue_hi) {
1812		set_bit(BEISCSI_HBA_IN_UE, &phba->state);
1813		__beiscsi_log(phba, KERN_ERR,
1814			      "BC_%d : HBA error detected\n");
1815		ret = 1;
1816	}
1817
1818	if (ue_lo) {
1819		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
1820			if (ue_lo & 1)
1821				__beiscsi_log(phba, KERN_ERR,
1822					      "BC_%d : UE_LOW %s bit set\n",
1823					      desc_ue_status_low[i]);
1824		}
1825	}
1826
1827	if (ue_hi) {
1828		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
1829			if (ue_hi & 1)
1830				__beiscsi_log(phba, KERN_ERR,
1831					      "BC_%d : UE_HIGH %s bit set\n",
1832					      desc_ue_status_hi[i]);
1833		}
1834	}
1835	return ret;
1836}
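
/*
 * Illustrative trace (values assumed): with ue_lo == 0x5 after
 * masking, the walk above logs desc_ue_status_low[0] and
 * desc_ue_status_low[2], one line per set, unmasked UE bit.
 */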
1837
1838/*
1839 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
1840 * @phba: Driver priv structure
1841 *
1842 * Read SLIPORT SEMAPHORE register to check for UER
1843 *
1844 **/
1845int beiscsi_detect_tpe(struct beiscsi_hba *phba)
1846{
1847	u32 post, status;
1848	int ret = 0;
1849
1850	post = beiscsi_get_post_stage(phba);
1851	status = post & POST_STAGE_MASK;
1852	if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
1853	    POST_STAGE_RECOVERABLE_ERR) {
1854		set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
1855		__beiscsi_log(phba, KERN_INFO,
1856			      "BC_%d : HBA error recoverable: 0x%x\n", post);
1857		ret = 1;
1858	} else {
1859		__beiscsi_log(phba, KERN_INFO,
1860			      "BC_%d : HBA in UE: 0x%x\n", post);
1861	}
1862
1863	return ret;
1864}