/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_eq.h"

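/*
 * Write the current consumer index to the EQ doorbell register; req_not is
 * shifted by the EQ's log_entries and OR'ed into the doorbell value.
 */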
static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
{
	roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
		      (req_not << eq->log_entries), eq->doorbell);
	/* Memory barrier */
	mb();
}

static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			     HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

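/*
 * Return the AEQE at the current consumer index if its owner bit shows it
 * was written by hardware during this pass over the ring, or NULL if the
 * queue is empty.
 */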
static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					 struct hns_roce_aeqe *aeqe, int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SL_ERROR:
		dev_warn(dev, "QP %d, SL error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_PORT_ERROR:
		dev_warn(dev, "QP %d, port error.\n", qpn);
		break;
	default:
		break;
	}
}

static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						struct hns_roce_aeqe *aeqe,
						int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		break;
	}
}

static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
				   struct hns_roce_aeqe *aeqe,
				   int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	int phy_port;
	int qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	phy_port = roce_get_field(aeqe->event.qp_event.qp,
			HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
			HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
	if (qpn <= 1)
		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
			      "QP %d, phy_port %d.\n", qpn, phy_port);
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, qpn, event_type);
}

static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
				   struct hns_roce_aeqe *aeqe,
				   int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqn;

	cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
		    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
		    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}

static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
					struct hns_roce_aeqe *aeqe)
{
	struct device *dev = &hr_dev->pdev->dev;

	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
		dev_warn(dev, "SDB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
		dev_warn(dev, "SDB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
		dev_warn(dev, "SDB almost empty.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
		dev_warn(dev, "ODB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
		dev_warn(dev, "ODB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
		break;
	default:
		break;
	}
}

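/*
 * Poll the asynchronous event queue, dispatch each new AEQE to its handler
 * and advance the consumer index; returns nonzero if any event was handled.
 */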
static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw(eq))) {
		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		/* Memory barrier */
		rmb();

		event_type = roce_get_field(aeqe->asyn,
				HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not supported.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param
					   ));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
			roce_get_field(aeqe->event.ce_event.ceqe,
				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
				 event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to zero\n"
				);
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return aeqes_found;
}

static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			     HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((u8 *)
			(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
			off % HNS_ROCE_BA_SIZE);
}

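/*
 * Return the CEQE at the current consumer index if its owner bit shows it
 * was written by hardware during this pass over the ring, or NULL if the
 * queue is empty.
 */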
static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->ceqe.comp,
		 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
		 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw(eq))) {
		/* Memory barrier */
		rmb();
		cqn = roce_get_field(ceqe->ceqe.comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				"cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return ceqes_found;
}

static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	struct device *dev = &eq->hr_dev->pdev->dev;
	int eqovf_found = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	int i = 0;

	/*
	 * AEQ overflow, ECC multi-bit error and CEQ almost-overflow alarms
	 * must be handled by masking the irq, clearing the interrupt state
	 * (write 1 to clear) and then cancelling the mask again.
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

	if (roce_get_bit(aeshift_val,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		roce_set_bit(caepaest_val,
			     ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);

		if (roce_get_bit(ceshift_val,
		ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			eqovf_found++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
				       ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				       i * CEQ_REG_OFFSET);
			roce_set_bit(cealmovf_val,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				    i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
				     ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				     i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
			       ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
			       HNS_ROCE_INT_MASK_DISABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return eqovf_found;
}

static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	int eqes_found = 0;

	if (likely(eq->type_flag == HNS_ROCE_CEQ))
		/* CEQ irq routine, CEQ is pulse irq, not clear */
		eqes_found = hns_roce_ceq_int(hr_dev, eq);
	else if (likely(eq->type_flag == HNS_ROCE_AEQ))
		/* AEQ irq routine, AEQ is pulse irq, not clear */
		eqes_found = hns_roce_aeq_int(hr_dev, eq);
	else
		/* AEQ queue overflow irq */
		eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);

	return eqes_found;
}

static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
{
	int int_work = 0;
	struct hns_roce_eq  *eq  = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;

	int_work = hns_roce_eq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
			       int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	u32 val;

	val = readl(eqc);

	if (enable_flag)
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);
	writel(val, eqc);
}

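/*
 * Allocate the EQ buffer(s) and program the EQ context registers: state,
 * entry shift, buffer base address and consumer index.
 */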
static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	int num_bas = 0;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44 bit */
	writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12: the buffer address is shifted right by 12 because
	 * the hardware uses 4K pages, and by a further 32 to obtain the
	 * high 32-bit part of the value written to the register.
	 */
	roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(eqcuridx_val,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	writel(eqcuridx_val, (u8 *)eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(eqconsindx_val,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	writel(eqconsindx_val, (u8 *)eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i = i - 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}

static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_eq *eq)
{
	int i = 0;
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}

static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	u32 aemask_val;
	int masken = 0;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}

static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
{
	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
}

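/*
 * Set up all completion and asynchronous event queues: allocate the EQ
 * table, create each EQ, request its interrupt and enable it.
 */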
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq = NULL;
	int eq_num = 0;
	int ret = 0;
	int i = 0;
	int j = 0;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth[i];
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_ceqe);
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_aeqe);
		}
	}

	/* Disable irq */
	hns_roce_int_mask_en(hr_dev);

	/* Configure CE irq interval and burst num */
	hns_roce_ce_int_default_cfg(hr_dev);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < eq_num; j++) {
		ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
				  0, hr_dev->irq_names[j], eq_table->eq + j);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j = j - 1; j >= 0; j--)
		free_irq(eq_table->eq[j].irq, eq_table->eq + j);

err_create_eq_fail:
	for (i = i - 1; i >= 0; i--)
		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	int i;
	int eq_num;
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(eq_table->eq[i].irq, eq_table->eq + i);

		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}