// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ehea_phyp.h"

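/*
 * Translate a requested number of queue entries into the log2-style
 * encoding used by the hypervisor: the smallest ld (>= 1) for which
 * (1 << ld) - 1 >= queue_entries, minus one.
 */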
static inline u16 get_order_of_qentries(u16 queue_entries)
{
	u8 ld = 1;		/* ld: logarithmus dualis (binary logarithm) */
	while (((1U << ld) - 1) < queue_entries)
		ld++;
	return ld - 1;
}

/* Defines for H_CALL H_ALLOC_RESOURCE */
#define H_ALL_RES_TYPE_QP	 1
#define H_ALL_RES_TYPE_CQ	 2
#define H_ALL_RES_TYPE_EQ	 3
#define H_ALL_RES_TYPE_MR	 5
#define H_ALL_RES_TYPE_MW	 6

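/*
 * Issue an hcall that returns no output registers.  On a "long busy"
 * return code, sleep for the interval suggested by the hypervisor and
 * retry up to five times before giving up with H_BUSY; any other
 * failure (< H_SUCCESS) is logged together with its input arguments.
 */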
static long ehea_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			pr_err("opcode=%lx ret=%lx"
			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
			       " arg5=%lx arg6=%lx arg7=%lx\n",
			       opcode, ret,
			       arg1, arg2, arg3, arg4, arg5, arg6, arg7);

		return ret;
	}

	return H_BUSY;
}

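/*
 * Issue an hcall that returns up to nine output registers in outs[].
 * Retries on "long busy" like ehea_plpar_hcall_norets().  Failures are
 * logged, except for the expected H_AUTHORITY return from
 * H_MODIFY_HEA_PORT when modifying the CB4 jumbo-frame/speed or the
 * CB7 DUCQPN attributes.
 */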
static long ehea_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	u8 cb_cat;

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);

		if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
		    && (opcode == H_MODIFY_HEA_PORT))
		    && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
		    || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
		    && (arg3 == H_PORT_CB7_DUCQPN)))))
			pr_err("opcode=%lx ret=%lx"
			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
			       " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
			       " arg9=%lx"
			       " out1=%lx out2=%lx out3=%lx out4=%lx"
			       " out5=%lx out6=%lx out7=%lx out8=%lx"
			       " out9=%lx\n",
			       opcode, ret,
			       arg1, arg2, arg3, arg4, arg5,
			       arg6, arg7, arg8, arg9,
			       outs[0], outs[1], outs[2], outs[3], outs[4],
			       outs[5], outs[6], outs[7], outs[8]);
		return ret;
	}

	return H_BUSY;
}

u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
			 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
	return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
				       adapter_handle,		/* R4 */
				       qp_category,		/* R5 */
				       qp_handle,		/* R6 */
				       sel_mask,		/* R7 */
				       __pa(cb_addr),		/* R8 */
				       0, 0);
}

/* input param R5 */
#define H_ALL_RES_QP_EQPO	  EHEA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_QPP	  EHEA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_RQR	  EHEA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_EQEG	  EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_QP_LL_QP	  EHEA_BMASK_IBM(17, 17)
#define H_ALL_RES_QP_DMA128	  EHEA_BMASK_IBM(19, 19)
#define H_ALL_RES_QP_HSM	  EHEA_BMASK_IBM(20, 21)
#define H_ALL_RES_QP_SIGT	  EHEA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_TENURE	  EHEA_BMASK_IBM(48, 55)
#define H_ALL_RES_QP_RES_TYP	  EHEA_BMASK_IBM(56, 63)

/* input param R9  */
#define H_ALL_RES_QP_TOKEN	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_PD		  EHEA_BMASK_IBM(32, 63)

/* input param R10 */
#define H_ALL_RES_QP_MAX_SWQE	  EHEA_BMASK_IBM(4, 7)
#define H_ALL_RES_QP_MAX_R1WQE	  EHEA_BMASK_IBM(12, 15)
#define H_ALL_RES_QP_MAX_R2WQE	  EHEA_BMASK_IBM(20, 23)
#define H_ALL_RES_QP_MAX_R3WQE	  EHEA_BMASK_IBM(28, 31)
/* Max Send Scatter Gather Elements */
#define H_ALL_RES_QP_MAX_SSGE	  EHEA_BMASK_IBM(37, 39)
#define H_ALL_RES_QP_MAX_R1SGE	  EHEA_BMASK_IBM(45, 47)
/* Max Receive SG Elements RQ1 */
#define H_ALL_RES_QP_MAX_R2SGE	  EHEA_BMASK_IBM(53, 55)
#define H_ALL_RES_QP_MAX_R3SGE	  EHEA_BMASK_IBM(61, 63)

/* input param R11 */
#define H_ALL_RES_QP_SWQE_IDL	  EHEA_BMASK_IBM(0, 7)
/* max swqe immediate data length */
#define H_ALL_RES_QP_PORT_NUM	  EHEA_BMASK_IBM(48, 63)

/* input param R12 */
#define H_ALL_RES_QP_TH_RQ2	  EHEA_BMASK_IBM(0, 15)
/* Threshold RQ2 */
#define H_ALL_RES_QP_TH_RQ3	  EHEA_BMASK_IBM(16, 31)
/* Threshold RQ3 */

/* output param R6 */
#define H_ALL_RES_QP_ACT_SWQE	  EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_ACT_R1WQE	  EHEA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_R2WQE	  EHEA_BMASK_IBM(32, 47)
#define H_ALL_RES_QP_ACT_R3WQE	  EHEA_BMASK_IBM(48, 63)

/* output param R7 */
#define H_ALL_RES_QP_ACT_SSGE	  EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_ACT_R1SGE	  EHEA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_R2SGE	  EHEA_BMASK_IBM(16, 23)
#define H_ALL_RES_QP_ACT_R3SGE	  EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)

/* output param R8,R9 */
#define H_ALL_RES_QP_SIZE_SQ	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ1	  EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SIZE_RQ2	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ3	  EHEA_BMASK_IBM(32, 63)

/* output param R11,R12 */
#define H_ALL_RES_QP_LIOBN_SQ	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ1	  EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_LIOBN_RQ2	  EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ3	  EHEA_BMASK_IBM(32, 63)

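/*
 * Allocate a queue pair via H_ALLOC_HEA_RESOURCE and copy the values
 * reported by the hypervisor (QP number, actual WQE counts, queue
 * sizes in pages, LIOBNs) back into init_attr.
 */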
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
			     struct ehea_qp_init_attr *init_attr, const u32 pd,
			     u64 *qp_handle, struct h_epas *h_epas)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	u64 allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6)	/* rq1 & rq2 & rq3 */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0)	/* EQE gen. disabled */
	    | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);

	u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);

	u64 max_r10_reg =
	    EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
			   get_order_of_qentries(init_attr->max_nr_send_wqes))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
			     get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
			     init_attr->wqe_size_enc_rq1)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
			     init_attr->wqe_size_enc_rq2)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
			     init_attr->wqe_size_enc_rq3);

	u64 r11_in =
	    EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
	u64 threshold =
	    EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
	    | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 init_attr->send_cq_handle,	/* R6 */
				 init_attr->recv_cq_handle,	/* R7 */
				 init_attr->aff_eq_handle,	/* R8 */
				 r9_reg,			/* R9 */
				 max_r10_reg,			/* R10 */
				 r11_in,			/* R11 */
				 threshold);			/* R12 */

	*qp_handle = outs[0];
	init_attr->qp_nr = (u32)outs[1];

	init_attr->act_nr_send_wqes =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
	init_attr->act_nr_rwqes_rq1 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
	init_attr->act_nr_rwqes_rq2 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
	init_attr->act_nr_rwqes_rq3 =
	    (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);

	init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
	init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
	init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
	init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;

	init_attr->nr_sq_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
	init_attr->nr_rq1_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
	init_attr->nr_rq2_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
	init_attr->nr_rq3_pages =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);

	init_attr->liobn_sq =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
	init_attr->liobn_rq1 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
	init_attr->liobn_rq2 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
	init_attr->liobn_rq3 =
	    (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);

	if (!hret)
		hcp_epas_ctor(h_epas, outs[6], outs[6]);

	return hret;
}

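/*
 * Allocate a completion queue; the actual number of CQEs and the
 * number of pages needed for the queue are reported back in cq_attr.
 */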
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
			     struct ehea_cq_attr *cq_attr,
			     u64 *cq_handle, struct h_epas *epas)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 H_ALL_RES_TYPE_CQ,		/* R5 */
				 cq_attr->eq_handle,		/* R6 */
				 cq_attr->cq_token,		/* R7 */
				 cq_attr->max_nr_of_cqes,	/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	*cq_handle = outs[0];
	cq_attr->act_nr_of_cqes = outs[3];
	cq_attr->nr_pages = outs[4];

	if (!hret)
		hcp_epas_ctor(epas, outs[5], outs[6]);

	return hret;
}

/* Defines for H_CALL H_ALLOC_RESOURCE */
#define H_ALL_RES_TYPE_QP	 1
#define H_ALL_RES_TYPE_CQ	 2
#define H_ALL_RES_TYPE_EQ	 3
#define H_ALL_RES_TYPE_MR	 5
#define H_ALL_RES_TYPE_MW	 6

/*  input param R5 */
#define H_ALL_RES_EQ_NEQ	     EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN     EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN     EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_EQ_RES_TYPE	     EHEA_BMASK_IBM(56, 63)
/*  input param R6 */
#define H_ALL_RES_EQ_MAX_EQE	     EHEA_BMASK_IBM(32, 63)

/*  output param R6 */
#define H_ALL_RES_EQ_LIOBN	     EHEA_BMASK_IBM(32, 63)

/*  output param R7 */
#define H_ALL_RES_EQ_ACT_EQE	     EHEA_BMASK_IBM(32, 63)

/*  output param R8 */
#define H_ALL_RES_EQ_ACT_PS	     EHEA_BMASK_IBM(32, 63)

/*  output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C    EHEA_BMASK_IBM(30, 31)
#define H_ALL_RES_EQ_ACT_EQ_IST_1    EHEA_BMASK_IBM(40, 63)

/*  output param R10 */
#define H_ALL_RES_EQ_ACT_EQ_IST_2    EHEA_BMASK_IBM(40, 63)

/*  output param R11 */
#define H_ALL_RES_EQ_ACT_EQ_IST_3    EHEA_BMASK_IBM(40, 63)

/*  output param R12 */
#define H_ALL_RES_EQ_ACT_EQ_IST_4    EHEA_BMASK_IBM(40, 63)

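/*
 * Allocate an event queue.  allocate_controls selects the resource
 * type, whether this is a notification EQ (eq_attr->type) and whether
 * EQE generation is inhibited; the interrupt source tokens ist1-ist4
 * are returned in eq_attr.
 */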
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
			     struct ehea_eq_attr *eq_attr, u64 *eq_handle)
{
	u64 hret, allocate_controls;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	/* resource type */
	allocate_controls =
	    EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
	    | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		/* R4 */
				 allocate_controls,		/* R5 */
				 eq_attr->max_nr_of_eqes,	/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */

	*eq_handle = outs[0];
	eq_attr->act_nr_of_eqes = outs[3];
	eq_attr->nr_pages = outs[4];
	eq_attr->ist1 = outs[5];
	eq_attr->ist2 = outs[6];
	eq_attr->ist3 = outs[7];
	eq_attr->ist4 = outs[8];

	return hret;
}

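/*
 * Modify the QP attributes selected by sel_mask using the control
 * block at cb_addr; the hypervisor's outputs are passed back through
 * inv_attr_id, proc_mask, out_swr and out_rwr.
 */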
u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
			  const u64 qp_handle, const u64 sel_mask,
			  void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
			  u16 *out_swr, u16 *out_rwr)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
				 outs,
				 adapter_handle,		/* R4 */
				 (u64) cat,			/* R5 */
				 qp_handle,			/* R6 */
				 sel_mask,			/* R7 */
				 __pa(cb_addr),			/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */

	*inv_attr_id = outs[0];
	*out_swr = outs[3];
	*out_rwr = outs[4];
	*proc_mask = outs[5];

	return hret;
}

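/*
 * Register resource pages (queue or memory-region memory) with the
 * hypervisor; reg_control packs the page size and queue type into R5.
 */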
u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
			  const u8 queue_type, const u64 resource_handle,
			  const u64 log_pageaddr, u64 count)
{
	u64 reg_control;

	reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
		    | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);

	return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
				       adapter_handle,		/* R4 */
				       reg_control,		/* R5 */
				       resource_handle,		/* R6 */
				       log_pageaddr,		/* R7 */
				       count,			/* R8 */
				       0, 0);			/* R9-R10 */
}

u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
			const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
			struct ehea_mr *mr)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
				 outs,
				 adapter_handle,		 /* R4 */
				 orig_mr_handle,		 /* R5 */
				 vaddr_in,			 /* R6 */
				 (((u64)access_ctrl) << 32ULL),	 /* R7 */
				 pd,				 /* R8 */
				 0, 0, 0, 0);			 /* R9-R12 */

	mr->handle = outs[0];
	mr->lkey = (u32)outs[2];

	return hret;
}

u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
				 outs,
				 adapter_handle,		/* R4 */
				 H_DISABLE_GET_EHEA_WQE_P,	/* R5 */
				 qp_handle,			/* R6 */
				 0, 0, 0, 0, 0, 0);		/* R7-R12 */
}

u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
			 u64 force_bit)
{
	return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle,	   /* R4 */
				       res_handle,	   /* R5 */
				       force_bit,
				       0, 0, 0, 0);	   /* R7-R10 */
}

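/*
 * Allocate a memory region.  The bare 5 passed in R5 is the resource
 * type (H_ALL_RES_TYPE_MR); the MR handle and lkey are returned to the
 * caller.
 */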
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
			     const u64 length, const u32 access_ctrl,
			     const u32 pd, u64 *mr_handle, u32 *lkey)
{
	u64 hret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
				 outs,
				 adapter_handle,		   /* R4 */
				 5,				   /* R5 */
				 vaddr,				   /* R6 */
				 length,			   /* R7 */
				 (((u64) access_ctrl) << 32ULL),   /* R8 */
				 pd,				   /* R9 */
				 0, 0, 0);			   /* R10-R12 */

	*mr_handle = outs[0];
	*lkey = (u32)outs[2];
	return hret;
}

u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
			     const u8 pagesize, const u8 queue_type,
			     const u64 log_pageaddr, const u64 count)
{
	if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
		pr_err("not on page boundary\n");
		return H_PARAMETER;
	}

	return ehea_h_register_rpage(adapter_handle, pagesize,
				     queue_type, mr_handle,
				     log_pageaddr, count);
}

u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
	u64 hret, cb_logaddr;

	cb_logaddr = __pa(cb_addr);

	hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
				       adapter_handle,		/* R4 */
				       cb_logaddr,		/* R5 */
				       0, 0, 0, 0, 0);		/* R6-R10 */
#ifdef DEBUG
	ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
#endif
	return hret;
}

u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
			   const u8 cb_cat, const u64 select_mask,
			   void *cb_addr)
{
	u64 port_info;
	u64 cb_logaddr = __pa(cb_addr);
	u64 arr_index = 0;

	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);

	return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
				       adapter_handle,		/* R4 */
				       port_info,		/* R5 */
				       select_mask,		/* R6 */
				       arr_index,		/* R7 */
				       cb_logaddr,		/* R8 */
				       0, 0);			/* R9-R10 */
}

u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
			    const u8 cb_cat, const u64 select_mask,
			    void *cb_addr)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 port_info;
	u64 arr_index = 0;
	u64 cb_logaddr = __pa(cb_addr);

	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
#ifdef DEBUG
	ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
#endif
	return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
				 outs,
				 adapter_handle,		/* R4 */
				 port_info,			/* R5 */
				 select_mask,			/* R6 */
				 arr_index,			/* R7 */
				 cb_logaddr,			/* R8 */
				 0, 0, 0, 0);			/* R9-R12 */
}

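/*
 * Register or deregister a broadcast/multicast MAC address for a port;
 * the caller selects registration or deregistration via hcall_id.
 */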
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
			  const u8 reg_type, const u64 mc_mac_addr,
			  const u16 vlan_id, const u32 hcall_id)
{
	u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
	u64 mac_addr = mc_mac_addr >> 16;

	r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
	r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
	r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
	r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);

	return ehea_plpar_hcall_norets(hcall_id,
				       adapter_handle,		/* R4 */
				       r5_port_num,		/* R5 */
				       r6_reg_type,		/* R6 */
				       r7_mc_mac_addr,		/* R7 */
				       r8_vlan_id,		/* R8 */
				       0, 0);			/* R9-R10 */
}

u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
			const u64 event_mask)
{
	return ehea_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle,		/* R4 */
				       neq_handle,		/* R5 */
				       event_mask,		/* R6 */
				       0, 0, 0, 0);		/* R7-R10 */
}

u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
		      void *rblock)
{
	return ehea_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle,		/* R4 */
				       ressource_handle,	/* R5 */
				       __pa(rblock),		/* R6 */
				       0, 0, 0, 0);		/* R7-R10 */
}