/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include <linux/prefetch.h>
#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT         12
#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE          (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT     34
#define EHEA_HUGEPAGE_SIZE     (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
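
/*
 * For reference, the constants above work out to: 4 KiB queue pages
 * (1 << 12), 16 MiB sections (1 << 24, i.e. EHEA_PAGES_PER_SECTION =
 * 4096 queue pages per section), and 16 GiB huge pages (1 << 34).
 * The #error above enforces at compile time that the kernel's
 * memory-hotplug section is no smaller than an eHEA section.
 */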

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT	EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE		EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE		0x1
#define EHEA_SWQE3_TYPE		0x2
#define EHEA_RWQE2_TYPE		0x3
#define EHEA_RWQE3_TYPE		0x4
#define EHEA_WR_ID_INDEX	EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL	EHEA_BMASK_IBM(48, 63)
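
/*
 * Illustrative example (not part of this header): a 64-bit wr_id is
 * composed from the fields above with the EHEA_BMASK_SET()/
 * EHEA_BMASK_GET() helpers from ehea.h, along the lines of
 *
 *	u64 wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *
 * and the type is later recovered from a completion with
 *
 *	EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 */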

struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES	252
#define SWQE2_MAX_IMM		(0xD0 - 0x30)
#define SWQE3_MAX_IMM		224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC			0x8000
#define EHEA_SWQE_IP_CHECKSUM		0x4000
#define EHEA_SWQE_TCP_CHECKSUM		0x2000
#define EHEA_SWQE_TSO			0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION	0x0800
#define EHEA_SWQE_VLAN_INSERT		0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT	0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT	0x0100
#define EHEA_SWQE_WRAP_CTL_REC		0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE	0x0040
#define EHEA_SWQE_BIND			0x0020
#define EHEA_SWQE_PURGE			0x0010
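
/*
 * Illustrative example: for a TCP frame with hardware checksumming and
 * a completion interrupt requested, a transmit path would OR flags
 * together like
 *
 *	swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IP_CHECKSUM
 *			 | EHEA_SWQE_TCP_CHECKSUM
 *			 | EHEA_SWQE_SIGNALLED_COMPLETION;
 *
 * (a sketch of how these bits combine, not the driver's exact policy).
 */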

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE 32

struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 reserved2b;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/* Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/* Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __packed;

		/* Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};
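
/*
 * Illustrative example: a transmit routine would pick format 3 for a
 * small frame that fits entirely into the immediate area (a sketch,
 * not the driver's exact transmit logic):
 *
 *	if (skb->len <= SWQE3_MAX_IMM) {
 *		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
 *		swqe->immediate_data_length = skb->len;
 *		skb_copy_from_linear_data(skb,
 *			swqe->u.immdata_nodesc.immediate_data, skb->len);
 *	}
 */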

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT	0x0400

#define EHEA_CQE_TYPE_RQ		0x60
#define EHEA_CQE_STAT_ERR_MASK		0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK	0xF
#define EHEA_CQE_BLIND_CKSUM		0x8000
#define EHEA_CQE_STAT_ERR_TCP		0x4000
#define EHEA_CQE_STAT_ERR_IP		0x2000
#define EHEA_CQE_STAT_ERR_CRC		0x1000

/* Defines which bad send CQE statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK	0x0002

struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};
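
/*
 * Illustrative example: completion processing inspects the status word
 * before trusting the frame, roughly (a sketch, not the driver's exact
 * error handling):
 *
 *	if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *		if (cqe->status & EHEA_CQE_STAT_RESET_MASK)
 *			... schedule a port reset ...
 *		... drop the frame and count the error ...
 *	}
 */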

#define EHEA_EQE_VALID		EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE		EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER	EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER	EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY		EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER	EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID		EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER	EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)

#define EHEA_AER_RESTYPE_QP	0x8
#define EHEA_AER_RESTYPE_CQ	0x4
#define EHEA_AER_RESTYPE_EQ	0x3

/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK	0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK	0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
	u64 entry;
};
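
/*
 * Illustrative example: an event queue entry is a single 64-bit word,
 * decoded with the EHEA_EQE_* masks above, e.g.
 *
 *	struct ehea_eqe *eqe = ehea_poll_eq(eq);
 *
 *	if (eqe && EHEA_BMASK_GET(EHEA_EQE_IS_CQE, eqe->entry))
 *		... handle a completion event for queue number
 *		    EHEA_BMASK_GET(EHEA_EQE_QP_CQ_NUMBER, eqe->entry) ...
 */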

#define ERROR_DATA_LENGTH	EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE		EHEA_BMASK_IBM(0, 7)

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}
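
/*
 * hw_qeit_calc() resolves a byte offset within the (possibly wrapped)
 * queue into a pointer: the bits above EHEA_PAGESHIFT select the queue
 * page, the low 12 bits the entry within that page.  E.g. an offset of
 * 0x1040 in a queue longer than one page lands at byte 0x40 of
 * queue_pages[1].
 */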

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	hw_qeit_inc(queue);
	return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}
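
/*
 * The valid/toggle protocol above: software flips toggle_state every
 * time it wraps around the ring (see hw_qeit_inc()), and an entry is
 * treated as new exactly when bit 7 of its valid byte matches
 * toggle_state.  Illustrative poll loop built on these helpers
 * (ehea_poll_cq()/ehea_inc_cq() below wrap the same calls):
 *
 *	struct ehea_cqe *cqe;
 *
 *	while ((cqe = hw_qeit_get_inc_valid(&cq->hw_queue)))
 *		... process cqe ...
 */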

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;

	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}
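
/*
 * Illustrative transmit sequence using the two helpers above (a
 * sketch; a real xmit path also fills descriptors, checksum offsets,
 * etc.):
 *
 *	int index;
 *	struct ehea_swqe *swqe = ehea_get_swqe(qp, &index);
 *
 *	... fill in wr_id, tx_control and payload ...
 *	ehea_post_swqe(qp, swqe);
 *
 * ehea_post_swqe() issues iosync() first so the WQE contents are
 * visible to the adapter before ehea_update_sqa() signals that a new
 * entry is ready.
 */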

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue */
	EHEA_NEQ		/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif /* __EHEA_QMR_H__ */