/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>
#include <linux/netdevice.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
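/* For example, a 4096-descriptor ring uses the default block size of
 * VNIC_RQ_BUF_DFLT_BLK_ENTRIES = 64 entries, so
 * VNIC_RQ_BUF_BLKS_NEEDED(4096) = DIV_ROUND_UP(4096, 64) = 64 blocks,
 * which bounds the bufs[] array in struct vnic_rq below.
 */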

struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
	uint64_t wr_id;
};

enum enic_poll_state {
	ENIC_POLL_STATE_IDLE,
	ENIC_POLL_STATE_NAPI,
	ENIC_POLL_STATE_POLL
};

struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;              /* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;
	struct vnic_rq_buf *to_clean;
	void *os_buf_head;
	unsigned int pkts_outstanding;
#ifdef CONFIG_NET_RX_BUSY_POLL
	atomic_t bpoll_state;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
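	/* The extra -1 below accounts for the one descriptor that is
	 * always kept unposted (a common ring convention so that a
	 * completely full ring can be told apart from an empty one).
	 */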
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len,
	uint64_t wrid)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}

enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

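/* Walk the buffer ring from to_clean up to completed_index, calling
 * buf_service() on each buffer; all buffers before completed_index are
 * reported with skipped set.  Descriptors are returned to the ring
 * immediately or left for the caller, depending on desc_return.
 */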
static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}

static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
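
/* Illustrative sketch of a buf_fill callback: allocate a receive buffer,
 * DMA-map it, and hand it to vnic_rq_post().  my_alloc_rx_buf_and_map()
 * and MY_RX_BUF_LEN are hypothetical placeholders, not part of this driver:
 *
 *	static int my_rq_alloc_buf(struct vnic_rq *rq)
 *	{
 *		dma_addr_t dma_addr;
 *		void *buf = my_alloc_rx_buf_and_map(rq, &dma_addr);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		vnic_rq_post(rq, buf, 0, dma_addr, MY_RX_BUF_LEN, 0);
 *		return 0;
 *	}
 *
 * A driver would then call vnic_rq_fill(rq, my_rq_alloc_buf) to keep
 * posting buffers until vnic_rq_desc_avail() drops to zero.
 */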

#ifdef CONFIG_NET_RX_BUSY_POLL
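/* bpoll_state serializes NAPI and busy-poll access to the RQ: each path
 * "locks" the queue by moving the state from IDLE to its own state with
 * atomic_cmpxchg() and unlocks it by setting the state back to IDLE.
 */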
static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
	int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
				ENIC_POLL_STATE_NAPI);

	return (rc == ENIC_POLL_STATE_IDLE);
}

static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
					 struct napi_struct *napi)
{
	WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI);
	napi_gro_flush(napi, false);
	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
	int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
				ENIC_POLL_STATE_POLL);

	return (rc == ENIC_POLL_STATE_IDLE);
}

static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
{
	WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL);
	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
	return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL;
}

#else

static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
	return true;
}

static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
					 struct napi_struct *napi)
{
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
	return false;
}

static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
{
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));

#endif /* _VNIC_RQ_H_ */