// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2017 - 2018 Intel Corporation.
 */

/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE   BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN   32
#define HFI1_VNIC_SDMA_DESC_WTRMRK 64

/*
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 */
struct vnic_txreq {
	struct sdma_txreq       txreq;
	struct hfi1_vnic_sdma   *sdma;

	struct sk_buff         *skb;
	unsigned char           pad[HFI1_VNIC_MAX_PAD];
	u16                     plen;
	__le64                  pbc_val;
};

static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

		/* combine physically contiguous fragments later? */
		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      skb_frag_off(frag),
				      skb_frag_size(frag),
				      NULL, NULL, NULL);
		if (unlikely(ret))
			goto bail_txadd;
	}

	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}

static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2;  /* PBC: 2 dwords (8 bytes) */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add pbc */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
	return ret;
}

/* set up the last plen bytes of the pad buffer */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;

	ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
			      &tx->txreq, vnic_sdma->pkts_sent);
	/* on -ECOMM, the sdma callback will be called with ABORT status */
	if (unlikely(ret && ret != -ECOMM))
		goto free_desc;

	if (!ret) {
		vnic_sdma->pkts_sent = true;
		iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
	}
	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);
	else
		vnic_sdma->pkts_sent = false;
	return ret;
}
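
/*
 * Example (editorial, not part of the driver): a minimal sketch of how a
 * transmit handler might drive hfi1_vnic_send_dma().  The pbc/plen helpers
 * are hypothetical placeholders for however the caller computes them.  Per
 * the error handling above, -EBUSY is the only return that leaves the skb
 * alive, so only that case may back-pressure and retry; every other failure
 * path has already freed the skb.
 */
static netdev_tx_t example_vnic_xmit(struct hfi1_vnic_vport_info *vinfo,
				     struct sk_buff *skb, u8 q_idx)
{
	u64 pbc = example_compute_pbc(vinfo, skb);	/* hypothetical */
	u8 plen = example_compute_pad_len(skb);		/* hypothetical */
	int err;

	err = hfi1_vnic_send_dma(vinfo->dd, q_idx, vinfo, skb, pbc, plen);
	if (err == -EBUSY) {
		/* stopped until hfi1_vnic_sdma_wakeup() re-enables us */
		netif_stop_subqueue(vinfo->netdev, q_idx);
		return NETDEV_TX_BUSY;
	}

	/* skb was consumed (sent or freed) on every other path */
	return NETDEV_TX_OK;
}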

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list so the queue is woken up when
 * descriptors become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait_work *wait,
				struct sdma_txreq *txreq,
				uint seq,
				bool pkts_sent)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait->iow, struct hfi1_vnic_sdma, wait);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq)) {
		write_sequnlock(&sde->waitlock);
		return -EAGAIN;
	}

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	if (list_empty(&vnic_sdma->wait.list)) {
		iowait_get_priority(wait->iow);
		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
}
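
/*
 * Note (editorial): the two return values above form the retry contract
 * with sdma_send_txreq(): -EAGAIN reports that the ring made progress
 * while the waitlock was held, so descriptor allocation should be
 * retried; -EBUSY reports that the wait is now queued on dmawait and the
 * submission is deferred until the wakeup fires.
 */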

/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}
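
/*
 * Example (editorial): one plausible way for a transmit path to use the
 * availability check above; the function name is hypothetical.  The queue
 * stays stopped until hfi1_vnic_sdma_wakeup() marks the SDMA queue
 * HFI1_VNIC_SDMA_Q_ACTIVE again and wakes the subqueue.
 */
static void example_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
				  u8 q_idx)
{
	if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
		netif_stop_subqueue(vinfo->netdev, q_idx);
}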

void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
			    hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL, NULL);
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* Add a free descriptor watermark for wakeups */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			struct iowait_work *work;

			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			work = iowait_get_ib_work(&vnic_sdma->wait);
			list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
		}
	}
}

int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 NULL);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;
	return 0;
}
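
/*
 * Example (editorial): the txreq cache is per device while SDMA queue
 * state is per vnic port, so a plausible bring-up sequence orders them as
 * below; the wrapper name is hypothetical.
 */
static int example_vnic_bringup(struct hfi1_devdata *dd,
				struct hfi1_vnic_vport_info *vinfo)
{
	int rc = hfi1_vnic_txreq_init(dd);	/* once per device */

	if (rc)
		return rc;
	hfi1_vnic_sdma_init(vinfo);		/* once per port */
	return 0;
}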

void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}