// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"
#include "trace.h"

#define TXREQ_LEN 24

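/**
 * hfi1_put_txreq - free a verbs_txreq back to the per-device slab cache
 * @tx: the txreq to free
 *
 * Drops the MR reference held by the txreq (if any), cleans up its SDMA
 * descriptors, and returns it to the slab cache.  If any QPs are queued
 * on the device txwait list, the first one is removed and woken with
 * RVT_S_WAIT_TX.  The txwait list is checked under a seqlock read and
 * the write lock is only taken when the list is seen to be non-empty.
 */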
void hfi1_put_txreq(struct verbs_txreq *tx)
{
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned int seq;
	struct hfi1_qp_priv *priv;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (tx->mr)
		rvt_put_mr(tx->mr);

	sdma_txclean(dd_from_dev(dev), &tx->txreq);

	/* Free verbs_txreq and return to slab cache */
	kmem_cache_free(dev->verbs_txreq_cache, tx);

	do {
		seq = read_seqbegin(&dev->txwait_lock);
		if (!list_empty(&dev->txwait)) {
			struct iowait *wait;

			write_seqlock_irqsave(&dev->txwait_lock, flags);
			wait = list_first_entry(&dev->txwait, struct iowait,
						list);
			qp = iowait_to_qp(wait);
			priv = qp->priv;
			list_del_init(&priv->s_iowait.list);
			/* refcount held until actual wake up */
			write_sequnlock_irqrestore(&dev->txwait_lock, flags);
			hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
			break;
		}
	} while (read_seqretry(&dev->txwait_lock, seq));
}

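/**
 * __get_txreq - slow-path allocation of a verbs_txreq
 * @dev: the ibdev owning the txreq cache
 * @qp: the QP that needs a txreq
 *
 * Called with qp->s_lock held when a txreq could not be allocated by the
 * caller.  The allocation is retried under the txwait seqlock; if it
 * fails again and the QP state still allows processing, the QP is queued
 * on the device txwait list (taking a QP reference) with RVT_S_WAIT_TX
 * set and RVT_S_BUSY cleared, to be woken later by hfi1_put_txreq().
 *
 * Return: the allocated txreq, or NULL if none could be allocated (in
 * which case the QP may have been queued to wait).
 */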
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
				struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	struct verbs_txreq *tx = NULL;

	write_seqlock(&dev->txwait_lock);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		struct hfi1_qp_priv *priv;

		tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
		if (tx)
			goto out;
		priv = qp->priv;
		if (list_empty(&priv->s_iowait.list)) {
			dev->n_txwait++;
			qp->s_flags |= RVT_S_WAIT_TX;
			list_add_tail(&priv->s_iowait.list, &dev->txwait);
			priv->s_iowait.lock = &dev->txwait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
			rvt_get_qp(qp);
		}
		qp->s_flags &= ~RVT_S_BUSY;
	}
out:
	write_sequnlock(&dev->txwait_lock);
	return tx;
}

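/**
 * verbs_txreq_init - create the per-device verbs_txreq slab cache
 * @dev: the hfi1 ibdev to create the cache for
 *
 * The cache name encodes the unit number, e.g. "hfi1_0_vtxreq_cache".
 *
 * Return: 0 on success, -ENOMEM if the cache could not be created.
 */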
int verbs_txreq_init(struct hfi1_ibdev *dev)
{
	char buf[TXREQ_LEN];
	struct hfi1_devdata *dd = dd_from_dev(dev);

	snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
	dev->verbs_txreq_cache = kmem_cache_create(buf,
						   sizeof(struct verbs_txreq),
						   0, SLAB_HWCACHE_ALIGN,
						   NULL);
	if (!dev->verbs_txreq_cache)
		return -ENOMEM;
	return 0;
}

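/**
 * verbs_txreq_exit - destroy the per-device verbs_txreq slab cache
 * @dev: the hfi1 ibdev whose cache is being torn down
 */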
void verbs_txreq_exit(struct hfi1_ibdev *dev)
{
	kmem_cache_destroy(dev->verbs_txreq_cache);
	dev->verbs_txreq_cache = NULL;
}