/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2

#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256
#define RDS_IB_DEFAULT_FR_WR 512

#define RDS_IB_DEFAULT_RETRY_COUNT 1

#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT 32

#define RDS_IB_WC_MAX 32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head f_item;
	struct list_head f_cache_entry;
	struct scatterlist f_sg;
};

struct rds_ib_incoming {
	struct list_head ii_frags;
	struct list_head ii_cache_entry;
	struct rds_incoming ii_inc;
};

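/*
 * Illustrative sketch (not part of the original header): a received message
 * is an rds_ib_incoming whose ii_frags list links rds_page_frag entries
 * through f_item, one per RDS_FRAG_SIZE fragment. A consumer could walk the
 * fragments like this; the function name is hypothetical.
 */
static inline unsigned int rds_ib_inc_count_frags(struct rds_ib_incoming *ibinc)
{
	struct rds_page_frag *frag;
	unsigned int nr = 0;

	list_for_each_entry(frag, &ibinc->ii_frags, f_item)
		nr++;	/* each entry maps one fragment of the message */
	return nr;
}
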
struct rds_ib_cache_head {
	struct list_head *first;
	unsigned long count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head *xfer;
	struct list_head *ready;
};

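/*
 * Illustrative sketch (not part of the original header): recycled receive
 * entries are chained onto the current CPU's cache head and only handed to
 * the refill path in batches (see RDS_IB_RECYCLE_BATCH_COUNT). This is a
 * simplified, hypothetical put; the real logic (including irq protection
 * and the xfer/ready handoff) lives in ib_recv.c.
 */
static inline void rds_ib_cache_put_example(struct rds_ib_refill_cache *cache,
					    struct list_head *new_item)
{
	struct rds_ib_cache_head *head = get_cpu_ptr(cache->percpu);

	if (!head->first)
		INIT_LIST_HEAD(new_item);
	else
		list_add_tail(new_item, head->first);
	head->first = new_item;
	head->count++;
	put_cpu_ptr(cache->percpu);
}
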
/* This is the common structure for the IB private data exchange in setting up
 * an RDS connection. The exchange is different for IPv4 and IPv6 connections.
 * The reason is that the address size is different and the addresses
 * exchanged are at the beginning of the structure. Hence the same structure
 * cannot be used for both address families and still interoperate.
 */
struct rds_ib_conn_priv_cmn {
	u8 ricpc_protocol_major;
	u8 ricpc_protocol_minor;
	__be16 ricpc_protocol_minor_mask; /* bitmask */
	u8 ricpc_dp_toss;
	u8 ripc_reserved1;
	__be16 ripc_reserved2;
	__be64 ricpc_ack_seq;
	__be32 ricpc_credit; /* non-zero enables flow ctl */
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	struct rds_ib_conn_priv_cmn dp_cmn;
};

struct rds6_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	struct in6_addr dp_saddr;
	struct in6_addr dp_daddr;
	struct rds_ib_conn_priv_cmn dp_cmn;
};

#define dp_protocol_major dp_cmn.ricpc_protocol_major
#define dp_protocol_minor dp_cmn.ricpc_protocol_minor
#define dp_protocol_minor_mask dp_cmn.ricpc_protocol_minor_mask
#define dp_ack_seq dp_cmn.ricpc_ack_seq
#define dp_credit dp_cmn.ricpc_credit

union rds_ib_conn_priv {
	struct rds_ib_connect_private ricp_v4;
	struct rds6_ib_connect_private ricp_v6;
};

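/*
 * Illustrative sketch (not part of the original header): the dp_* macros
 * above let callers reach the common fields through either union member.
 * Hypothetical helper; RDS_PROTOCOL_MAJOR/MINOR are assumed from rds.h.
 */
static inline void rds_ib_fill_priv_example(union rds_ib_conn_priv *dp,
					    u32 version, __be32 credit)
{
	dp->ricp_v4.dp_protocol_major = RDS_PROTOCOL_MAJOR(version);
	dp->ricp_v4.dp_protocol_minor = RDS_PROTOCOL_MINOR(version);
	dp->ricp_v4.dp_protocol_minor_mask =
		cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
	dp->ricp_v4.dp_credit = credit;	/* non-zero enables flow control */
}
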
struct rds_ib_send_work {
	void *s_op;
	union {
		struct ib_send_wr s_wr;
		struct ib_rdma_wr s_rdma_wr;
		struct ib_atomic_wr s_atomic_wr;
	};
	struct ib_sge s_sge[RDS_IB_MAX_SGE];
	unsigned long s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming *r_ibinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_ib_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};

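/*
 * Illustrative sketch (not part of the original header): ring occupancy is
 * the difference between the allocation counter and the free counter, with
 * u32 wraparound doing the right thing. Simplified from the accounting in
 * ib_ring.c; the function name is hypothetical.
 */
static inline u32 rds_ib_ring_used_example(struct rds_ib_work_ring *ring)
{
	/* w_alloc_ctr only grows; w_free_ctr chases it from completions */
	return ring->w_alloc_ctr - (u32)atomic_read(&ring->w_free_ctr);
}
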
/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this path can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8-byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64 ack_next;
	u64 ack_recv;
	unsigned int ack_required:1;
	unsigned int ack_next_valid:1;
	unsigned int ack_recv_valid:1;
};


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head ib_node;
	struct rds_ib_device *rds_ibdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;
	struct ib_wc i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t i_fastreg_wrs;
	atomic_t i_fastreg_inuse_count;

	/* interrupt handling */
	struct tasklet_struct i_send_tasklet;
	struct tasklet_struct i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring i_send_ring;
	struct rm_data_op *i_data_op;
	struct rds_header **i_send_hdrs;
	dma_addr_t *i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t i_signaled_sends;

	/* rx */
	struct mutex i_recv_mutex;
	struct rds_ib_work_ring i_recv_ring;
	struct rds_ib_incoming *i_ibinc;
	u32 i_recv_data_rem;
	struct rds_header **i_recv_hdrs;
	dma_addr_t *i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64 i_ack_recv; /* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t i_cache_allocs;

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next;	/* next ACK to send */
#else
	spinlock_t i_ack_lock;	/* protect i_ack_next */
	u64 i_ack_next;	/* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	dma_addr_t i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool i_active_side;
	atomic_t i_cq_quiesce;

	/* Send/Recv vectors */
	int i_scq_vector;
	int i_rcq_vector;
	u8 i_sl;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v) ((v) >> 16)
#define IB_SET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v) ((v) << 16)

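/*
 * Illustrative sketch (not part of the original header): both credit counts
 * live in one atomic_t, send credits in the low 16 bits and posted recv
 * credits in the high 16, so a single atomic read yields a consistent pair.
 * Hypothetical helper name.
 */
static inline void rds_ib_credits_example(struct rds_ib_connection *ic,
					  u32 *send, u32 *post)
{
	u32 v = atomic_read(&ic->i_credits);

	*send = IB_GET_SEND_CREDITS(v);	/* low 16 bits */
	*post = IB_GET_POST_CREDITS(v);	/* high 16 bits */
}
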
struct rds_ib_ipaddr {
	struct list_head list;
	__be32 ipaddr;
	struct rcu_head rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head list;
	struct list_head ipaddr_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	struct dma_pool *rid_hdrs_pool;	/* RDS headers DMA pool */
	u8 odp_capable:1;

	unsigned int max_mrs;
	struct rds_ib_mr_pool *mr_1m_pool;
	struct rds_ib_mr_pool *mr_8k_pool;
	unsigned int max_8k_mrs;
	unsigned int max_1m_mrs;
	int max_sge;
	unsigned int max_wrs;
	unsigned int max_initiator_depth;
	unsigned int max_responder_resources;
	spinlock_t spinlock;	/* protect the above */
	refcount_t refcount;
	struct work_struct free_work;
	int *vector_load;
};

static inline int ibdev_to_node(struct ib_device *ibdev)
{
	struct device *parent;

	parent = ibdev->dev.parent;
	return parent ? dev_to_node(parent) : NUMA_NO_NODE;
}
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
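
/*
 * Illustrative sketch (not part of the original header): per-device
 * allocations can be made NUMA-local to the HCA with the helper above.
 * Hypothetical call site; kzalloc_node is the standard kernel API.
 */
static inline void *rds_ib_alloc_local_example(struct rds_ib_device *rds_ibdev,
					       size_t size, gfp_t gfp)
{
	return kzalloc_node(size, gfp, rdsibdev_to_node(rds_ibdev));
}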

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT 0
#define IB_ACK_REQUESTED 1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID (~(u64) 0)

struct rds_ib_statistics {
	uint64_t s_ib_connect_raced;
	uint64_t s_ib_listen_closed_stale;
	uint64_t s_ib_evt_handler_call;
	uint64_t s_ib_tasklet_call;
	uint64_t s_ib_tx_cq_event;
	uint64_t s_ib_tx_ring_full;
	uint64_t s_ib_tx_throttle;
	uint64_t s_ib_tx_sg_mapping_failure;
	uint64_t s_ib_tx_stalled;
	uint64_t s_ib_tx_credit_updates;
	uint64_t s_ib_rx_cq_event;
	uint64_t s_ib_rx_ring_empty;
	uint64_t s_ib_rx_refill_from_cq;
	uint64_t s_ib_rx_refill_from_thread;
	uint64_t s_ib_rx_alloc_limit;
	uint64_t s_ib_rx_total_frags;
	uint64_t s_ib_rx_total_incs;
	uint64_t s_ib_rx_credit_updates;
	uint64_t s_ib_ack_sent;
	uint64_t s_ib_ack_send_failure;
	uint64_t s_ib_ack_send_delayed;
	uint64_t s_ib_ack_send_piggybacked;
	uint64_t s_ib_ack_received;
	uint64_t s_ib_rdma_mr_8k_alloc;
	uint64_t s_ib_rdma_mr_8k_free;
	uint64_t s_ib_rdma_mr_8k_used;
	uint64_t s_ib_rdma_mr_8k_pool_flush;
	uint64_t s_ib_rdma_mr_8k_pool_wait;
	uint64_t s_ib_rdma_mr_8k_pool_depleted;
	uint64_t s_ib_rdma_mr_1m_alloc;
	uint64_t s_ib_rdma_mr_1m_free;
	uint64_t s_ib_rdma_mr_1m_used;
	uint64_t s_ib_rdma_mr_1m_pool_flush;
	uint64_t s_ib_rdma_mr_1m_pool_wait;
	uint64_t s_ib_rdma_mr_1m_pool_depleted;
	uint64_t s_ib_rdma_mr_8k_reused;
	uint64_t s_ib_rdma_mr_1m_reused;
	uint64_t s_ib_atomic_cswp;
	uint64_t s_ib_atomic_fadd;
	uint64_t s_ib_recv_added_to_cache;
	uint64_t s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					   sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_cpu rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
					      sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_device rds_ib_dma_sync_sg_for_device

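/*
 * Illustrative sketch (not part of the original header): before the CPU
 * parses headers the device may have written, the receive path syncs the
 * mapped scatterlist for CPU access. Hypothetical wrapper; see ib_recv.c
 * for the real call sites.
 */
static inline void rds_ib_sync_recv_example(struct ib_device *dev,
					    struct scatterlist *sg,
					    unsigned int nents)
{
	rds_ib_dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);
}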

/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);
struct rds_header **rds_dma_hdrs_alloc(struct ib_device *ibdev,
				       struct dma_pool *pool,
				       dma_addr_t **dma_addrs, u32 num_hdrs);
void rds_dma_hdrs_free(struct dma_pool *pool, struct rds_header **hdrs,
		       dma_addr_t *dma_addrs, u32 num_hdrs);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
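
/*
 * Illustrative usage (not part of the original header): rds_ib_conn_error()
 * prefixes the message and reports the error against the connection. A
 * caller might flag a failed ack post like this (hypothetical example):
 *
 *	rds_ib_conn_error(ic->conn, "sending ack failed (%d)\n", ret);
 */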

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
	rds_stats_add_which(rds_ib_stats, member, count)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif