v3.5.6
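(The listing below is the RDS-over-TCP receive path; it corresponds to net/rds/tcp_recv.c in the mainline tree. The v6.13.7 revision of the same file follows it for comparison.)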
  1/*
  2 * Copyright (c) 2006 Oracle.  All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/kernel.h>
 34#include <linux/slab.h>
 35#include <net/tcp.h>
 36
 37#include "rds.h"
 38#include "tcp.h"
 39
 40static struct kmem_cache *rds_tcp_incoming_slab;
 41
 42static void rds_tcp_inc_purge(struct rds_incoming *inc)
 43{
 44	struct rds_tcp_incoming *tinc;
 45	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
 46	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
 47	skb_queue_purge(&tinc->ti_skb_list);
 48}
 49
 50void rds_tcp_inc_free(struct rds_incoming *inc)
 51{
 52	struct rds_tcp_incoming *tinc;
 53	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
 54	rds_tcp_inc_purge(inc);
 55	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
 56	kmem_cache_free(rds_tcp_incoming_slab, tinc);
 57}
 58
 59/*
 60 * this is pretty lame, but, whatever.
 61 */
 62int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
 63			     size_t size)
 64{
 65	struct rds_tcp_incoming *tinc;
 66	struct iovec *iov, tmp;
 67	struct sk_buff *skb;
 68	unsigned long to_copy, skb_off;
 69	int ret = 0;
 70
 71	if (size == 0)
 72		goto out;
 73
 74	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
 75	iov = first_iov;
 76	tmp = *iov;
 77
 78	skb_queue_walk(&tinc->ti_skb_list, skb) {
 79		skb_off = 0;
 80		while (skb_off < skb->len) {
 81			while (tmp.iov_len == 0) {
 82				iov++;
 83				tmp = *iov;
 84			}
 85
 86			to_copy = min(tmp.iov_len, size);
 87			to_copy = min(to_copy, skb->len - skb_off);
 88
 89			rdsdebug("ret %d size %zu skb %p skb_off %lu "
 90				 "skblen %d iov_base %p iov_len %zu cpy %lu\n",
 91				 ret, size, skb, skb_off, skb->len,
 92				 tmp.iov_base, tmp.iov_len, to_copy);
 93
 94			/* modifies tmp as it copies */
 95			if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
 96						    to_copy)) {
 97				ret = -EFAULT;
 98				goto out;
 99			}
100
101			rds_stats_add(s_copy_to_user, to_copy);
102			size -= to_copy;
103			ret += to_copy;
104			skb_off += to_copy;
105			if (size == 0)
106				goto out;
107		}
108	}
109out:
110	return ret;
111}
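/*
 * The nested min() logic in rds_tcp_inc_copy_to_user() is a generic
 * "fragmented source into scattered destination" copy loop.  As an
 * illustration only (the userspace setting and every name below are
 * invented for this sketch, not part of RDS), the same pattern without
 * the skb machinery:
 */
#include <string.h>
#include <sys/uio.h>

/* Caller guarantees the iovec array holds at least `size` bytes of space,
 * just as the kernel caller does for rds_tcp_inc_copy_to_user(). */
static size_t copy_frags_to_iovec(const char **frags, const size_t *frag_len,
				  int nfrags, struct iovec *iov, size_t size)
{
	struct iovec tmp = *iov;	/* scratch cursor, like `tmp` above */
	size_t copied = 0;
	int i;

	for (i = 0; i < nfrags && size; i++) {
		size_t off = 0;		/* progress through this fragment */

		while (off < frag_len[i] && size) {
			size_t n;

			while (tmp.iov_len == 0)  /* skip exhausted vectors */
				tmp = *++iov;

			/* copy min(dest space, bytes wanted, frag remainder) */
			n = tmp.iov_len < size ? tmp.iov_len : size;
			if (n > frag_len[i] - off)
				n = frag_len[i] - off;

			memcpy(tmp.iov_base, frags[i] + off, n);
			tmp.iov_base = (char *)tmp.iov_base + n;
			tmp.iov_len -= n;
			off += n;
			size -= n;
			copied += n;
		}
	}
	return copied;
}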
112
113/*
114 * We have a series of skbs that have fragmented pieces of the congestion
115 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
116 * use the skb helpers to copy those into the pages that make up the in-memory
117 * congestion bitmap for the remote address of this connection.  We then tell
118 * the congestion core that the bitmap has been changed so that it can wake up
119 * sleepers.
120 *
121 * This is racing with sending paths which are using test_bit to see if the
122 * bitmap indicates that their recipient is congested.
123 */
124
125static void rds_tcp_cong_recv(struct rds_connection *conn,
126			      struct rds_tcp_incoming *tinc)
127{
128	struct sk_buff *skb;
129	unsigned int to_copy, skb_off;
130	unsigned int map_off;
131	unsigned int map_page;
132	struct rds_cong_map *map;
133	int ret;
134
135	/* catch completely corrupt packets */
136	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
137		return;
138
139	map_page = 0;
140	map_off = 0;
141	map = conn->c_fcong;
142
143	skb_queue_walk(&tinc->ti_skb_list, skb) {
144		skb_off = 0;
145		while (skb_off < skb->len) {
146			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
147					skb->len - skb_off);
148
149			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);
150
151			/* only returns 0 or -error */
152			ret = skb_copy_bits(skb, skb_off,
153				(void *)map->m_page_addrs[map_page] + map_off,
154				to_copy);
155			BUG_ON(ret != 0);
156
157			skb_off += to_copy;
158			map_off += to_copy;
159			if (map_off == PAGE_SIZE) {
160				map_off = 0;
161				map_page++;
162			}
163		}
164	}
165
166	rds_cong_map_updated(map, ~(u64) 0);
167}
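/*
 * Worked example of the paging arithmetic above (assuming PAGE_SIZE is
 * 4096): an skb carrying 6000 bitmap bytes with map_off == 0 is consumed
 * in two steps.  First to_copy = min(4096 - 0, 6000) = 4096, which fills
 * the current page and advances map_page; then to_copy = min(4096, 1904)
 * = 1904 lands at the start of the next page.  Because the h_len check
 * guarantees the queued skbs total exactly RDS_CONG_MAP_BYTES, map_page
 * cannot legitimately reach RDS_CONG_MAP_PAGES, hence the BUG_ON.
 */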
168
169struct rds_tcp_desc_arg {
170	struct rds_connection *conn;
171	gfp_t gfp;
172};
173
174static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
175			     unsigned int offset, size_t len)
176{
177	struct rds_tcp_desc_arg *arg = desc->arg.data;
178	struct rds_connection *conn = arg->conn;
179	struct rds_tcp_connection *tc = conn->c_transport_data;
180	struct rds_tcp_incoming *tinc = tc->t_tinc;
181	struct sk_buff *clone;
182	size_t left = len, to_copy;
183
184	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
185		 len);
186
187	/*
188	 * tcp_read_sock() interprets partial progress as an indication to stop
189	 * processing.
190	 */
191	while (left) {
192		if (!tinc) {
193			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
194					        arg->gfp);
195			if (!tinc) {
196				desc->error = -ENOMEM;
197				goto out;
198			}
199			tc->t_tinc = tinc;
200			rdsdebug("alloced tinc %p\n", tinc);
201			rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr);
202			/*
203			 * XXX * we might be able to use the __ variants when
204			 * we've already serialized at a higher level.
205			 */
206			skb_queue_head_init(&tinc->ti_skb_list);
207		}
208
209		if (left && tc->t_tinc_hdr_rem) {
210			to_copy = min(tc->t_tinc_hdr_rem, left);
211			rdsdebug("copying %zu header from skb %p\n", to_copy,
212				 skb);
213			skb_copy_bits(skb, offset,
214				      (char *)&tinc->ti_inc.i_hdr +
215						sizeof(struct rds_header) -
216						tc->t_tinc_hdr_rem,
217				      to_copy);
218			tc->t_tinc_hdr_rem -= to_copy;
219			left -= to_copy;
220			offset += to_copy;
221
222			if (tc->t_tinc_hdr_rem == 0) {
223				/* could be 0 for a 0 len message */
224				tc->t_tinc_data_rem =
225					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
226			}
227		}
228
229		if (left && tc->t_tinc_data_rem) {
230			clone = skb_clone(skb, arg->gfp);
231			if (!clone) {
232				desc->error = -ENOMEM;
233				goto out;
234			}
235
236			to_copy = min(tc->t_tinc_data_rem, left);
237			pskb_pull(clone, offset);
238			pskb_trim(clone, to_copy);
239			skb_queue_tail(&tinc->ti_skb_list, clone);
240
241			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
242				 "clone %p data %p len %d\n",
243				 skb, skb->data, skb->len, offset, to_copy,
244				 clone, clone->data, clone->len);
245
246			tc->t_tinc_data_rem -= to_copy;
247			left -= to_copy;
248			offset += to_copy;
249		}
250
251		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
252			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
253				rds_tcp_cong_recv(conn, tinc);
254			else
255				rds_recv_incoming(conn, conn->c_faddr,
256						  conn->c_laddr, &tinc->ti_inc,
257						  arg->gfp);
258
259			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
260			tc->t_tinc_data_rem = 0;
261			tc->t_tinc = NULL;
262			rds_inc_put(&tinc->ti_inc);
263			tinc = NULL;
264		}
265	}
266out:
267	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
268		 len, left, skb->len,
269		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
270	return len - left;
271}
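/*
 * State machine notes for rds_tcp_data_recv(): tc->t_tinc_hdr_rem counts
 * header bytes still outstanding (reset to sizeof(struct rds_header)
 * after each completed message) and tc->t_tinc_data_rem counts payload
 * bytes, taken from h_len once the header has fully arrived.  Payload
 * chunks are captured by cloning the inbound skb and trimming the clone
 * to the chunk with pskb_pull()/pskb_trim(), then queuing it on
 * ti_skb_list.  Only when both counters reach zero is the assembled
 * message handed to rds_tcp_cong_recv() or rds_recv_incoming() and the
 * tinc released.
 */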
272
273/* the caller has to hold the sock lock */
274static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp)
275{
276	struct rds_tcp_connection *tc = conn->c_transport_data;
277	struct socket *sock = tc->t_sock;
278	read_descriptor_t desc;
279	struct rds_tcp_desc_arg arg;
280
281	/* It's like glib in the kernel! */
282	arg.conn = conn;
283	arg.gfp = gfp;
284	desc.arg.data = &arg;
285	desc.error = 0;
286	desc.count = 1; /* give more than one skb per call */
287
288	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
289	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
290		 desc.error);
291
292	return desc.error;
293}
294
295/*
296 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
297 * data_ready.
298 *
299 * if we fail to allocate we're in trouble.. blindly wait some time before
300 * trying again to see if the VM can free up something for us.
301 */
302int rds_tcp_recv(struct rds_connection *conn)
303{
304	struct rds_tcp_connection *tc = conn->c_transport_data;
305	struct socket *sock = tc->t_sock;
306	int ret = 0;
307
308	rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);
309
310	lock_sock(sock->sk);
311	ret = rds_tcp_read_sock(conn, GFP_KERNEL);
312	release_sock(sock->sk);
313
314	return ret;
315}
316
317void rds_tcp_data_ready(struct sock *sk, int bytes)
318{
319	void (*ready)(struct sock *sk, int bytes);
320	struct rds_connection *conn;
321	struct rds_tcp_connection *tc;
322
323	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
324
325	read_lock_bh(&sk->sk_callback_lock);
326	conn = sk->sk_user_data;
327	if (!conn) { /* check for teardown race */
328		ready = sk->sk_data_ready;
329		goto out;
330	}
331
332	tc = conn->c_transport_data;
333	ready = tc->t_orig_data_ready;
334	rds_tcp_stats_inc(s_tcp_data_ready_calls);
335
336	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
337		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
338out:
339	read_unlock_bh(&sk->sk_callback_lock);
340	ready(sk, bytes);
341}
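/*
 * rds_tcp_data_ready() runs in bottom-half context, hence the GFP_ATOMIC
 * read here (versus GFP_KERNEL in rds_tcp_recv()).  The sk_callback_lock
 * read lock closes the teardown race noted above: if sk_user_data has
 * already been cleared, the socket's current sk_data_ready is invoked
 * instead of the RDS path.  On -ENOMEM the receive work is requeued on
 * rds_wq so a sleeping allocation can retry later, per the comment above
 * rds_tcp_recv().
 */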
342
343int rds_tcp_recv_init(void)
344{
345	rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
346					sizeof(struct rds_tcp_incoming),
347					0, 0, NULL);
348	if (!rds_tcp_incoming_slab)
349		return -ENOMEM;
350	return 0;
351}
352
353void rds_tcp_recv_exit(void)
354{
355	kmem_cache_destroy(rds_tcp_incoming_slab);
356}
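The v6.13.7 revision of the same file follows. Read against v3.5.6, the notable changes are: the connection argument becomes a per-path struct rds_conn_path (multipath RDS), the user copy moves from a hand-rolled struct iovec walk to the iov_iter API via skb_copy_datagram_iter(), the skb_clone()/pskb_pull()/pskb_trim() sequence collapses into one pskb_extract() call, sk_data_ready callbacks lose their byte-count argument, receive latency stamps (i_rx_lat_trace) are added, the -ENOMEM requeue is guarded by rds_destroy_pending() under RCU, and the slab cache is created with the KMEM_CACHE() shorthand.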
v6.13.7
  1/*
  2 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/kernel.h>
 34#include <linux/slab.h>
 35#include <net/tcp.h>
 36#include <trace/events/sock.h>
 37
 38#include "rds.h"
 39#include "tcp.h"
 40
 41static struct kmem_cache *rds_tcp_incoming_slab;
 42
 43static void rds_tcp_inc_purge(struct rds_incoming *inc)
 44{
 45	struct rds_tcp_incoming *tinc;
 46	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
 47	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
 48	skb_queue_purge(&tinc->ti_skb_list);
 49}
 50
 51void rds_tcp_inc_free(struct rds_incoming *inc)
 52{
 53	struct rds_tcp_incoming *tinc;
 54	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
 55	rds_tcp_inc_purge(inc);
 56	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
 57	kmem_cache_free(rds_tcp_incoming_slab, tinc);
 58}
 59
 60/*
 61 * this is pretty lame, but, whatever.
 62 */
 63int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 64{
 65	struct rds_tcp_incoming *tinc;
 66	struct sk_buff *skb;
 67	int ret = 0;
 68
 69	if (!iov_iter_count(to))
 70		goto out;
 71
 72	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
 73
 74	skb_queue_walk(&tinc->ti_skb_list, skb) {
 75		unsigned long to_copy, skb_off;
 76		for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
 77			to_copy = iov_iter_count(to);
 78			to_copy = min(to_copy, skb->len - skb_off);
 79
 80			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
 81				return -EFAULT;
 82
 83			rds_stats_add(s_copy_to_user, to_copy);
 84			ret += to_copy;
 85
 86			if (!iov_iter_count(to))
 87				goto out;
 88		}
 89	}
 90out:
 91	return ret;
 92}
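/*
 * Compared with the v3.5.6 version of this function, there is no
 * explicit iovec cursor: the iov_iter in `to` carries its own position
 * and skb_copy_datagram_iter() advances it as it copies, so the loop
 * only needs to track progress through each skb via skb_off.
 */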
 93
 94/*
 95 * We have a series of skbs that have fragmented pieces of the congestion
 96 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 97 * use the skb helpers to copy those into the pages that make up the in-memory
 98 * congestion bitmap for the remote address of this connection.  We then tell
 99 * the congestion core that the bitmap has been changed so that it can wake up
100 * sleepers.
101 *
102 * This is racing with sending paths which are using test_bit to see if the
103 * bitmap indicates that their recipient is congested.
104 */
105
106static void rds_tcp_cong_recv(struct rds_connection *conn,
107			      struct rds_tcp_incoming *tinc)
108{
109	struct sk_buff *skb;
110	unsigned int to_copy, skb_off;
111	unsigned int map_off;
112	unsigned int map_page;
113	struct rds_cong_map *map;
114	int ret;
115
116	/* catch completely corrupt packets */
117	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
118		return;
119
120	map_page = 0;
121	map_off = 0;
122	map = conn->c_fcong;
123
124	skb_queue_walk(&tinc->ti_skb_list, skb) {
125		skb_off = 0;
126		while (skb_off < skb->len) {
127			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
128					skb->len - skb_off);
129
130			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);
131
132			/* only returns 0 or -error */
133			ret = skb_copy_bits(skb, skb_off,
134				(void *)map->m_page_addrs[map_page] + map_off,
135				to_copy);
136			BUG_ON(ret != 0);
137
138			skb_off += to_copy;
139			map_off += to_copy;
140			if (map_off == PAGE_SIZE) {
141				map_off = 0;
142				map_page++;
143			}
144		}
145	}
146
147	rds_cong_map_updated(map, ~(u64) 0);
148}
149
150struct rds_tcp_desc_arg {
151	struct rds_conn_path *conn_path;
152	gfp_t gfp;
153};
154
155static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
156			     unsigned int offset, size_t len)
157{
158	struct rds_tcp_desc_arg *arg = desc->arg.data;
159	struct rds_conn_path *cp = arg->conn_path;
160	struct rds_tcp_connection *tc = cp->cp_transport_data;
161	struct rds_tcp_incoming *tinc = tc->t_tinc;
162	struct sk_buff *clone;
163	size_t left = len, to_copy;
164
165	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
166		 len);
167
168	/*
169	 * tcp_read_sock() interprets partial progress as an indication to stop
170	 * processing.
171	 */
172	while (left) {
173		if (!tinc) {
174			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
175						arg->gfp);
176			if (!tinc) {
177				desc->error = -ENOMEM;
178				goto out;
179			}
180			tc->t_tinc = tinc;
181			rdsdebug("allocated tinc %p\n", tinc);
182			rds_inc_path_init(&tinc->ti_inc, cp,
183					  &cp->cp_conn->c_faddr);
184			tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
185					local_clock();
186
187			/*
188			 * XXX * we might be able to use the __ variants when
189			 * we've already serialized at a higher level.
190			 */
191			skb_queue_head_init(&tinc->ti_skb_list);
192		}
193
194		if (left && tc->t_tinc_hdr_rem) {
195			to_copy = min(tc->t_tinc_hdr_rem, left);
196			rdsdebug("copying %zu header from skb %p\n", to_copy,
197				 skb);
198			skb_copy_bits(skb, offset,
199				      (char *)&tinc->ti_inc.i_hdr +
200						sizeof(struct rds_header) -
201						tc->t_tinc_hdr_rem,
202				      to_copy);
203			tc->t_tinc_hdr_rem -= to_copy;
204			left -= to_copy;
205			offset += to_copy;
206
207			if (tc->t_tinc_hdr_rem == 0) {
208				/* could be 0 for a 0 len message */
209				tc->t_tinc_data_rem =
210					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
211				tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
212					local_clock();
213			}
214		}
215
216		if (left && tc->t_tinc_data_rem) {
217			to_copy = min(tc->t_tinc_data_rem, left);
218
219			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
220			if (!clone) {
221				desc->error = -ENOMEM;
222				goto out;
223			}
224
225			skb_queue_tail(&tinc->ti_skb_list, clone);
226
227			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
228				 "clone %p data %p len %d\n",
229				 skb, skb->data, skb->len, offset, to_copy,
230				 clone, clone->data, clone->len);
231
232			tc->t_tinc_data_rem -= to_copy;
233			left -= to_copy;
234			offset += to_copy;
235		}
236
237		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
238			struct rds_connection *conn = cp->cp_conn;
239
240			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
241				rds_tcp_cong_recv(conn, tinc);
242			else
243				rds_recv_incoming(conn, &conn->c_faddr,
244						  &conn->c_laddr,
245						  &tinc->ti_inc,
246						  arg->gfp);
247
248			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
249			tc->t_tinc_data_rem = 0;
250			tc->t_tinc = NULL;
251			rds_inc_put(&tinc->ti_inc);
252			tinc = NULL;
253		}
254	}
255out:
256	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
257		 len, left, skb->len,
258		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
259	return len - left;
260}
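/*
 * pskb_extract(skb, offset, to_copy, arg->gfp) is the one-call
 * replacement for the v3.5.6 skb_clone()/pskb_pull()/pskb_trim()
 * sequence: it produces a new skb covering just that byte range and
 * returns NULL on allocation failure, feeding the same -ENOMEM path.
 * The local_clock() stamps record when the tinc was allocated
 * (RDS_MSG_RX_HDR) and when its header completed (RDS_MSG_RX_START)
 * for the rx latency trace.
 */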
261
262/* the caller has to hold the sock lock */
263static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
264{
265	struct rds_tcp_connection *tc = cp->cp_transport_data;
266	struct socket *sock = tc->t_sock;
267	read_descriptor_t desc;
268	struct rds_tcp_desc_arg arg;
269
270	/* It's like glib in the kernel! */
271	arg.conn_path = cp;
272	arg.gfp = gfp;
273	desc.arg.data = &arg;
274	desc.error = 0;
275	desc.count = 1; /* give more than one skb per call */
276
277	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
278	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
279		 desc.error);
280
281	return desc.error;
282}
283
284/*
285 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
286 * data_ready.
287 *
288 * if we fail to allocate we're in trouble.. blindly wait some time before
289 * trying again to see if the VM can free up something for us.
290 */
291int rds_tcp_recv_path(struct rds_conn_path *cp)
292{
293	struct rds_tcp_connection *tc = cp->cp_transport_data;
294	struct socket *sock = tc->t_sock;
295	int ret = 0;
296
297	rdsdebug("recv worker path [%d] tc %p sock %p\n",
298		 cp->cp_index, tc, sock);
299
300	lock_sock(sock->sk);
301	ret = rds_tcp_read_sock(cp, GFP_KERNEL);
302	release_sock(sock->sk);
303
304	return ret;
305}
306
307void rds_tcp_data_ready(struct sock *sk)
308{
309	void (*ready)(struct sock *sk);
310	struct rds_conn_path *cp;
311	struct rds_tcp_connection *tc;
312
313	trace_sk_data_ready(sk);
314	rdsdebug("data ready sk %p\n", sk);
315
316	read_lock_bh(&sk->sk_callback_lock);
317	cp = sk->sk_user_data;
318	if (!cp) { /* check for teardown race */
319		ready = sk->sk_data_ready;
320		goto out;
321	}
322
323	tc = cp->cp_transport_data;
324	ready = tc->t_orig_data_ready;
325	rds_tcp_stats_inc(s_tcp_data_ready_calls);
326
327	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
328		rcu_read_lock();
329		if (!rds_destroy_pending(cp->cp_conn))
330			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
331		rcu_read_unlock();
332	}
333out:
334	read_unlock_bh(&sk->sk_callback_lock);
335	ready(sk);
336}
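/*
 * Two changes from the v3.5.6 callback: sk_data_ready() no longer takes
 * a byte count, and the -ENOMEM requeue is skipped when
 * rds_destroy_pending() reports the connection is going away, a check
 * made under rcu_read_lock() so it is stable against a concurrent
 * destroy.
 */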
337
338int rds_tcp_recv_init(void)
339{
340	rds_tcp_incoming_slab = KMEM_CACHE(rds_tcp_incoming, 0);
341	if (!rds_tcp_incoming_slab)
342		return -ENOMEM;
343	return 0;
344}
345
346void rds_tcp_recv_exit(void)
347{
348	kmem_cache_destroy(rds_tcp_incoming_slab);
349}
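/*
 * KMEM_CACHE(rds_tcp_incoming, 0) expands to essentially the
 * kmem_cache_create() call spelled out in the v3.5.6 version, taking
 * the cache name, object size, and alignment from struct
 * rds_tcp_incoming itself, so the two rds_tcp_recv_init() variants are
 * equivalent.
 */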