v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>

#include "socklib.h"

/*
 * Helper structure for copying from an sk_buff.
 */
struct xdr_skb_reader {
	struct sk_buff	*skb;
	unsigned int	offset;
	size_t		count;
	__wsum		csum;
};

typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
				     size_t len);
/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
static size_t
xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}
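
/*
 * Example (illustration only, not part of the kernel source): a minimal
 * sketch of driving the reader helper above. The caller name and its
 * single-shot use are hypothetical; real callers invoke the actor
 * repeatedly via xdr_partial_copy_from_skb() below.
 */
static size_t example_drain_skb(struct sk_buff *skb, void *buf, size_t want)
{
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= 0,
		.count	= skb->len,
	};

	/* xdr_skb_read_bits() caps @want at desc.count and advances
	 * desc.offset, so repeated calls walk the packet. */
	return xdr_skb_read_bits(&desc, buf, want);
}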
 
/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as xdr_skb_read_bits, but calculates a checksum at the same time.
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}
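
/*
 * Example (illustration only): how csum_block_add() folds the checksum
 * of a second region into a running sum, as the actor above does. The
 * function and its two-chunk layout are hypothetical.
 */
static __wsum example_csum_two_chunks(const void *a, int alen,
				      const void *b, int blen)
{
	__wsum csum = csum_partial(a, alen, 0);

	/* The third argument is the byte offset of @b within the
	 * overall stream; it matters when @a ends on an odd boundary. */
	return csum_block_add(csum, csum_partial(b, blen, 0), alen);
}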

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 */
static ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	size_t		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
			*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
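
/*
 * Example (illustration only): receiving into a head-plus-pages
 * xdr_buf with the plain read actor. The buffer layout and function
 * are hypothetical; real callers get their xdr_buf from the RPC
 * request. Without XDRBUF_SPARSE_PAGES set, @pages must already be
 * allocated.
 */
static ssize_t example_copy_from_skb(struct sk_buff *skb, void *hdr,
				     size_t hdrlen, struct page **pages,
				     unsigned int pglen)
{
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= 0,
		.count	= skb->len,
	};
	struct xdr_buf buf = {
		.head[0]	= { .iov_base = hdr, .iov_len = hdrlen },
		.pages		= pages,
		.page_len	= pglen,
	};

	/* Walks the head kvec first, then the page array. */
	return xdr_partial_copy_from_skb(&buf, 0, &desc, xdr_skb_read_bits);
}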
 
/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec.  -DaveM
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if (csum_fold(desc.csum))
		return -1;
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev, skb);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
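
/*
 * Example (illustration only): a hypothetical UDP receive path. The
 * real caller is the UDP transport in net/sunrpc/xprtsock.c; the
 * function name and error policy below are made up for the sketch.
 */
static int example_udp_receive(struct xdr_buf *reply, struct sk_buff *skb)
{
	/* Returns 0 on success, -1 on a short copy or checksum failure. */
	if (csum_partial_copy_to_xdr(reply, skb))
		return -EAGAIN;
	return 0;
}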

static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t seek)
{
	if (seek)
		iov_iter_advance(&msg->msg_iter, seek);
	return sock_sendmsg(sock, msg);
}

static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
			  struct kvec *vec, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
	return xprt_sendmsg(sock, msg, seek);
}

static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
			      struct xdr_buf *xdr, size_t base)
{
	int err;

	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
	if (err < 0)
		return err;

	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
		      xdr->page_len + xdr->page_base);
	return xprt_sendmsg(sock, msg, base + xdr->page_base);
}

/* Common case:
 *  - stream transport
 *  - sending from byte 0 of the message
 *  - the message is wholly contained in @xdr's head iovec
 */
static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
				 rpc_fraghdr marker, struct kvec *vec,
				 size_t base)
{
	struct kvec iov[2] = {
		[0] = {
			.iov_base	= &marker,
			.iov_len	= sizeof(marker)
		},
		[1] = *vec,
	};
	size_t len = iov[0].iov_len + iov[1].iov_len;

	iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
	return xprt_sendmsg(sock, msg, base);
}
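
/*
 * Example (illustration only): forming the RPC-over-TCP record marker
 * that xprt_send_rm_and_kvec() sends ahead of the head kvec. The
 * helper name is hypothetical; RPC_LAST_STREAM_FRAGMENT comes from
 * <linux/sunrpc/msg_prot.h>.
 */
static rpc_fraghdr example_marker(size_t reclen)
{
	/* The high bit marks the final fragment of the record; the low
	 * 31 bits carry the fragment length. */
	return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)reclen);
}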

/**
 * xprt_sock_sendmsg - write an xdr_buf directly to a socket
 * @sock: open socket to send on
 * @msg: socket message metadata
 * @xdr: xdr_buf containing this request
 * @base: starting position in the buffer
 * @marker: stream record marker field
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 * Return values:
 *   On success, returns zero and fills in @sent_p.
 *   %-ENOTSOCK if @sock is not a struct socket.
 */
int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
		      struct xdr_buf *xdr, unsigned int base,
		      rpc_fraghdr marker, unsigned int *sent_p)
{
	unsigned int rmsize = marker ? sizeof(marker) : 0;
	unsigned int remainder = rmsize + xdr->len - base;
	unsigned int want;
	int err = 0;

	*sent_p = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	msg->msg_flags |= MSG_MORE;
	want = xdr->head[0].iov_len + rmsize;
	if (base < want) {
		unsigned int len = want - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		if (rmsize)
			err = xprt_send_rm_and_kvec(sock, msg, marker,
						    &xdr->head[0], base);
		else
			err = xprt_send_kvec(sock, msg, &xdr->head[0], base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= want;
	}

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;
		err = xprt_send_pagedata(sock, msg, xdr, base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= xdr->page_len;
	}

	if (base >= xdr->tail[0].iov_len)
		return 0;
	msg->msg_flags &= ~MSG_MORE;
	err = xprt_send_kvec(sock, msg, &xdr->tail[0], base);
out:
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}
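
/*
 * Example (illustration only): a hypothetical stream-transport send.
 * Real callers live in net/sunrpc/xprtsock.c and net/sunrpc/svcsock.c;
 * the function name, msghdr flags, and retry policy below are made up
 * for the sketch.
 */
static int example_stream_send(struct socket *sock, struct xdr_buf *xdr)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
					 (u32)xdr->len);
	unsigned int sent;
	int err;

	/* Send from byte 0; on success @sent holds the queued length. */
	err = xprt_sock_sendmsg(sock, &msg, xdr, 0, marker, &sent);
	if (err == 0 && sent < sizeof(marker) + xdr->len)
		err = -EAGAIN;	/* partial send: caller must retry */
	return err;
}
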
v4.6
 
/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>

/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}
EXPORT_SYMBOL_GPL(xdr_skb_read_bits);

/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as xdr_skb_read_bits, but calculates a checksum at the same time.
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	size_t		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);

/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec.  -DaveM
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = sizeof(struct udphdr);
	desc.count = skb->len - desc.offset;

	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if (csum_fold(desc.csum))
		return -1;
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);