v5.14.15
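IPComp (RFC 3173) payload compression for the xfrm stack. The exported helpers below (ipcomp_input(), ipcomp_output(), ipcomp_init_state(), ipcomp_destroy()) are the generic half shared by the IPv4 and IPv6 IPComp protocol handlers. The file is shown first as of v5.14.15; the same file as of v3.1 follows for comparison.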
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>

struct ipcomp_tfms {
	struct list_head list;
	struct crypto_comp * __percpu *tfms;
	int users;
};

static DEFINE_MUTEX(ipcomp_resource_mutex);
static void * __percpu *ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

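/*
 * Decompress the IPComp payload into this CPU's scratch buffer, then copy it
 * back into the skb: as much as fits goes into the linear area's tailroom,
 * the remainder into freshly allocated page fragments.  Fails with -EINVAL
 * if the payload did not expand by at least the size of an IPComp header,
 * since a conforming sender only compresses when that much is saved.
 */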
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	u8 *scratch = *this_cpu_ptr(ipcomp_scratches);
	struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		return err;

	if (dlen < (plen + sizeof(struct ip_comp_hdr)))
		return -EINVAL;

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			return -EMSGSIZE;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		if (!page)
			return -ENOMEM;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		skb_frag_off_set(frag, 0);
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	return 0;
}

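/*
 * Receive path: strip the IPComp header, decompress the remaining payload in
 * place, and return the inner protocol number from the header's nexthdr
 * field (or a negative errno on failure).
 */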
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int nexthdr;
	int err = -ENOMEM;
	struct ip_comp_hdr *ipch;

	if (skb_linearize_cow(skb))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	/* Remove ipcomp header and decompress original payload */
	ipch = (void *)skb->data;
	nexthdr = ipch->nexthdr;

	skb->transport_header = skb->network_header + sizeof(*ipch);
	__skb_pull(skb, sizeof(*ipch));
	err = ipcomp_decompress(x, skb);
	if (err)
		goto out;

	err = nexthdr;

out:
	return err;
}
EXPORT_SYMBOL_GPL(ipcomp_input);

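/*
 * Compress the linear payload into this CPU's scratch buffer with BHs
 * disabled, then copy the result back into the skb behind room left for the
 * IPComp header and trim the skb to the compressed size.  Returns -EMSGSIZE
 * if compression would not save more than the size of an IPComp header, in
 * which case the caller sends the packet uncompressed.
 */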
static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	u8 *start = skb->data;
	struct crypto_comp *tfm;
	u8 *scratch;
	int err;

	local_bh_disable();
	scratch = *this_cpu_ptr(ipcomp_scratches);
	tfm = *this_cpu_ptr(ipcd->tfms);
	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
	if (err)
		goto out;

	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	local_bh_enable();

	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
	return 0;

out:
	local_bh_enable();
	return err;
}

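/*
 * Transmit path: skip compression for packets below the algorithm's
 * threshold or when it does not pay off, otherwise install the IPComp
 * header.  The inner protocol byte stashed at the MAC header position is
 * moved into ipch->nexthdr, the CPI is taken from the low 16 bits of the
 * SPI, and the protocol byte is rewritten to IPPROTO_COMP.
 */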
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);

	if (err) {
		goto out_ok;
	}

	/* Install ipcomp header, convert into ipcomp datagram. */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16 )ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
EXPORT_SYMBOL_GPL(ipcomp_output);

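/*
 * Compression scratch buffers: one IPCOMP_SCRATCH_SIZE vmalloc'ed buffer per
 * possible CPU, shared by all IPComp states and refcounted via
 * ipcomp_scratch_users.  Callers serialize with ipcomp_resource_mutex.
 */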
static void ipcomp_free_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *ipcomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}

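/*
 * Per-algorithm sets of per-CPU crypto_comp transforms.  States using the
 * same compression algorithm share one set; the sets live on
 * ipcomp_tfms_list and are refcounted, with ipcomp_resource_mutex
 * serializing allocation and freeing.
 */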
static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
{
	struct ipcomp_tfms *pos;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		if (pos->tfms == tfms)
			break;
	}

	WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));

	if (--pos->users)
		return;

	list_del(&pos->list);
	kfree(pos);

	if (!tfms)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
		crypto_free_comp(tfm);
	}
	free_percpu(tfms);
}

static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
{
	struct ipcomp_tfms *pos;
	struct crypto_comp * __percpu *tfms;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		struct crypto_comp *tfm;

		/* This can be any valid CPU ID so we don't need locking. */
		tfm = this_cpu_read(*pos->tfms);

		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
			pos->users++;
			return pos->tfms;
		}
	}

	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
	if (!pos)
		return NULL;

	pos->users = 1;
	INIT_LIST_HEAD(&pos->list);
	list_add(&pos->list, &ipcomp_tfms_list);

	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		goto error;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto error;
		*per_cpu_ptr(tfms, cpu) = tfm;
	}

	return tfms;

error:
	ipcomp_free_tfms(tfms);
	return NULL;
}

static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfms)
		ipcomp_free_tfms(ipcd->tfms);
	ipcomp_free_scratches();
}

void ipcomp_destroy(struct xfrm_state *x)
{
	struct ipcomp_data *ipcd = x->data;
	if (!ipcd)
		return;
	xfrm_state_delete_tunnel(x);
	mutex_lock(&ipcomp_resource_mutex);
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);

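/*
 * Common IPComp state setup: requires a compression algorithm (x->calg),
 * rejects states with encapsulation configured, allocates the shared scratch
 * buffers and per-CPU transforms under ipcomp_resource_mutex, and records
 * the algorithm's "don't compress below this size" threshold.
 */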
int ipcomp_init_state(struct xfrm_state *x)
{
	int err;
	struct ipcomp_data *ipcd;
	struct xfrm_algo_desc *calg_desc;

	err = -EINVAL;
	if (!x->calg)
		goto out;

	if (x->encap)
		goto out;

	err = -ENOMEM;
	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
	if (!ipcd)
		goto out;

	mutex_lock(&ipcomp_resource_mutex);
	if (!ipcomp_alloc_scratches())
		goto error;

	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
	if (!ipcd->tfms)
		goto error;
	mutex_unlock(&ipcomp_resource_mutex);

	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
	BUG_ON(!calg_desc);
	ipcd->threshold = calg_desc->uinfo.comp.threshold;
	x->data = ipcd;
	err = 0;
out:
	return err;

error:
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
	goto out;
}
EXPORT_SYMBOL_GPL(ipcomp_init_state);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
v3.1
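The listing below is the same file as of v3.1. The logic is unchanged; the visible differences are mostly API churn: per-CPU data is reached with get_cpu()/per_cpu_ptr()/put_cpu() rather than this_cpu_ptr(), the decompress path fills in frag->page, frag->page_offset and frag->size directly instead of using the skb_frag_*() helpers, scratch buffers come from plain vmalloc() rather than vmalloc_node(), and the file still carries the pre-SPDX GPL boilerplate.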
 
/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>

struct ipcomp_tfms {
	struct list_head list;
	struct crypto_comp * __percpu *tfms;
	int users;
};

static DEFINE_MUTEX(ipcomp_resource_mutex);
static void * __percpu *ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		frag->page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!frag->page)
			goto out;

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		memcpy(page_address(frag->page), scratch, len);

		frag->page_offset = 0;
		frag->size = len;
		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}

int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int nexthdr;
	int err = -ENOMEM;
	struct ip_comp_hdr *ipch;

	if (skb_linearize_cow(skb))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	/* Remove ipcomp header and decompress original payload */
	ipch = (void *)skb->data;
	nexthdr = ipch->nexthdr;

	skb->transport_header = skb->network_header + sizeof(*ipch);
	__skb_pull(skb, sizeof(*ipch));
	err = ipcomp_decompress(x, skb);
	if (err)
		goto out;

	err = nexthdr;

out:
	return err;
}
EXPORT_SYMBOL_GPL(ipcomp_input);

static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err;

	local_bh_disable();
	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
	local_bh_enable();
	if (err)
		goto out;

	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	put_cpu();

	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
	return 0;

out:
	put_cpu();
	return err;
}

int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);

	if (err) {
		goto out_ok;
	}

	/* Install ipcomp header, convert into ipcomp datagram. */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16 )ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
EXPORT_SYMBOL_GPL(ipcomp_output);

static void ipcomp_free_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *ipcomp_alloc_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}

static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
{
	struct ipcomp_tfms *pos;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		if (pos->tfms == tfms)
			break;
	}

	WARN_ON(!pos);

	if (--pos->users)
		return;

	list_del(&pos->list);
	kfree(pos);

	if (!tfms)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
		crypto_free_comp(tfm);
	}
	free_percpu(tfms);
}

static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
{
	struct ipcomp_tfms *pos;
	struct crypto_comp * __percpu *tfms;
	int cpu;

	/* This can be any valid CPU ID so we don't need locking. */
	cpu = raw_smp_processor_id();

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		struct crypto_comp *tfm;

		tfms = pos->tfms;
		tfm = *per_cpu_ptr(tfms, cpu);

		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
			pos->users++;
			return tfms;
		}
	}

	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
	if (!pos)
		return NULL;

	pos->users = 1;
	INIT_LIST_HEAD(&pos->list);
	list_add(&pos->list, &ipcomp_tfms_list);

	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		goto error;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto error;
		*per_cpu_ptr(tfms, cpu) = tfm;
	}

	return tfms;

error:
	ipcomp_free_tfms(tfms);
	return NULL;
}

static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfms)
		ipcomp_free_tfms(ipcd->tfms);
	ipcomp_free_scratches();
}

void ipcomp_destroy(struct xfrm_state *x)
{
	struct ipcomp_data *ipcd = x->data;
	if (!ipcd)
		return;
	xfrm_state_delete_tunnel(x);
	mutex_lock(&ipcomp_resource_mutex);
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);

int ipcomp_init_state(struct xfrm_state *x)
{
	int err;
	struct ipcomp_data *ipcd;
	struct xfrm_algo_desc *calg_desc;

	err = -EINVAL;
	if (!x->calg)
		goto out;

	if (x->encap)
		goto out;

	err = -ENOMEM;
	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
	if (!ipcd)
		goto out;

	mutex_lock(&ipcomp_resource_mutex);
	if (!ipcomp_alloc_scratches())
		goto error;

	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
	if (!ipcd->tfms)
		goto error;
	mutex_unlock(&ipcomp_resource_mutex);

	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
	BUG_ON(!calg_desc);
	ipcd->threshold = calg_desc->uinfo.comp.threshold;
	x->data = ipcd;
	err = 0;
out:
	return err;

error:
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
	goto out;
}
EXPORT_SYMBOL_GPL(ipcomp_init_state);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");