// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>

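/*
 * Compression transforms are expensive, so one set of per-CPU tfms is
 * allocated per algorithm and shared by every IPComp state that uses that
 * algorithm.  Each set is tracked on ipcomp_tfms_list with a reference
 * count; the list and the shared scratch buffers below are protected by
 * ipcomp_resource_mutex.
 */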
struct ipcomp_tfms {
	struct list_head list;
	struct crypto_comp * __percpu *tfms;
	int users;
};

static DEFINE_MUTEX(ipcomp_resource_mutex);
static void * __percpu *ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

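/*
 * Decompress the IPComp payload into the per-CPU scratch buffer, then copy
 * the result back into the skb: as much as fits goes into the linear
 * area's tailroom, and the remainder is appended as newly allocated page
 * frags.  Unlike ipcomp_compress(), this relies on the caller already
 * running with bottom halves disabled when the per-CPU data is used.
 */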
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	u8 *scratch = *this_cpu_ptr(ipcomp_scratches);
	struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		return err;

	if (dlen < (plen + sizeof(struct ip_comp_hdr)))
		return -EINVAL;

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			return -EMSGSIZE;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		if (!page)
			return -ENOMEM;

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		skb_frag_fill_page_desc(frag, page, 0, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	return 0;
}

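/*
 * ipcomp_input - decapsulate an IPComp datagram
 *
 * Strips the IPComp header and decompresses the original payload in place.
 * Returns the next header protocol number on success or a negative errno
 * on failure.
 */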
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int nexthdr;
	int err = -ENOMEM;
	struct ip_comp_hdr *ipch;

	if (skb_linearize_cow(skb))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	/* Remove ipcomp header and decompress original payload */
	ipch = (void *)skb->data;
	nexthdr = ipch->nexthdr;

	skb->transport_header = skb->network_header + sizeof(*ipch);
	__skb_pull(skb, sizeof(*ipch));
	err = ipcomp_decompress(x, skb);
	if (err)
		goto out;

	err = nexthdr;

out:
	return err;
}
EXPORT_SYMBOL_GPL(ipcomp_input);

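/*
 * Compress the payload into the per-CPU scratch buffer and, if that saved
 * space, copy the result back into the skb while leaving room at the front
 * for the IPComp header.  Bottom halves are disabled while the per-CPU
 * scratch and tfm are in use.  Returns -EMSGSIZE when compression would not
 * shrink the packet.
 */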
static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	u8 *start = skb->data;
	struct crypto_comp *tfm;
	u8 *scratch;
	int err;

	local_bh_disable();
	scratch = *this_cpu_ptr(ipcomp_scratches);
	tfm = *this_cpu_ptr(ipcd->tfms);
	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
	if (err)
		goto out;

	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	local_bh_enable();

	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
	return 0;

out:
	local_bh_enable();
	return err;
}

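/*
 * ipcomp_output - build an IPComp datagram
 *
 * Packets below the algorithm's threshold, and packets that do not get
 * smaller when compressed, are passed through unchanged; otherwise the
 * payload is replaced by its compressed form and an IPComp header carrying
 * the CPI is installed.  Returns 0 in either case.
 */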
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);

	if (err) {
		goto out_ok;
	}

	/* Install ipcomp header, convert into ipcomp datagram. */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16)ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
EXPORT_SYMBOL_GPL(ipcomp_output);

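/*
 * One IPCOMP_SCRATCH_SIZE scratch buffer is kept per possible CPU and is
 * shared by every IPComp state.  The buffers are allocated on first use and
 * freed when the last user goes away; ipcomp_scratch_users does the
 * reference counting, under ipcomp_resource_mutex.
 */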
static void ipcomp_free_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
	ipcomp_scratches = NULL;
}

static void * __percpu *ipcomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}

static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
{
	struct ipcomp_tfms *pos;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		if (pos->tfms == tfms)
			break;
	}

	WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));

	if (--pos->users)
		return;

	list_del(&pos->list);
	kfree(pos);

	if (!tfms)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
		crypto_free_comp(tfm);
	}
	free_percpu(tfms);
}

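/*
 * Find an existing per-CPU tfm set for @alg_name and take a reference on
 * it, or allocate a new set with one tfm per possible CPU.  Called under
 * ipcomp_resource_mutex from ipcomp_init_state().
 */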
static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
{
	struct ipcomp_tfms *pos;
	struct crypto_comp * __percpu *tfms;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		struct crypto_comp *tfm;

		/* This can be any valid CPU ID so we don't need locking. */
		tfm = this_cpu_read(*pos->tfms);

		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
			pos->users++;
			return pos->tfms;
		}
	}

	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
	if (!pos)
		return NULL;

	pos->users = 1;
	INIT_LIST_HEAD(&pos->list);
	list_add(&pos->list, &ipcomp_tfms_list);

	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		goto error;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto error;
		*per_cpu_ptr(tfms, cpu) = tfm;
	}

	return tfms;

error:
	ipcomp_free_tfms(tfms);
	return NULL;
}

static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfms)
		ipcomp_free_tfms(ipcd->tfms);
	ipcomp_free_scratches();
}

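/*
 * ipcomp_destroy - release an IPComp state's resources
 *
 * Deletes the tunnel state associated with this state (if any), drops the
 * state's references on the shared tfm set and scratch buffers, and frees
 * its private data.
 */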
void ipcomp_destroy(struct xfrm_state *x)
{
	struct ipcomp_data *ipcd = x->data;
	if (!ipcd)
		return;
	xfrm_state_delete_tunnel(x);
	mutex_lock(&ipcomp_resource_mutex);
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);

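/*
 * ipcomp_init_state - set up the IPComp portion of an xfrm state
 *
 * Validates that a compression algorithm was supplied and that no
 * encapsulation is configured, then allocates the shared scratch buffers
 * and per-CPU tfms and records the algorithm's compression threshold.
 */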
int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	int err;
	struct ipcomp_data *ipcd;
	struct xfrm_algo_desc *calg_desc;

	err = -EINVAL;
	if (!x->calg) {
		NL_SET_ERR_MSG(extack, "Missing required compression algorithm");
		goto out;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "IPComp is not compatible with encapsulation");
		goto out;
	}

	err = -ENOMEM;
	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
	if (!ipcd)
		goto out;

	mutex_lock(&ipcomp_resource_mutex);
	if (!ipcomp_alloc_scratches())
		goto error;

	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
	if (!ipcd->tfms)
		goto error;
	mutex_unlock(&ipcomp_resource_mutex);

	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
	BUG_ON(!calg_desc);
	ipcd->threshold = calg_desc->uinfo.comp.threshold;
	x->data = ipcd;
	err = 0;
out:
	return err;

error:
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
	goto out;
}
EXPORT_SYMBOL_GPL(ipcomp_init_state);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");