// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/ipcomp.h>
#include <net/xfrm.h>

struct ipcomp_tfms {
	struct list_head list;
	struct crypto_comp * __percpu *tfms;
	int users;
};

static DEFINE_MUTEX(ipcomp_resource_mutex);
static void * __percpu *ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);

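/*
 * Decompress the IPComp payload into the per-CPU scratch buffer and copy
 * the result back into the skb, spilling anything beyond the linear
 * tailroom into freshly allocated page fragments.  The per-CPU scratch and
 * tfm are used without local_bh_disable(), so the caller is expected to
 * run in softirq context.
 */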
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	u8 *scratch = *this_cpu_ptr(ipcomp_scratches);
	struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		return err;

	if (dlen < (plen + sizeof(struct ip_comp_hdr)))
		return -EINVAL;

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			return -EMSGSIZE;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		if (!page)
			return -ENOMEM;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		skb_frag_off_set(frag, 0);
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	return 0;
}

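/*
 * ipcomp_input - handle an inbound IPComp (IPPROTO_COMP) packet.
 *
 * Strips the 4-byte IPComp header, decompresses the payload in place and
 * returns the next-header value taken from that header, or a negative
 * errno on failure, so the caller can continue protocol processing.
 */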
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int nexthdr;
	int err = -ENOMEM;
	struct ip_comp_hdr *ipch;

	if (skb_linearize_cow(skb))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	/* Remove ipcomp header and decompress original payload */
	ipch = (void *)skb->data;
	nexthdr = ipch->nexthdr;

	skb->transport_header = skb->network_header + sizeof(*ipch);
	__skb_pull(skb, sizeof(*ipch));
	err = ipcomp_decompress(x, skb);
	if (err)
		goto out;

	err = nexthdr;

out:
	return err;
}
EXPORT_SYMBOL_GPL(ipcomp_input);

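/*
 * Compress the payload into the per-CPU scratch buffer and, if that saved
 * space, copy the result back into the skb just past where the IPComp
 * header will go.  Returns -EMSGSIZE when compression would not shrink the
 * packet, in which case the skb is left untouched.
 */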
static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	u8 *start = skb->data;
	struct crypto_comp *tfm;
	u8 *scratch;
	int err;

	local_bh_disable();
	scratch = *this_cpu_ptr(ipcomp_scratches);
	tfm = *this_cpu_ptr(ipcd->tfms);
	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
	if (err)
		goto out;

	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
		err = -EMSGSIZE;
		goto out;
	}

	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	local_bh_enable();

	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
	return 0;

out:
	local_bh_enable();
	return err;
}

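/*
 * ipcomp_output - compress an outbound packet and install the IPComp header.
 *
 * Packets below the algorithm's threshold, and packets that do not
 * compress, are passed through unchanged rather than failed.  On success
 * the protocol byte saved at the MAC header position becomes the IPComp
 * next-header field, the outer protocol is rewritten to IPPROTO_COMP and
 * the CPI is taken from the low 16 bits of the SPI.
 */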
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);
	if (err)
		goto out_ok;

	/* Install ipcomp header, convert into ipcomp datagram. */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16)ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
EXPORT_SYMBOL_GPL(ipcomp_output);

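/*
 * The scratch buffers and crypto tfms below are shared by all IPComp
 * states and refcounted under ipcomp_resource_mutex.  One buffer of
 * IPCOMP_SCRATCH_SIZE bytes is kept per possible CPU.
 */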
static void ipcomp_free_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
	ipcomp_scratches = NULL;
}

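/*
 * Allocate (or take another reference on) the per-CPU scratch buffers.
 * On a partial allocation failure the buffers allocated so far are left in
 * place with the reference held; the caller is expected to drop it again
 * via ipcomp_free_scratches() on its error path.
 */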
static void * __percpu *ipcomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}

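/*
 * Drop a reference on a per-CPU tfm set.  The set is looked up in
 * ipcomp_tfms_list so its refcount can be decremented; the tfms themselves
 * are only freed once the last user is gone.
 */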
static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
{
	struct ipcomp_tfms *pos;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		if (pos->tfms == tfms)
			break;
	}

	WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));

	if (--pos->users)
		return;

	list_del(&pos->list);
	kfree(pos);

	if (!tfms)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
		crypto_free_comp(tfm);
	}
	free_percpu(tfms);
}

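/*
 * Find an existing per-CPU tfm set for alg_name and bump its refcount, or
 * allocate a new set with one crypto_comp tfm per possible CPU.  Callers
 * hold ipcomp_resource_mutex, which is what makes the list walk and the
 * refcounting safe.
 */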
static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
{
	struct ipcomp_tfms *pos;
	struct crypto_comp * __percpu *tfms;
	int cpu;

	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
		struct crypto_comp *tfm;

		/* This can be any valid CPU ID so we don't need locking. */
		tfm = this_cpu_read(*pos->tfms);

		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
			pos->users++;
			return pos->tfms;
		}
	}

	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
	if (!pos)
		return NULL;

	pos->users = 1;
	INIT_LIST_HEAD(&pos->list);
	list_add(&pos->list, &ipcomp_tfms_list);

	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		goto error;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto error;
		*per_cpu_ptr(tfms, cpu) = tfm;
	}

	return tfms;

error:
	ipcomp_free_tfms(tfms);
	return NULL;
}

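/* Release the tfm set and the scratch reference owned by one IPComp state. */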
static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfms)
		ipcomp_free_tfms(ipcd->tfms);
	ipcomp_free_scratches();
}

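/*
 * ipcomp_destroy - tear down the IPComp-specific part of an xfrm_state.
 *
 * Deletes any tunnel state attached to this state and returns the shared
 * per-CPU resources under ipcomp_resource_mutex.
 */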
void ipcomp_destroy(struct xfrm_state *x)
{
	struct ipcomp_data *ipcd = x->data;

	if (!ipcd)
		return;
	xfrm_state_delete_tunnel(x);
	mutex_lock(&ipcomp_resource_mutex);
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);

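/*
 * ipcomp_init_state - common state setup shared by the per-family IPComp
 * modules.
 *
 * Requires a compression algorithm, rejects encapsulation, then takes
 * references on the per-CPU scratch buffers and tfm set and caches the
 * algorithm's "don't compress below this size" threshold.
 */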
int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	int err;
	struct ipcomp_data *ipcd;
	struct xfrm_algo_desc *calg_desc;

	err = -EINVAL;
	if (!x->calg) {
		NL_SET_ERR_MSG(extack, "Missing required compression algorithm");
		goto out;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "IPComp is not compatible with encapsulation");
		goto out;
	}

	err = -ENOMEM;
	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
	if (!ipcd)
		goto out;

	mutex_lock(&ipcomp_resource_mutex);
	if (!ipcomp_alloc_scratches())
		goto error;

	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
	if (!ipcd->tfms)
		goto error;
	mutex_unlock(&ipcomp_resource_mutex);

	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
	BUG_ON(!calg_desc);
	ipcd->threshold = calg_desc->uinfo.comp.threshold;
	x->data = ipcd;
	err = 0;
out:
	return err;

error:
	ipcomp_free_data(ipcd);
	mutex_unlock(&ipcomp_resource_mutex);
	kfree(ipcd);
	goto out;
}
EXPORT_SYMBOL_GPL(ipcomp_init_state);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");