// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
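
/*
 * Note on the margin arithmetic above: margin(srcsize) = srcsize/256 + 32
 * bytes, e.g. (4096 >> 8) + 32 = 48 bytes for a 4 KiB source and
 * 256 + 32 = 288 bytes for a 64 KiB source.  The in-place paths below only
 * proceed when at least this much slack remains between the compressed
 * input and the end of the decompressed output; otherwise the input is
 * copied out of the way first.
 */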

struct z_erofs_decompressor {
	/*
	 * If destpages has holes, fill them with bounce pages.
	 * It also checks whether destpages are physically contiguous.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

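/*
 * Sliding-window bookkeeping for LZ4: every hole in rq->out[] must be backed
 * by a bounce page, and a bounce page may be reused once it falls more than
 * LZ4_MAX_DISTANCE_PAGES behind the current position (i.e. outside the LZ4
 * match window).  Returns 1 if all output pages turned out to be physically
 * contiguous (so page_address() of the first page covers the whole buffer),
 * 0 if a virtual mapping is needed, or a negative errno on failure.
 */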
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= LZ4_MAX_DISTANCE_PAGES)
			j = 0;

		/* 'bounced' bits are only valid after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, GFP_KERNEL);
			if (!victim)
				return -ENOMEM;
			victim->mapping = Z_EROFS_MAPPING_STAGING;
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

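/*
 * Linearize the compressed input into a per-CPU buffer so that in-place
 * decompression cannot overwrite not-yet-consumed source bytes.  The
 * returned buffer comes from erofs_get_pcpubuf(); the caller releases it
 * with erofs_put_pcpubuf() once decompression finishes.
 */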
static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
				       u8 *src, unsigned int pageofs_in)
{
	/*
	 * While in-place decompression is ongoing, the decompressed output
	 * could overlap the compressed input, so copy the input out first.
	 */
	struct page **in = rq->in;
	u8 *const tmp = erofs_get_pcpubuf(0);
	u8 *tmpp = tmp;
	unsigned int inlen = rq->inputsize - pageofs_in;
	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);

	while (tmpp < tmp + inlen) {
		if (!src)
			src = kmap_atomic(*in);
		memcpy(tmpp, src + pageofs_in, count);
		kunmap_atomic(src);
		src = NULL;
		tmpp += count;
		pageofs_in = 0;
		count = PAGE_SIZE;
		++in;
	}
	return tmp;
}

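/*
 * Single-page LZ4 decompression.  With the 0PADDING feature, compressed
 * data is right-aligned within its block and the leading bytes are zeroes,
 * so the real input start is found by skipping them; this is also what
 * makes in-place I/O safe when enough margin is left (see the checks
 * below).
 */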
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin, inlen;
	u8 *src;
	bool copied, support_0padding;
	int ret;

	if (rq->inputsize > PAGE_SIZE)
		return -EOPNOTSUPP;

	src = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* in-place decompression is only safe when 0padding is enabled */
	if (EROFS_SB(rq->sb)->feature_incompat &
	    EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) {
		support_0padding = true;

		while (!src[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(src);
			return -EIO;
		}
	}

	copied = false;
	inlen = rq->inputsize - inputmargin;
	if (rq->inplace_io) {
		const uint oend = (rq->pageofs_out +
				   rq->outputsize) & ~PAGE_MASK;
		const uint nr = PAGE_ALIGN(rq->pageofs_out +
					   rq->outputsize) >> PAGE_SHIFT;

		if (rq->partial_decoding || !support_0padding ||
		    rq->out[nr - 1] != rq->in[0] ||
		    rq->inputsize - oend <
		    LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
			src = generic_copy_inplace_data(rq, src, inputmargin);
			inputmargin = 0;
			copied = true;
		}
	}

	/* the legacy format could pack extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
						  inlen, rq->outputsize,
						  rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  inlen, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, inlen, inputmargin, rq->outputsize);

		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, inlen, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (copied)
		erofs_put_pcpubuf(src);
	else
		kunmap_atomic(src);
	return ret;
}

static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};

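/*
 * Scatter decompressed data from the per-CPU buffer back into the (possibly
 * sparse) output page list.  'dst' points at the first decompressed byte, so
 * 'cur' starts pageofs_out bytes earlier to stay aligned with the page grid;
 * holes (NULL pages) are simply skipped.
 */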
static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

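/*
 * Pick a destination mapping strategy before decompressing:
 *   dst_maptype 0 - single page, kmap_atomic() it directly;
 *   dst_maptype 1 - all output pages are physically contiguous, use
 *                   page_address() of the first page;
 *   dst_maptype 2 - fall back to vm_map_ram() for a virtually
 *                   contiguous view.
 * Small outputs (<= 7/8 of a page) skip all of this: they decompress into
 * a per-CPU buffer and are copied out afterwards.
 */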
static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret, i;

	if (nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/*
	 * For small output sizes (especially much less than PAGE_SIZE),
	 * copying the decompressed data is preferable to copying the
	 * compressed data.
	 */
	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
		dst = erofs_get_pcpubuf(0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		rq->inplace_io = false;
		ret = alg->decompress(rq, dst);
		if (!ret)
			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
					  rq->outputsize);

		erofs_put_pcpubuf(dst);
		return ret;
	}

	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	i = 0;
	while (1) {
		dst = vm_map_ram(rq->out, nrpages_out, -1);

		/* retry two more times (3 times in total) */
		if (dst || ++i >= 3)
			break;
		vm_unmap_aliases();
	}

	if (!dst)
		return -ENOMEM;

	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

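/*
 * "shifted" pclusters are stored uncompressed, merely shifted within the
 * block, so the transform is a plain copy that splits the single input page
 * across at most two output pages around pageofs_out.
 */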
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list.  Also check if
 * all physical pages are consecutive, which can be seen with moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'bounced' bits are only valid after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, rq->gfp);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

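/*
 * Decide how the compressed source should be mapped and whether it must be
 * moved out of the way of in-place I/O.  On return, *maptype records how the
 * caller should undo the mapping:
 *   0 - single input page, still kmap'd;
 *   1 - vm_map_ram() view over multiple input pages;
 *   2 - input copied into a per-CPU buffer (the "docopy" path);
 *   3 - true in-place decompression, input left at the tail of the output.
 */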
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i)
			if (rq->out[ctx->outpages - ctx->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/*
	 * Otherwise, copy the compressed data, which may overlap the
	 * output, into the per-CPU buffer.
	 */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with the zero_padding feature.
 *  - For LZ4, it works as long as the zero_padding feature is on (5.3+);
 *  - For MicroLZMA, it's enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

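/*
 * With zero_padding, compressed data is right-aligned within its block and
 * preceded by zero bytes; e.g. a 4000-byte stream in a 4096-byte block is
 * preceded by 96 zeroes, which z_erofs_fixup_insize() strips off.  In-place
 * decompression is then only considered when the data ends exactly at a
 * block boundary (may_inplace below).
 */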
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *dst)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 in-place decompression is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* the legacy format could pack extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non-bigpcluster cases yet */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

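/*
 * Plain (uncompressed) transforms.  Roughly: for "interlaced" pclusters the
 * record is rotated within the block, so the first output bytes live at the
 * tail of the input and are copied up front; after that, both "shifted" and
 * "interlaced" share the straight page-by-page copy loop below, which also
 * handles the case where an output page aliases its input page via
 * memmove().
 */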
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return -EOPNOTSUPP;
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1]) {
				memmove(kin + rq->pageofs_out, kin + pi, cur);
				flush_dcache_page(rq->out[0]);
			} else {
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			}
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni]) {
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
				flush_dcache_page(rq->out[no]);
			} else if (rq->out[no]) {
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			}
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return 0;
}

const struct z_erofs_decompressor erofs_decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.config = z_erofs_load_lzma_config,
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = {
		.config = z_erofs_load_deflate_config,
		.decompress = z_erofs_deflate_decompress,
		.name = "deflate"
	},
#endif
};

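/*
 * Parse the on-disk compression configurations: available_compr_algs is a
 * bitmap of the algorithms used by this image; for each set bit, a
 * per-algorithm config blob follows the superblock and is fed to the
 * matching ->config() handler.  Images without COMPR_CFGS fall back to
 * plain LZ4 defaults.
 */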
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg >= ARRAY_SIZE(erofs_decompressors) ||
		    !erofs_decompressors[alg].config) {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		} else {
			ret = erofs_decompressors[alg].config(sb,
					dsb, data, size);
		}

		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}