fs/btrfs/zstd.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2016-present, Facebook, Inc.
  4 * All rights reserved.
  5 *
  6 */
  7
  8#include <linux/bio.h>
  9#include <linux/bitmap.h>
 10#include <linux/err.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/mm.h>
 14#include <linux/sched/mm.h>
 15#include <linux/pagemap.h>
 16#include <linux/refcount.h>
 17#include <linux/sched.h>
 18#include <linux/slab.h>
 19#include <linux/zstd.h>
 20#include "misc.h"
 21#include "compression.h"
 22#include "ctree.h"
 23
 24#define ZSTD_BTRFS_MAX_WINDOWLOG 17
 25#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
 26#define ZSTD_BTRFS_DEFAULT_LEVEL 3
 27#define ZSTD_BTRFS_MAX_LEVEL 15
 28/* 307s to avoid pathologically clashing with transaction commit */
 29#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
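Aside (assuming btrfs's default 30 s transaction commit interval): 307 is prime and 307 mod 30 = 7, so successive firings of the reclaim timer drift relative to the periodic commit instead of repeatedly landing on top of it.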
 30
 31static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
 32						 size_t src_len)
 33{
 34	ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
 35
 36	if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
 37		params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
 38	WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
 39	return params;
 40}
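For context on the clamp above: a windowLog of 17 bounds the zstd match window to 1 << 17 bytes = 128 KiB which, as an assumption here, corresponds to the largest chunk btrfs hands to the compressor in one call, so the WARN_ON on a larger src_len should never trigger in practice.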
 41
 42struct workspace {
 43	void *mem;
 44	size_t size;
 45	char *buf;
 46	unsigned int level;
 47	unsigned int req_level;
 48	unsigned long last_used; /* jiffies */
 49	struct list_head list;
 50	struct list_head lru_list;
 51	ZSTD_inBuffer in_buf;
 52	ZSTD_outBuffer out_buf;
 53};
 54
 55/*
 56 * Zstd Workspace Management
 57 *
 58 * Zstd workspaces have different memory requirements depending on the level.
 59 * The zstd workspaces are managed by having individual lists for each level
 60 * and a global lru.  Forward progress is maintained by protecting a max level
 61 * workspace.
 62 *
 63 * Getting a workspace is done by using the bitmap to identify the levels that
 64 * have available workspaces and scans up.  This lets us recycle higher level
 65 * workspaces because of the monotonic memory guarantee.  A workspace's
 66 * last_used is only updated if it is being used by the corresponding memory
 67 * level.  Putting a workspace involves adding it back to the appropriate places
 68 * and adding it back to the lru if necessary.
 69 *
 70 * A timer is used to reclaim workspaces if they have not been used for
 71 * ZSTD_BTRFS_RECLAIM_JIFFIES.  This helps keep only active workspaces around.
 72 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 73 */
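
A simplified, self-contained userspace sketch of the lookup scheme described above (names, sizes, and the lack of locking are illustrative, not the kernel's): each level has an idle pool, a bitmap mirrors which pools are non-empty, and a request for level N scans the set bits upward so a larger, higher-level workspace can be recycled.

#include <stdio.h>

#define MAX_LEVEL 15

static int idle_count[MAX_LEVEL];	/* stand-in for the per-level idle_ws lists */
static unsigned long active_map;	/* bit i set <=> idle_count[i] > 0 */

/* Return the level of the workspace handed out, or 0 if none is idle. */
static int take_idle_workspace(unsigned int level)
{
	for (unsigned int i = level - 1; i < MAX_LEVEL; i++) {
		if (!(active_map & (1UL << i)))
			continue;
		if (--idle_count[i] == 0)
			active_map &= ~(1UL << i);
		return i + 1;		/* possibly a higher level than requested */
	}
	return 0;			/* caller would allocate a new workspace */
}

int main(void)
{
	idle_count[14] = 1;		/* only the max-level workspace is idle */
	active_map = 1UL << 14;

	printf("level 3 request served by level %d\n", take_idle_workspace(3));
	printf("level 3 request served by level %d\n", take_idle_workspace(3));
	return 0;
}
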
 74
 75struct zstd_workspace_manager {
 76	const struct btrfs_compress_op *ops;
 77	spinlock_t lock;
 78	struct list_head lru_list;
 79	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
 80	unsigned long active_map;
 81	wait_queue_head_t wait;
 82	struct timer_list timer;
 83};
 84
 85static struct zstd_workspace_manager wsm;
 86
 87static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
 88
 89static inline struct workspace *list_to_workspace(struct list_head *list)
 90{
 91	return container_of(list, struct workspace, list);
 92}
 93
 94static void zstd_free_workspace(struct list_head *ws);
 95static struct list_head *zstd_alloc_workspace(unsigned int level);
 96
 97/*
 98 * zstd_reclaim_timer_fn - reclaim timer
 99 * @t: timer
100 *
101 * This scans the lru_list and attempts to reclaim any workspace that hasn't
102 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
103 */
104static void zstd_reclaim_timer_fn(struct timer_list *timer)
105{
106	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
107	struct list_head *pos, *next;
108
109	spin_lock_bh(&wsm.lock);
110
111	if (list_empty(&wsm.lru_list)) {
112		spin_unlock_bh(&wsm.lock);
113		return;
114	}
115
116	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
117		struct workspace *victim = container_of(pos, struct workspace,
118							lru_list);
119		unsigned int level;
120
121		if (time_after(victim->last_used, reclaim_threshold))
122			break;
123
124		/* workspace is in use */
125		if (victim->req_level)
126			continue;
127
128		level = victim->level;
129		list_del(&victim->lru_list);
130		list_del(&victim->list);
131		zstd_free_workspace(&victim->list);
132
133		if (list_empty(&wsm.idle_ws[level - 1]))
134			clear_bit(level - 1, &wsm.active_map);
135
136	}
137
138	if (!list_empty(&wsm.lru_list))
139		mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
140
141	spin_unlock_bh(&wsm.lock);
142}
143
144/*
145 * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
146 *
147 * It is possible based on the level configurations that a higher level
148 * workspace uses less memory than a lower level workspace.  In order to reuse
149 * workspaces, this must be made a monotonic relationship.  This precomputes
150 * the required memory for each level and enforces the monotonicity between
151 * level and memory required.
152 */
153static void zstd_calc_ws_mem_sizes(void)
154{
155	size_t max_size = 0;
156	unsigned int level;
157
158	for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
159		ZSTD_parameters params =
160			zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
161		size_t level_size =
162			max_t(size_t,
163			      ZSTD_CStreamWorkspaceBound(params.cParams),
164			      ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
165
166		max_size = max_t(size_t, max_size, level_size);
167		zstd_ws_mem_sizes[level - 1] = max_size;
168	}
169}
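
A small userspace illustration of the running max above, using made-up per-level bounds (the real values come from the zstd workspace-bound helpers): if a higher level happens to need less memory than a lower one, its stored size is bumped up so reusing higher-level workspaces stays safe.

#include <stdio.h>

int main(void)
{
	/* hypothetical raw bounds in KiB, deliberately non-monotonic at level 2 */
	const unsigned long raw[] = { 1100, 1000, 1300 };
	unsigned long stored[3], max = 0;

	for (int i = 0; i < 3; i++) {
		if (raw[i] > max)
			max = raw[i];
		stored[i] = max;
		printf("level %d: raw %4lu KiB -> stored %4lu KiB\n",
		       i + 1, raw[i], stored[i]);
	}
	return 0;
}
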
170
171static void zstd_init_workspace_manager(void)
172{
173	struct list_head *ws;
174	int i;
175
176	zstd_calc_ws_mem_sizes();
177
178	wsm.ops = &btrfs_zstd_compress;
179	spin_lock_init(&wsm.lock);
180	init_waitqueue_head(&wsm.wait);
181	timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);
182
183	INIT_LIST_HEAD(&wsm.lru_list);
184	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
185		INIT_LIST_HEAD(&wsm.idle_ws[i]);
186
187	ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
188	if (IS_ERR(ws)) {
189		pr_warn(
190		"BTRFS: cannot preallocate zstd compression workspace\n");
191	} else {
192		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
193		list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
194	}
195}
196
197static void zstd_cleanup_workspace_manager(void)
198{
199	struct workspace *workspace;
200	int i;
201
202	spin_lock_bh(&wsm.lock);
203	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
204		while (!list_empty(&wsm.idle_ws[i])) {
205			workspace = container_of(wsm.idle_ws[i].next,
206						 struct workspace, list);
207			list_del(&workspace->list);
208			list_del(&workspace->lru_list);
209			zstd_free_workspace(&workspace->list);
210		}
211	}
212	spin_unlock_bh(&wsm.lock);
213
214	del_timer_sync(&wsm.timer);
215}
216
217/*
218 * zstd_find_workspace - find workspace
219 * @level: compression level
220 *
221 * This iterates over the set bits in the active_map beginning at the requested
222 * compression level.  This lets us utilize already allocated workspaces before
223 * allocating a new one.  If the workspace is of a larger size, it is used, but
224 * the place in the lru_list and last_used times are not updated.  This is to
225 * offer the opportunity to reclaim the workspace in favor of allocating an
226 * appropriately sized one in the future.
227 */
228static struct list_head *zstd_find_workspace(unsigned int level)
229{
230	struct list_head *ws;
231	struct workspace *workspace;
232	int i = level - 1;
233
234	spin_lock_bh(&wsm.lock);
235	for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
236		if (!list_empty(&wsm.idle_ws[i])) {
237			ws = wsm.idle_ws[i].next;
238			workspace = list_to_workspace(ws);
239			list_del_init(ws);
240			/* keep its place if it's a lower level using this */
241			workspace->req_level = level;
242			if (level == workspace->level)
243				list_del(&workspace->lru_list);
244			if (list_empty(&wsm.idle_ws[i]))
245				clear_bit(i, &wsm.active_map);
246			spin_unlock_bh(&wsm.lock);
247			return ws;
248		}
249	}
250	spin_unlock_bh(&wsm.lock);
251
252	return NULL;
253}
254
255/*
256 * zstd_get_workspace - zstd's get_workspace
257 * @level: compression level
258 *
259 * If @level is 0, then any compression level can be used.  Therefore, we begin
260 * scanning from 1.  We first scan through possible workspaces and then after
261 * attempt to allocate a new workspace.  If we fail to allocate one due to
262 * memory pressure, go to sleep waiting for the max level workspace to free up.
263 */
264static struct list_head *zstd_get_workspace(unsigned int level)
265{
266	struct list_head *ws;
267	unsigned int nofs_flag;
268
269	/* level == 0 means we can use any workspace */
270	if (!level)
271		level = 1;
272
273again:
274	ws = zstd_find_workspace(level);
275	if (ws)
276		return ws;
277
278	nofs_flag = memalloc_nofs_save();
279	ws = zstd_alloc_workspace(level);
280	memalloc_nofs_restore(nofs_flag);
281
282	if (IS_ERR(ws)) {
283		DEFINE_WAIT(wait);
284
285		prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
286		schedule();
287		finish_wait(&wsm.wait, &wait);
288
289		goto again;
290	}
291
292	return ws;
293}
294
295/*
296 * zstd_put_workspace - zstd put_workspace
297 * @ws: list_head for the workspace
298 *
299 * When putting back a workspace, we only need to update the LRU if we are of
300 * the requested compression level.  Here is where we continue to protect the
301 * max level workspace or update last_used accordingly.  If the reclaim timer
302 * isn't set, it is also set here.  Only the max level workspace tries and wakes
303 * up waiting workspaces.
304 */
305static void zstd_put_workspace(struct list_head *ws)
306{
307	struct workspace *workspace = list_to_workspace(ws);
308
309	spin_lock_bh(&wsm.lock);
310
311	/* A node is only taken off the lru if we are the corresponding level */
312	if (workspace->req_level == workspace->level) {
313		/* Hide a max level workspace from reclaim */
314		if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
315			INIT_LIST_HEAD(&workspace->lru_list);
316		} else {
317			workspace->last_used = jiffies;
318			list_add(&workspace->lru_list, &wsm.lru_list);
319			if (!timer_pending(&wsm.timer))
320				mod_timer(&wsm.timer,
321					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
322		}
323	}
324
325	set_bit(workspace->level - 1, &wsm.active_map);
326	list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
327	workspace->req_level = 0;
328
329	spin_unlock_bh(&wsm.lock);
330
331	if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
332		cond_wake_up(&wsm.wait);
333}
334
335static void zstd_free_workspace(struct list_head *ws)
336{
337	struct workspace *workspace = list_entry(ws, struct workspace, list);
338
339	kvfree(workspace->mem);
340	kfree(workspace->buf);
341	kfree(workspace);
342}
343
344static struct list_head *zstd_alloc_workspace(unsigned int level)
345{
346	struct workspace *workspace;
347
348	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
349	if (!workspace)
350		return ERR_PTR(-ENOMEM);
351
352	workspace->size = zstd_ws_mem_sizes[level - 1];
353	workspace->level = level;
354	workspace->req_level = level;
355	workspace->last_used = jiffies;
356	workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
357	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
358	if (!workspace->mem || !workspace->buf)
359		goto fail;
360
361	INIT_LIST_HEAD(&workspace->list);
362	INIT_LIST_HEAD(&workspace->lru_list);
363
364	return &workspace->list;
365fail:
366	zstd_free_workspace(&workspace->list);
367	return ERR_PTR(-ENOMEM);
368}
369
370static int zstd_compress_pages(struct list_head *ws,
371		struct address_space *mapping,
372		u64 start,
373		struct page **pages,
374		unsigned long *out_pages,
375		unsigned long *total_in,
376		unsigned long *total_out)
377{
378	struct workspace *workspace = list_entry(ws, struct workspace, list);
379	ZSTD_CStream *stream;
380	int ret = 0;
381	int nr_pages = 0;
382	struct page *in_page = NULL;  /* The current page to read */
383	struct page *out_page = NULL; /* The current page to write to */
384	unsigned long tot_in = 0;
385	unsigned long tot_out = 0;
386	unsigned long len = *total_out;
387	const unsigned long nr_dest_pages = *out_pages;
388	unsigned long max_out = nr_dest_pages * PAGE_SIZE;
389	ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
390							   len);
391
392	*out_pages = 0;
393	*total_out = 0;
394	*total_in = 0;
395
396	/* Initialize the stream */
397	stream = ZSTD_initCStream(params, len, workspace->mem,
398			workspace->size);
399	if (!stream) {
400		pr_warn("BTRFS: ZSTD_initCStream failed\n");
401		ret = -EIO;
402		goto out;
403	}
404
405	/* map in the first page of input data */
406	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
407	workspace->in_buf.src = kmap(in_page);
408	workspace->in_buf.pos = 0;
409	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
410
411
412	/* Allocate and map in the output buffer */
413	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
414	if (out_page == NULL) {
415		ret = -ENOMEM;
416		goto out;
417	}
418	pages[nr_pages++] = out_page;
419	workspace->out_buf.dst = kmap(out_page);
420	workspace->out_buf.pos = 0;
421	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
422
423	while (1) {
424		size_t ret2;
425
426		ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
427				&workspace->in_buf);
428		if (ZSTD_isError(ret2)) {
429			pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
430					ZSTD_getErrorCode(ret2));
431			ret = -EIO;
432			goto out;
433		}
434
435		/* Check to see if we are making it bigger */
436		if (tot_in + workspace->in_buf.pos > 8192 &&
437				tot_in + workspace->in_buf.pos <
438				tot_out + workspace->out_buf.pos) {
439			ret = -E2BIG;
440			goto out;
441		}
442
443		/* We've reached the end of our output range */
444		if (workspace->out_buf.pos >= max_out) {
445			tot_out += workspace->out_buf.pos;
446			ret = -E2BIG;
447			goto out;
448		}
449
450		/* Check if we need more output space */
451		if (workspace->out_buf.pos == workspace->out_buf.size) {
452			tot_out += PAGE_SIZE;
453			max_out -= PAGE_SIZE;
454			kunmap(out_page);
455			if (nr_pages == nr_dest_pages) {
456				out_page = NULL;
457				ret = -E2BIG;
458				goto out;
459			}
460			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
461			if (out_page == NULL) {
462				ret = -ENOMEM;
463				goto out;
464			}
465			pages[nr_pages++] = out_page;
466			workspace->out_buf.dst = kmap(out_page);
467			workspace->out_buf.pos = 0;
468			workspace->out_buf.size = min_t(size_t, max_out,
469							PAGE_SIZE);
470		}
471
472		/* We've reached the end of the input */
473		if (workspace->in_buf.pos >= len) {
474			tot_in += workspace->in_buf.pos;
475			break;
476		}
477
478		/* Check if we need more input */
479		if (workspace->in_buf.pos == workspace->in_buf.size) {
480			tot_in += PAGE_SIZE;
481			kunmap(in_page);
482			put_page(in_page);
483
484			start += PAGE_SIZE;
485			len -= PAGE_SIZE;
486			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
487			workspace->in_buf.src = kmap(in_page);
488			workspace->in_buf.pos = 0;
489			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
490		}
491	}
492	while (1) {
493		size_t ret2;
494
495		ret2 = ZSTD_endStream(stream, &workspace->out_buf);
496		if (ZSTD_isError(ret2)) {
497			pr_debug("BTRFS: ZSTD_endStream returned %d\n",
498					ZSTD_getErrorCode(ret2));
499			ret = -EIO;
500			goto out;
501		}
502		if (ret2 == 0) {
503			tot_out += workspace->out_buf.pos;
504			break;
505		}
506		if (workspace->out_buf.pos >= max_out) {
507			tot_out += workspace->out_buf.pos;
508			ret = -E2BIG;
509			goto out;
510		}
511
512		tot_out += PAGE_SIZE;
513		max_out -= PAGE_SIZE;
514		kunmap(out_page);
515		if (nr_pages == nr_dest_pages) {
516			out_page = NULL;
517			ret = -E2BIG;
518			goto out;
519		}
520		out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
521		if (out_page == NULL) {
522			ret = -ENOMEM;
523			goto out;
524		}
525		pages[nr_pages++] = out_page;
526		workspace->out_buf.dst = kmap(out_page);
527		workspace->out_buf.pos = 0;
528		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
529	}
530
531	if (tot_out >= tot_in) {
532		ret = -E2BIG;
533		goto out;
534	}
535
536	ret = 0;
537	*total_in = tot_in;
538	*total_out = tot_out;
539out:
540	*out_pages = nr_pages;
541	/* Cleanup */
542	if (in_page) {
543		kunmap(in_page);
544		put_page(in_page);
545	}
546	if (out_page)
547		kunmap(out_page);
548	return ret;
549}
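
For comparison with the two loops above, a minimal userspace sketch of the same streaming pattern against the libzstd API (an illustration, not the in-kernel wrappers; the userspace init call also differs from the workspace-based variant shown above). Build with -lzstd. It feeds one 128 KiB buffer, drains output as it goes, then flushes the frame with ZSTD_endStream().

#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
	static char src[128 * 1024];		/* one btrfs-sized input chunk */
	static char dst[256 * 1024];
	ZSTD_CStream *cs = ZSTD_createCStream();
	ZSTD_inBuffer in = { src, sizeof(src), 0 };
	ZSTD_outBuffer out = { dst, sizeof(dst), 0 };
	size_t ret;

	if (!cs)
		return 1;
	memset(src, 'a', sizeof(src));
	ZSTD_initCStream(cs, 3);		/* level 3, the btrfs default */

	while (in.pos < in.size) {		/* compress until the input is consumed */
		ret = ZSTD_compressStream(cs, &out, &in);
		if (ZSTD_isError(ret))
			return 1;
	}
	do {					/* flush buffered data and the frame epilogue */
		ret = ZSTD_endStream(cs, &out);
		if (ZSTD_isError(ret))
			return 1;
	} while (ret != 0);

	printf("compressed %zu bytes to %zu bytes\n", in.size, out.pos);
	ZSTD_freeCStream(cs);
	return 0;
}
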
550
551static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
552{
553	struct workspace *workspace = list_entry(ws, struct workspace, list);
554	struct page **pages_in = cb->compressed_pages;
555	u64 disk_start = cb->start;
556	struct bio *orig_bio = cb->orig_bio;
557	size_t srclen = cb->compressed_len;
558	ZSTD_DStream *stream;
559	int ret = 0;
560	unsigned long page_in_index = 0;
561	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
562	unsigned long buf_start;
563	unsigned long total_out = 0;
564
565	stream = ZSTD_initDStream(
566			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
567	if (!stream) {
568		pr_debug("BTRFS: ZSTD_initDStream failed\n");
569		ret = -EIO;
570		goto done;
571	}
572
573	workspace->in_buf.src = kmap(pages_in[page_in_index]);
574	workspace->in_buf.pos = 0;
575	workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
576
577	workspace->out_buf.dst = workspace->buf;
578	workspace->out_buf.pos = 0;
579	workspace->out_buf.size = PAGE_SIZE;
580
581	while (1) {
582		size_t ret2;
583
584		ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
585				&workspace->in_buf);
586		if (ZSTD_isError(ret2)) {
587			pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
588					ZSTD_getErrorCode(ret2));
589			ret = -EIO;
590			goto done;
591		}
592		buf_start = total_out;
593		total_out += workspace->out_buf.pos;
594		workspace->out_buf.pos = 0;
595
596		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
597				buf_start, total_out, disk_start, orig_bio);
598		if (ret == 0)
599			break;
600
601		if (workspace->in_buf.pos >= srclen)
602			break;
603
604		/* Check if we've hit the end of a frame */
605		if (ret2 == 0)
606			break;
607
608		if (workspace->in_buf.pos == workspace->in_buf.size) {
609			kunmap(pages_in[page_in_index++]);
610			if (page_in_index >= total_pages_in) {
611				workspace->in_buf.src = NULL;
612				ret = -EIO;
613				goto done;
614			}
615			srclen -= PAGE_SIZE;
616			workspace->in_buf.src = kmap(pages_in[page_in_index]);
617			workspace->in_buf.pos = 0;
618			workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
619		}
620	}
621	ret = 0;
622	zero_fill_bio(orig_bio);
623done:
624	if (workspace->in_buf.src)
625		kunmap(pages_in[page_in_index]);
626	return ret;
627}
628
629static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
630		struct page *dest_page,
631		unsigned long start_byte,
632		size_t srclen, size_t destlen)
633{
634	struct workspace *workspace = list_entry(ws, struct workspace, list);
635	ZSTD_DStream *stream;
636	int ret = 0;
637	size_t ret2;
638	unsigned long total_out = 0;
639	unsigned long pg_offset = 0;
640	char *kaddr;
641
642	stream = ZSTD_initDStream(
643			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
644	if (!stream) {
645		pr_warn("BTRFS: ZSTD_initDStream failed\n");
646		ret = -EIO;
647		goto finish;
648	}
649
650	destlen = min_t(size_t, destlen, PAGE_SIZE);
651
652	workspace->in_buf.src = data_in;
653	workspace->in_buf.pos = 0;
654	workspace->in_buf.size = srclen;
655
656	workspace->out_buf.dst = workspace->buf;
657	workspace->out_buf.pos = 0;
658	workspace->out_buf.size = PAGE_SIZE;
659
660	ret2 = 1;
661	while (pg_offset < destlen
662	       && workspace->in_buf.pos < workspace->in_buf.size) {
663		unsigned long buf_start;
664		unsigned long buf_offset;
665		unsigned long bytes;
666
667		/* Check if the frame is over and we still need more input */
668		if (ret2 == 0) {
669			pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
670			ret = -EIO;
671			goto finish;
672		}
673		ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
674				&workspace->in_buf);
675		if (ZSTD_isError(ret2)) {
676			pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
677					ZSTD_getErrorCode(ret2));
678			ret = -EIO;
679			goto finish;
680		}
681
682		buf_start = total_out;
683		total_out += workspace->out_buf.pos;
684		workspace->out_buf.pos = 0;
685
686		if (total_out <= start_byte)
687			continue;
688
689		if (total_out > start_byte && buf_start < start_byte)
690			buf_offset = start_byte - buf_start;
691		else
692			buf_offset = 0;
693
694		bytes = min_t(unsigned long, destlen - pg_offset,
695				workspace->out_buf.size - buf_offset);
696
697		kaddr = kmap_atomic(dest_page);
698		memcpy(kaddr + pg_offset, workspace->out_buf.dst + buf_offset,
699				bytes);
700		kunmap_atomic(kaddr);
701
702		pg_offset += bytes;
703	}
704	ret = 0;
705finish:
706	if (pg_offset < destlen) {
707		kaddr = kmap_atomic(dest_page);
708		memset(kaddr + pg_offset, 0, destlen - pg_offset);
709		kunmap_atomic(kaddr);
710	}
711	return ret;
712}
713
714const struct btrfs_compress_op btrfs_zstd_compress = {
715	.init_workspace_manager = zstd_init_workspace_manager,
716	.cleanup_workspace_manager = zstd_cleanup_workspace_manager,
717	.get_workspace = zstd_get_workspace,
718	.put_workspace = zstd_put_workspace,
719	.alloc_workspace = zstd_alloc_workspace,
720	.free_workspace = zstd_free_workspace,
721	.compress_pages = zstd_compress_pages,
722	.decompress_bio = zstd_decompress_bio,
723	.decompress = zstd_decompress,
724	.max_level	= ZSTD_BTRFS_MAX_LEVEL,
725	.default_level	= ZSTD_BTRFS_DEFAULT_LEVEL,
726};
fs/btrfs/zstd.c (v6.9.4)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2016-present, Facebook, Inc.
  4 * All rights reserved.
  5 *
  6 */
  7
  8#include <linux/bio.h>
  9#include <linux/bitmap.h>
 10#include <linux/err.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/mm.h>
 14#include <linux/sched/mm.h>
 15#include <linux/pagemap.h>
 16#include <linux/refcount.h>
 17#include <linux/sched.h>
 18#include <linux/slab.h>
 19#include <linux/zstd.h>
 20#include "misc.h"
 21#include "fs.h"
 22#include "compression.h"
 23#include "super.h"
 24
 25#define ZSTD_BTRFS_MAX_WINDOWLOG 17
 26#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
 27#define ZSTD_BTRFS_DEFAULT_LEVEL 3
 28#define ZSTD_BTRFS_MAX_LEVEL 15
 29/* 307s to avoid pathologically clashing with transaction commit */
 30#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
 31
 32static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
 33						 size_t src_len)
 34{
 35	zstd_parameters params = zstd_get_params(level, src_len);
 36
 37	if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
 38		params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
 39	WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
 40	return params;
 41}
 42
 43struct workspace {
 44	void *mem;
 45	size_t size;
 46	char *buf;
 47	unsigned int level;
 48	unsigned int req_level;
 49	unsigned long last_used; /* jiffies */
 50	struct list_head list;
 51	struct list_head lru_list;
 52	zstd_in_buffer in_buf;
 53	zstd_out_buffer out_buf;
 54};
 55
 56/*
 57 * Zstd Workspace Management
 58 *
 59 * Zstd workspaces have different memory requirements depending on the level.
 60 * The zstd workspaces are managed by having individual lists for each level
 61 * and a global lru.  Forward progress is maintained by protecting a max level
 62 * workspace.
 63 *
 64 * Getting a workspace is done by using the bitmap to identify the levels that
 65 * have available workspaces and scans up.  This lets us recycle higher level
 66 * workspaces because of the monotonic memory guarantee.  A workspace's
 67 * last_used is only updated if it is being used by the corresponding memory
 68 * level.  Putting a workspace involves adding it back to the appropriate places
 69 * and adding it back to the lru if necessary.
 70 *
 71 * A timer is used to reclaim workspaces if they have not been used for
 72 * ZSTD_BTRFS_RECLAIM_JIFFIES.  This helps keep only active workspaces around.
 73 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 74 */
 75
 76struct zstd_workspace_manager {
 77	const struct btrfs_compress_op *ops;
 78	spinlock_t lock;
 79	struct list_head lru_list;
 80	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
 81	unsigned long active_map;
 82	wait_queue_head_t wait;
 83	struct timer_list timer;
 84};
 85
 86static struct zstd_workspace_manager wsm;
 87
 88static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
 89
 90static inline struct workspace *list_to_workspace(struct list_head *list)
 91{
 92	return container_of(list, struct workspace, list);
 93}
 94
 95void zstd_free_workspace(struct list_head *ws);
 96struct list_head *zstd_alloc_workspace(unsigned int level);
 97
 98/*
 99 * Timer callback to free unused workspaces.
100 *
101 * @t: timer
102 *
103 * This scans the lru_list and attempts to reclaim any workspace that hasn't
104 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
105 *
106 * The context is softirq and does not need the _bh locking primitives.
107 */
108static void zstd_reclaim_timer_fn(struct timer_list *timer)
109{
110	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
111	struct list_head *pos, *next;
112
113	spin_lock(&wsm.lock);
114
115	if (list_empty(&wsm.lru_list)) {
116		spin_unlock(&wsm.lock);
117		return;
118	}
119
120	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
121		struct workspace *victim = container_of(pos, struct workspace,
122							lru_list);
123		unsigned int level;
124
125		if (time_after(victim->last_used, reclaim_threshold))
126			break;
127
128		/* workspace is in use */
129		if (victim->req_level)
130			continue;
131
132		level = victim->level;
133		list_del(&victim->lru_list);
134		list_del(&victim->list);
135		zstd_free_workspace(&victim->list);
136
137		if (list_empty(&wsm.idle_ws[level - 1]))
138			clear_bit(level - 1, &wsm.active_map);
139
140	}
141
142	if (!list_empty(&wsm.lru_list))
143		mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
144
145	spin_unlock(&wsm.lock);
146}
147
148/*
149 * Calculate monotonic memory bounds.
150 *
151 * It is possible based on the level configurations that a higher level
152 * workspace uses less memory than a lower level workspace.  In order to reuse
153 * workspaces, this must be made a monotonic relationship.  This precomputes
154 * the required memory for each level and enforces the monotonicity between
155 * level and memory required.
156 */
157static void zstd_calc_ws_mem_sizes(void)
158{
159	size_t max_size = 0;
160	unsigned int level;
161
162	for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
163		zstd_parameters params =
164			zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
165		size_t level_size =
166			max_t(size_t,
167			      zstd_cstream_workspace_bound(&params.cParams),
168			      zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
169
170		max_size = max_t(size_t, max_size, level_size);
171		zstd_ws_mem_sizes[level - 1] = max_size;
172	}
173}
174
175void zstd_init_workspace_manager(void)
176{
177	struct list_head *ws;
178	int i;
179
180	zstd_calc_ws_mem_sizes();
181
182	wsm.ops = &btrfs_zstd_compress;
183	spin_lock_init(&wsm.lock);
184	init_waitqueue_head(&wsm.wait);
185	timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);
186
187	INIT_LIST_HEAD(&wsm.lru_list);
188	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
189		INIT_LIST_HEAD(&wsm.idle_ws[i]);
190
191	ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
192	if (IS_ERR(ws)) {
193		pr_warn(
194		"BTRFS: cannot preallocate zstd compression workspace\n");
195	} else {
196		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
197		list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
198	}
199}
200
201void zstd_cleanup_workspace_manager(void)
202{
203	struct workspace *workspace;
204	int i;
205
206	spin_lock_bh(&wsm.lock);
207	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
208		while (!list_empty(&wsm.idle_ws[i])) {
209			workspace = container_of(wsm.idle_ws[i].next,
210						 struct workspace, list);
211			list_del(&workspace->list);
212			list_del(&workspace->lru_list);
213			zstd_free_workspace(&workspace->list);
214		}
215	}
216	spin_unlock_bh(&wsm.lock);
217
218	del_timer_sync(&wsm.timer);
219}
220
221/*
222 * Find workspace for given level.
223 *
224 * @level: compression level
225 *
226 * This iterates over the set bits in the active_map beginning at the requested
227 * compression level.  This lets us utilize already allocated workspaces before
228 * allocating a new one.  If the workspace is of a larger size, it is used, but
229 * the place in the lru_list and last_used times are not updated.  This is to
230 * offer the opportunity to reclaim the workspace in favor of allocating an
231 * appropriately sized one in the future.
232 */
233static struct list_head *zstd_find_workspace(unsigned int level)
234{
235	struct list_head *ws;
236	struct workspace *workspace;
237	int i = level - 1;
238
239	spin_lock_bh(&wsm.lock);
240	for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
241		if (!list_empty(&wsm.idle_ws[i])) {
242			ws = wsm.idle_ws[i].next;
243			workspace = list_to_workspace(ws);
244			list_del_init(ws);
245			/* keep its place if it's a lower level using this */
246			workspace->req_level = level;
247			if (level == workspace->level)
248				list_del(&workspace->lru_list);
249			if (list_empty(&wsm.idle_ws[i]))
250				clear_bit(i, &wsm.active_map);
251			spin_unlock_bh(&wsm.lock);
252			return ws;
253		}
254	}
255	spin_unlock_bh(&wsm.lock);
256
257	return NULL;
258}
259
260/*
261 * Zstd get_workspace for level.
262 *
263 * @level: compression level
264 *
265 * If @level is 0, then any compression level can be used.  Therefore, we begin
266 * scanning from 1.  We first scan through possible workspaces and then after
267 * attempt to allocate a new workspace.  If we fail to allocate one due to
268 * memory pressure, go to sleep waiting for the max level workspace to free up.
269 */
270struct list_head *zstd_get_workspace(unsigned int level)
271{
272	struct list_head *ws;
273	unsigned int nofs_flag;
274
275	/* level == 0 means we can use any workspace */
276	if (!level)
277		level = 1;
278
279again:
280	ws = zstd_find_workspace(level);
281	if (ws)
282		return ws;
283
284	nofs_flag = memalloc_nofs_save();
285	ws = zstd_alloc_workspace(level);
286	memalloc_nofs_restore(nofs_flag);
287
288	if (IS_ERR(ws)) {
289		DEFINE_WAIT(wait);
290
291		prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
292		schedule();
293		finish_wait(&wsm.wait, &wait);
294
295		goto again;
296	}
297
298	return ws;
299}
300
301/*
302 * Zstd put_workspace.
303 *
304 * @ws: list_head for the workspace
305 *
306 * When putting back a workspace, we only need to update the LRU if we are of
307 * the requested compression level.  Here is where we continue to protect the
308 * max level workspace or update last_used accordingly.  If the reclaim timer
309 * isn't set, it is also set here.  Only the max level workspace tries and wakes
310 * up waiting workspaces.
311 */
312void zstd_put_workspace(struct list_head *ws)
313{
314	struct workspace *workspace = list_to_workspace(ws);
315
316	spin_lock_bh(&wsm.lock);
317
318	/* A node is only taken off the lru if we are the corresponding level */
319	if (workspace->req_level == workspace->level) {
320		/* Hide a max level workspace from reclaim */
321		if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
322			INIT_LIST_HEAD(&workspace->lru_list);
323		} else {
324			workspace->last_used = jiffies;
325			list_add(&workspace->lru_list, &wsm.lru_list);
326			if (!timer_pending(&wsm.timer))
327				mod_timer(&wsm.timer,
328					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
329		}
330	}
331
332	set_bit(workspace->level - 1, &wsm.active_map);
333	list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
334	workspace->req_level = 0;
335
336	spin_unlock_bh(&wsm.lock);
337
338	if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
339		cond_wake_up(&wsm.wait);
340}
341
342void zstd_free_workspace(struct list_head *ws)
343{
344	struct workspace *workspace = list_entry(ws, struct workspace, list);
345
346	kvfree(workspace->mem);
347	kfree(workspace->buf);
348	kfree(workspace);
349}
350
351struct list_head *zstd_alloc_workspace(unsigned int level)
352{
353	struct workspace *workspace;
354
355	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
356	if (!workspace)
357		return ERR_PTR(-ENOMEM);
358
359	workspace->size = zstd_ws_mem_sizes[level - 1];
360	workspace->level = level;
361	workspace->req_level = level;
362	workspace->last_used = jiffies;
363	workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
364	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
365	if (!workspace->mem || !workspace->buf)
366		goto fail;
367
368	INIT_LIST_HEAD(&workspace->list);
369	INIT_LIST_HEAD(&workspace->lru_list);
370
371	return &workspace->list;
372fail:
373	zstd_free_workspace(&workspace->list);
374	return ERR_PTR(-ENOMEM);
375}
376
377int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
378		u64 start, struct page **pages, unsigned long *out_pages,
379		unsigned long *total_in, unsigned long *total_out)
380{
381	struct workspace *workspace = list_entry(ws, struct workspace, list);
382	zstd_cstream *stream;
383	int ret = 0;
384	int nr_pages = 0;
385	struct page *in_page = NULL;  /* The current page to read */
386	struct page *out_page = NULL; /* The current page to write to */
387	unsigned long tot_in = 0;
388	unsigned long tot_out = 0;
389	unsigned long len = *total_out;
390	const unsigned long nr_dest_pages = *out_pages;
391	unsigned long max_out = nr_dest_pages * PAGE_SIZE;
392	zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
393							   len);
394
395	*out_pages = 0;
396	*total_out = 0;
397	*total_in = 0;
398
399	/* Initialize the stream */
400	stream = zstd_init_cstream(&params, len, workspace->mem,
401			workspace->size);
402	if (!stream) {
403		pr_warn("BTRFS: zstd_init_cstream failed\n");
404		ret = -EIO;
405		goto out;
406	}
407
408	/* map in the first page of input data */
409	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
410	workspace->in_buf.src = kmap_local_page(in_page);
411	workspace->in_buf.pos = 0;
412	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
413
414	/* Allocate and map in the output buffer */
415	out_page = btrfs_alloc_compr_page();
416	if (out_page == NULL) {
417		ret = -ENOMEM;
418		goto out;
419	}
420	pages[nr_pages++] = out_page;
421	workspace->out_buf.dst = page_address(out_page);
422	workspace->out_buf.pos = 0;
423	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
424
425	while (1) {
426		size_t ret2;
427
428		ret2 = zstd_compress_stream(stream, &workspace->out_buf,
429				&workspace->in_buf);
430		if (zstd_is_error(ret2)) {
431			pr_debug("BTRFS: zstd_compress_stream returned %d\n",
432					zstd_get_error_code(ret2));
433			ret = -EIO;
434			goto out;
435		}
436
437		/* Check to see if we are making it bigger */
438		if (tot_in + workspace->in_buf.pos > 8192 &&
439				tot_in + workspace->in_buf.pos <
440				tot_out + workspace->out_buf.pos) {
441			ret = -E2BIG;
442			goto out;
443		}
444
445		/* We've reached the end of our output range */
446		if (workspace->out_buf.pos >= max_out) {
447			tot_out += workspace->out_buf.pos;
448			ret = -E2BIG;
449			goto out;
450		}
451
452		/* Check if we need more output space */
453		if (workspace->out_buf.pos == workspace->out_buf.size) {
454			tot_out += PAGE_SIZE;
455			max_out -= PAGE_SIZE;
456			if (nr_pages == nr_dest_pages) {
457				ret = -E2BIG;
458				goto out;
459			}
460			out_page = btrfs_alloc_compr_page();
461			if (out_page == NULL) {
462				ret = -ENOMEM;
463				goto out;
464			}
465			pages[nr_pages++] = out_page;
466			workspace->out_buf.dst = page_address(out_page);
467			workspace->out_buf.pos = 0;
468			workspace->out_buf.size = min_t(size_t, max_out,
469							PAGE_SIZE);
470		}
471
472		/* We've reached the end of the input */
473		if (workspace->in_buf.pos >= len) {
474			tot_in += workspace->in_buf.pos;
475			break;
476		}
477
478		/* Check if we need more input */
479		if (workspace->in_buf.pos == workspace->in_buf.size) {
480			tot_in += PAGE_SIZE;
481			kunmap_local(workspace->in_buf.src);
482			put_page(in_page);
483			start += PAGE_SIZE;
484			len -= PAGE_SIZE;
485			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
486			workspace->in_buf.src = kmap_local_page(in_page);
487			workspace->in_buf.pos = 0;
488			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
489		}
490	}
491	while (1) {
492		size_t ret2;
493
494		ret2 = zstd_end_stream(stream, &workspace->out_buf);
495		if (zstd_is_error(ret2)) {
496			pr_debug("BTRFS: zstd_end_stream returned %d\n",
497					zstd_get_error_code(ret2));
498			ret = -EIO;
499			goto out;
500		}
501		if (ret2 == 0) {
502			tot_out += workspace->out_buf.pos;
503			break;
504		}
505		if (workspace->out_buf.pos >= max_out) {
506			tot_out += workspace->out_buf.pos;
507			ret = -E2BIG;
508			goto out;
509		}
510
511		tot_out += PAGE_SIZE;
512		max_out -= PAGE_SIZE;
513		if (nr_pages == nr_dest_pages) {
514			ret = -E2BIG;
515			goto out;
516		}
517		out_page = btrfs_alloc_compr_page();
518		if (out_page == NULL) {
519			ret = -ENOMEM;
520			goto out;
521		}
522		pages[nr_pages++] = out_page;
523		workspace->out_buf.dst = page_address(out_page);
524		workspace->out_buf.pos = 0;
525		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
526	}
527
528	if (tot_out >= tot_in) {
529		ret = -E2BIG;
530		goto out;
531	}
532
533	ret = 0;
534	*total_in = tot_in;
535	*total_out = tot_out;
536out:
537	*out_pages = nr_pages;
538	if (workspace->in_buf.src) {
539		kunmap_local(workspace->in_buf.src);
540		put_page(in_page);
541	}
542	return ret;
543}
544
545int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
546{
547	struct workspace *workspace = list_entry(ws, struct workspace, list);
548	struct page **pages_in = cb->compressed_pages;
549	size_t srclen = cb->compressed_len;
550	zstd_dstream *stream;
551	int ret = 0;
552	unsigned long page_in_index = 0;
553	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
554	unsigned long buf_start;
555	unsigned long total_out = 0;
556
557	stream = zstd_init_dstream(
558			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
559	if (!stream) {
560		pr_debug("BTRFS: zstd_init_dstream failed\n");
561		ret = -EIO;
562		goto done;
563	}
564
565	workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
566	workspace->in_buf.pos = 0;
567	workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
568
569	workspace->out_buf.dst = workspace->buf;
570	workspace->out_buf.pos = 0;
571	workspace->out_buf.size = PAGE_SIZE;
572
573	while (1) {
574		size_t ret2;
575
576		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
577				&workspace->in_buf);
578		if (zstd_is_error(ret2)) {
579			pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
580					zstd_get_error_code(ret2));
581			ret = -EIO;
582			goto done;
583		}
584		buf_start = total_out;
585		total_out += workspace->out_buf.pos;
586		workspace->out_buf.pos = 0;
587
588		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
589				total_out - buf_start, cb, buf_start);
590		if (ret == 0)
591			break;
592
593		if (workspace->in_buf.pos >= srclen)
594			break;
595
596		/* Check if we've hit the end of a frame */
597		if (ret2 == 0)
598			break;
599
600		if (workspace->in_buf.pos == workspace->in_buf.size) {
601			kunmap_local(workspace->in_buf.src);
602			page_in_index++;
603			if (page_in_index >= total_pages_in) {
604				workspace->in_buf.src = NULL;
605				ret = -EIO;
606				goto done;
607			}
608			srclen -= PAGE_SIZE;
609			workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
610			workspace->in_buf.pos = 0;
611			workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
612		}
613	}
614	ret = 0;
615done:
616	if (workspace->in_buf.src)
617		kunmap_local(workspace->in_buf.src);
618	return ret;
619}
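
The same consume-and-drain idea as the loop above, sketched against the userspace libzstd streaming decompression API (again an illustration, not the kernel wrappers, whose init calls take a preallocated workspace). Build with -lzstd. Compressed input is produced with a one-shot ZSTD_compress() call, then decoded incrementally into a small buffer, the way zstd_decompress_bio drains into workspace->buf while walking the compressed pages.

#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
	static char plain[64 * 1024], comp[80 * 1024], out_buf[4096];
	ZSTD_DStream *ds = ZSTD_createDStream();
	size_t csize, ret;
	size_t total_out = 0;

	if (!ds)
		return 1;
	memset(plain, 'b', sizeof(plain));
	csize = ZSTD_compress(comp, sizeof(comp), plain, sizeof(plain), 3);
	if (ZSTD_isError(csize))
		return 1;

	ZSTD_initDStream(ds);
	ZSTD_inBuffer in = { comp, csize, 0 };

	do {
		/* drain into a small buffer, as the kernel drains into workspace->buf */
		ZSTD_outBuffer out = { out_buf, sizeof(out_buf), 0 };

		ret = ZSTD_decompressStream(ds, &out, &in);
		if (ZSTD_isError(ret))
			return 1;
		total_out += out.pos;
	} while (ret != 0);			/* 0 means the frame is complete */

	printf("decompressed %zu bytes from %zu compressed bytes\n",
	       total_out, csize);
	ZSTD_freeDStream(ds);
	return 0;
}
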
620
621int zstd_decompress(struct list_head *ws, const u8 *data_in,
622		struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
623		size_t destlen)
624{
625	struct workspace *workspace = list_entry(ws, struct workspace, list);
626	struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
627	const u32 sectorsize = fs_info->sectorsize;
628	zstd_dstream *stream;
629	int ret = 0;
630	unsigned long to_copy = 0;
631
632	stream = zstd_init_dstream(
633			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
634	if (!stream) {
635		pr_warn("BTRFS: zstd_init_dstream failed\n");
636		goto finish;
637	}
638
639	workspace->in_buf.src = data_in;
640	workspace->in_buf.pos = 0;
641	workspace->in_buf.size = srclen;
642
643	workspace->out_buf.dst = workspace->buf;
644	workspace->out_buf.pos = 0;
645	workspace->out_buf.size = sectorsize;
646
647	/*
648	 * Since both input and output buffers should not exceed one sector,
649	 * one call should end the decompression.
650	 */
651	ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
652	if (zstd_is_error(ret)) {
653		pr_warn_ratelimited("BTRFS: zstd_decompress_stream return %d\n",
654				    zstd_get_error_code(ret));
655		goto finish;
656	}
657	to_copy = workspace->out_buf.pos;
658	memcpy_to_page(dest_page, dest_pgoff, workspace->out_buf.dst, to_copy);
659finish:
660	/* Error or early end. */
661	if (unlikely(to_copy < destlen)) {
662		ret = -EIO;
663		memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
664	}
665	return ret;
666}
667
668const struct btrfs_compress_op btrfs_zstd_compress = {
669	/* ZSTD uses own workspace manager */
670	.workspace_manager = NULL,
671	.max_level	= ZSTD_BTRFS_MAX_LEVEL,
672	.default_level	= ZSTD_BTRFS_DEFAULT_LEVEL,
673};