   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * f2fs compress support
   4 *
   5 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
   6 */
   7
   8#include <linux/fs.h>
   9#include <linux/f2fs_fs.h>
  10#include <linux/moduleparam.h>
  11#include <linux/writeback.h>
  12#include <linux/backing-dev.h>
  13#include <linux/lzo.h>
  14#include <linux/lz4.h>
  15#include <linux/zstd.h>
  16#include <linux/pagevec.h>
  17
  18#include "f2fs.h"
  19#include "node.h"
  20#include "segment.h"
  21#include <trace/events/f2fs.h>
  22
  23static struct kmem_cache *cic_entry_slab;
  24static struct kmem_cache *dic_entry_slab;
  25
  26static void *page_array_alloc(struct inode *inode, int nr)
  27{
  28	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  29	unsigned int size = sizeof(struct page *) * nr;
  30
  31	if (likely(size <= sbi->page_array_slab_size))
  32		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
  33					GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
  34	return f2fs_kzalloc(sbi, size, GFP_NOFS);
  35}
  36
  37static void page_array_free(struct inode *inode, void *pages, int nr)
  38{
  39	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  40	unsigned int size = sizeof(struct page *) * nr;
  41
  42	if (!pages)
  43		return;
  44
  45	if (likely(size <= sbi->page_array_slab_size))
  46		kmem_cache_free(sbi->page_array_slab, pages);
  47	else
  48		kfree(pages);
  49}
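
/*
 * Editor's sizing note (illustration, not part of the original source):
 * page_array_slab_size is sizeof(struct page *) << compress_log_size,
 * set up in f2fs_init_page_array_cache() near the end of this file.
 * With 8-byte pointers and a 16-page cluster (compress_log_size == 4)
 * the slab object is 128 bytes, so any request with nr <= 16 is served
 * from the slab and only oversized requests fall back to f2fs_kzalloc().
 */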
  50
  51struct f2fs_compress_ops {
  52	int (*init_compress_ctx)(struct compress_ctx *cc);
  53	void (*destroy_compress_ctx)(struct compress_ctx *cc);
  54	int (*compress_pages)(struct compress_ctx *cc);
  55	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
  56	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
  57	int (*decompress_pages)(struct decompress_io_ctx *dic);
  58};
  59
  60static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
  61{
  62	return index & (cc->cluster_size - 1);
  63}
  64
  65static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
  66{
  67	return index >> cc->log_cluster_size;
  68}
  69
  70static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
  71{
  72	return cc->cluster_idx << cc->log_cluster_size;
  73}
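
/*
 * Worked example (editor's illustration): with log_cluster_size == 2
 * (a 4-page cluster), page index 9 yields offset_in_cluster() == 1
 * (9 & 3) and cluster_idx() == 2 (9 >> 2), and start_idx_of_cluster()
 * maps cluster 2 back to its first page index, 8 (2 << 2).
 */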
  74
  75bool f2fs_is_compressed_page(struct page *page)
  76{
  77	if (!PagePrivate(page))
  78		return false;
  79	if (!page_private(page))
  80		return false;
  81	if (page_private_nonpointer(page))
  82		return false;
  83
  84	f2fs_bug_on(F2FS_M_SB(page->mapping),
  85		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
  86	return true;
  87}
  88
  89static void f2fs_set_compressed_page(struct page *page,
  90		struct inode *inode, pgoff_t index, void *data)
  91{
  92	attach_page_private(page, (void *)data);
  93
  94	/* i_crypto_info and iv index */
  95	page->index = index;
  96	page->mapping = inode->i_mapping;
  97}
  98
  99static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
 100{
 101	int i;
 102
 103	for (i = 0; i < len; i++) {
 104		if (!cc->rpages[i])
 105			continue;
 106		if (unlock)
 107			unlock_page(cc->rpages[i]);
 108		else
 109			put_page(cc->rpages[i]);
 110	}
 111}
 112
 113static void f2fs_put_rpages(struct compress_ctx *cc)
 114{
 115	f2fs_drop_rpages(cc, cc->cluster_size, false);
 116}
 117
 118static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
 119{
 120	f2fs_drop_rpages(cc, len, true);
 121}
 122
 123static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
 124		struct writeback_control *wbc, bool redirty, int unlock)
 125{
 126	unsigned int i;
 127
 128	for (i = 0; i < cc->cluster_size; i++) {
 129		if (!cc->rpages[i])
 130			continue;
 131		if (redirty)
 132			redirty_page_for_writepage(wbc, cc->rpages[i]);
 133		f2fs_put_page(cc->rpages[i], unlock);
 134	}
 135}
 136
 137struct page *f2fs_compress_control_page(struct page *page)
 138{
 139	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
 140}
 141
 142int f2fs_init_compress_ctx(struct compress_ctx *cc)
 143{
 144	if (cc->rpages)
 145		return 0;
 146
 147	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
 148	return cc->rpages ? 0 : -ENOMEM;
 149}
 150
 151void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 152{
 153	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
 154	cc->rpages = NULL;
 155	cc->nr_rpages = 0;
 156	cc->nr_cpages = 0;
 157	cc->valid_nr_cpages = 0;
 158	if (!reuse)
 159		cc->cluster_idx = NULL_CLUSTER;
 160}
 161
 162void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
 163{
 164	unsigned int cluster_ofs;
 165
 166	if (!f2fs_cluster_can_merge_page(cc, page->index))
 167		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
 168
 169	cluster_ofs = offset_in_cluster(cc, page->index);
 170	cc->rpages[cluster_ofs] = page;
 171	cc->nr_rpages++;
 172	cc->cluster_idx = cluster_idx(cc, page->index);
 173}
 174
 175#ifdef CONFIG_F2FS_FS_LZO
 176static int lzo_init_compress_ctx(struct compress_ctx *cc)
 177{
 178	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
 179				LZO1X_MEM_COMPRESS, GFP_NOFS);
 180	if (!cc->private)
 181		return -ENOMEM;
 182
 183	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
 184	return 0;
 185}
 186
 187static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
 188{
 189	kvfree(cc->private);
 190	cc->private = NULL;
 191}
 192
 193static int lzo_compress_pages(struct compress_ctx *cc)
 194{
 195	int ret;
 196
 197	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
 198					&cc->clen, cc->private);
 199	if (ret != LZO_E_OK) {
 200		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
 201				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
 202		return -EIO;
 203	}
 204	return 0;
 205}
 206
 207static int lzo_decompress_pages(struct decompress_io_ctx *dic)
 208{
 209	int ret;
 210
 211	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
 212						dic->rbuf, &dic->rlen);
 213	if (ret != LZO_E_OK) {
 214		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
 215				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
 216		return -EIO;
 217	}
 218
 219	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
 220		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
 221					"expected:%lu\n", KERN_ERR,
 222					F2FS_I_SB(dic->inode)->sb->s_id,
 223					dic->rlen,
 224					PAGE_SIZE << dic->log_cluster_size);
 225		return -EIO;
 226	}
 227	return 0;
 228}
 229
 230static const struct f2fs_compress_ops f2fs_lzo_ops = {
 231	.init_compress_ctx	= lzo_init_compress_ctx,
 232	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
 233	.compress_pages		= lzo_compress_pages,
 234	.decompress_pages	= lzo_decompress_pages,
 235};
 236#endif
 237
 238#ifdef CONFIG_F2FS_FS_LZ4
 239static int lz4_init_compress_ctx(struct compress_ctx *cc)
 240{
 241	unsigned int size = LZ4_MEM_COMPRESS;
 242
 243#ifdef CONFIG_F2FS_FS_LZ4HC
 244	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
 245		size = LZ4HC_MEM_COMPRESS;
 246#endif
 247
 248	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
 249	if (!cc->private)
 250		return -ENOMEM;
 251
  252	/*
  253	 * we do not set cc->clen to LZ4_compressBound(inputsize) to cover
  254	 * the worst compress case, because the lz4 compressor handles its
  255	 * output budget properly.
  256	 */
 257	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 258	return 0;
 259}
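
/*
 * Budget arithmetic (editor's illustration): with 4 KiB pages and a
 * 16-page cluster, rlen is 65536 and clen is capped at
 * 65536 - 4096 - COMPRESS_HEADER_SIZE, i.e. compression must save at
 * least one full page to be worthwhile.  If the output does not fit,
 * LZ4_compress_default() returns 0 and the callers below turn that
 * into -EAGAIN so the cluster is written uncompressed.
 */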
 260
 261static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
 262{
 263	kvfree(cc->private);
 264	cc->private = NULL;
 265}
 266
 267#ifdef CONFIG_F2FS_FS_LZ4HC
 268static int lz4hc_compress_pages(struct compress_ctx *cc)
 269{
 270	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
 271						COMPRESS_LEVEL_OFFSET;
 272	int len;
 273
 274	if (level)
 275		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 276					cc->clen, level, cc->private);
 277	else
 278		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 279						cc->clen, cc->private);
 280	if (!len)
 281		return -EAGAIN;
 282
 283	cc->clen = len;
 284	return 0;
 285}
 286#endif
 287
 288static int lz4_compress_pages(struct compress_ctx *cc)
 289{
 290	int len;
 291
 292#ifdef CONFIG_F2FS_FS_LZ4HC
 293	return lz4hc_compress_pages(cc);
 294#endif
 295	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
 296						cc->clen, cc->private);
 297	if (!len)
 298		return -EAGAIN;
 299
 300	cc->clen = len;
 301	return 0;
 302}
 303
 304static int lz4_decompress_pages(struct decompress_io_ctx *dic)
 305{
 306	int ret;
 307
 308	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
 309						dic->clen, dic->rlen);
 310	if (ret < 0) {
 311		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
 312				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
 313		return -EIO;
 314	}
 315
 316	if (ret != PAGE_SIZE << dic->log_cluster_size) {
 317		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
 318					"expected:%lu\n", KERN_ERR,
 319					F2FS_I_SB(dic->inode)->sb->s_id, ret,
 320					PAGE_SIZE << dic->log_cluster_size);
 321		return -EIO;
 322	}
 323	return 0;
 324}
 325
 326static const struct f2fs_compress_ops f2fs_lz4_ops = {
 327	.init_compress_ctx	= lz4_init_compress_ctx,
 328	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
 329	.compress_pages		= lz4_compress_pages,
 330	.decompress_pages	= lz4_decompress_pages,
 331};
 332#endif
 333
 334#ifdef CONFIG_F2FS_FS_ZSTD
 335#define F2FS_ZSTD_DEFAULT_CLEVEL	1
 336
 337static int zstd_init_compress_ctx(struct compress_ctx *cc)
 338{
 339	zstd_parameters params;
 340	zstd_cstream *stream;
 341	void *workspace;
 342	unsigned int workspace_size;
 343	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
 344						COMPRESS_LEVEL_OFFSET;
 345
 346	if (!level)
 347		level = F2FS_ZSTD_DEFAULT_CLEVEL;
 348
 349	params = zstd_get_params(level, cc->rlen);
 350	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
 351
 352	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
 353					workspace_size, GFP_NOFS);
 354	if (!workspace)
 355		return -ENOMEM;
 356
 357	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
 358	if (!stream) {
 359		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
 360				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 361				__func__);
 362		kvfree(workspace);
 363		return -EIO;
 364	}
 365
 366	cc->private = workspace;
 367	cc->private2 = stream;
 368
 369	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 370	return 0;
 371}
 372
 373static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
 374{
 375	kvfree(cc->private);
 376	cc->private = NULL;
 377	cc->private2 = NULL;
 378}
 379
 380static int zstd_compress_pages(struct compress_ctx *cc)
 381{
 382	zstd_cstream *stream = cc->private2;
 383	zstd_in_buffer inbuf;
 384	zstd_out_buffer outbuf;
 385	int src_size = cc->rlen;
 386	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
 387	int ret;
 388
 389	inbuf.pos = 0;
 390	inbuf.src = cc->rbuf;
 391	inbuf.size = src_size;
 392
 393	outbuf.pos = 0;
 394	outbuf.dst = cc->cbuf->cdata;
 395	outbuf.size = dst_size;
 396
 397	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
 398	if (zstd_is_error(ret)) {
 399		printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
 400				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 401				__func__, zstd_get_error_code(ret));
 402		return -EIO;
 403	}
 404
 405	ret = zstd_end_stream(stream, &outbuf);
 406	if (zstd_is_error(ret)) {
 407		printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
 408				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
 409				__func__, zstd_get_error_code(ret));
 410		return -EIO;
 411	}
 412
  413	/*
  414	 * compressed data remains in the intermediate buffer because there
  415	 * is no more space in cbuf.cdata
  416	 */
 417	if (ret)
 418		return -EAGAIN;
 419
 420	cc->clen = outbuf.pos;
 421	return 0;
 422}
 423
 424static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
 425{
 426	zstd_dstream *stream;
 427	void *workspace;
 428	unsigned int workspace_size;
 429	unsigned int max_window_size =
 430			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
 431
 432	workspace_size = zstd_dstream_workspace_bound(max_window_size);
 433
 434	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
 435					workspace_size, GFP_NOFS);
 436	if (!workspace)
 437		return -ENOMEM;
 438
 439	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
 440	if (!stream) {
 441		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
 442				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
 443				__func__);
 444		kvfree(workspace);
 445		return -EIO;
 446	}
 447
 448	dic->private = workspace;
 449	dic->private2 = stream;
 450
 451	return 0;
 452}
 453
 454static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
 455{
 456	kvfree(dic->private);
 457	dic->private = NULL;
 458	dic->private2 = NULL;
 459}
 460
 461static int zstd_decompress_pages(struct decompress_io_ctx *dic)
 462{
 463	zstd_dstream *stream = dic->private2;
 464	zstd_in_buffer inbuf;
 465	zstd_out_buffer outbuf;
 466	int ret;
 467
 468	inbuf.pos = 0;
 469	inbuf.src = dic->cbuf->cdata;
 470	inbuf.size = dic->clen;
 471
 472	outbuf.pos = 0;
 473	outbuf.dst = dic->rbuf;
 474	outbuf.size = dic->rlen;
 475
 476	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
 477	if (zstd_is_error(ret)) {
 478		printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
 479				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
 480				__func__, zstd_get_error_code(ret));
 481		return -EIO;
 482	}
 483
 484	if (dic->rlen != outbuf.pos) {
 485		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
 486				"expected:%lu\n", KERN_ERR,
 487				F2FS_I_SB(dic->inode)->sb->s_id,
 488				__func__, dic->rlen,
 489				PAGE_SIZE << dic->log_cluster_size);
 490		return -EIO;
 491	}
 492
 493	return 0;
 494}
 495
 496static const struct f2fs_compress_ops f2fs_zstd_ops = {
 497	.init_compress_ctx	= zstd_init_compress_ctx,
 498	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
 499	.compress_pages		= zstd_compress_pages,
 500	.init_decompress_ctx	= zstd_init_decompress_ctx,
 501	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
 502	.decompress_pages	= zstd_decompress_pages,
 503};
 504#endif
 505
 506#ifdef CONFIG_F2FS_FS_LZO
 507#ifdef CONFIG_F2FS_FS_LZORLE
 508static int lzorle_compress_pages(struct compress_ctx *cc)
 509{
 510	int ret;
 511
 512	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
 513					&cc->clen, cc->private);
 514	if (ret != LZO_E_OK) {
 515		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
 516				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
 517		return -EIO;
 518	}
 519	return 0;
 520}
 521
 522static const struct f2fs_compress_ops f2fs_lzorle_ops = {
 523	.init_compress_ctx	= lzo_init_compress_ctx,
 524	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
 525	.compress_pages		= lzorle_compress_pages,
 526	.decompress_pages	= lzo_decompress_pages,
 527};
 528#endif
 529#endif
 530
 531static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
 532#ifdef CONFIG_F2FS_FS_LZO
 533	&f2fs_lzo_ops,
 534#else
 535	NULL,
 536#endif
 537#ifdef CONFIG_F2FS_FS_LZ4
 538	&f2fs_lz4_ops,
 539#else
 540	NULL,
 541#endif
 542#ifdef CONFIG_F2FS_FS_ZSTD
 543	&f2fs_zstd_ops,
 544#else
 545	NULL,
 546#endif
 547#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
 548	&f2fs_lzorle_ops,
 549#else
 550	NULL,
 551#endif
 552};
 553
 554bool f2fs_is_compress_backend_ready(struct inode *inode)
 555{
 556	if (!f2fs_compressed_file(inode))
 557		return true;
 558	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
 559}
 560
 561static mempool_t *compress_page_pool;
  562static unsigned int num_compress_pages = 512;
 563module_param(num_compress_pages, uint, 0444);
 564MODULE_PARM_DESC(num_compress_pages,
 565		"Number of intermediate compress pages to preallocate");
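
/*
 * Usage sketch (editor's note): the parameter is read-only (0444), so it
 * can only be set at load or boot time, e.g.
 *
 *	modprobe f2fs num_compress_pages=1024
 *
 * or "f2fs.num_compress_pages=1024" on the kernel command line when f2fs
 * is built in; the active value is visible under
 * /sys/module/f2fs/parameters/num_compress_pages.
 */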
 566
 567int f2fs_init_compress_mempool(void)
 568{
 569	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
 570	return compress_page_pool ? 0 : -ENOMEM;
 571}
 572
 573void f2fs_destroy_compress_mempool(void)
 574{
 575	mempool_destroy(compress_page_pool);
 576}
 577
 578static struct page *f2fs_compress_alloc_page(void)
 579{
 580	struct page *page;
 581
 582	page = mempool_alloc(compress_page_pool, GFP_NOFS);
 583	lock_page(page);
 584
 585	return page;
 586}
 587
 588static void f2fs_compress_free_page(struct page *page)
 589{
 590	if (!page)
 591		return;
 592	detach_page_private(page);
 593	page->mapping = NULL;
 594	unlock_page(page);
 595	mempool_free(page, compress_page_pool);
 596}
 597
 598#define MAX_VMAP_RETRIES	3
 599
 600static void *f2fs_vmap(struct page **pages, unsigned int count)
 601{
 602	int i;
 603	void *buf = NULL;
 604
 605	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
 606		buf = vm_map_ram(pages, count, -1);
 607		if (buf)
 608			break;
 609		vm_unmap_aliases();
 610	}
 611	return buf;
 612}
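
/*
 * Editor's note: vm_map_ram() can fail transiently while the vmalloc
 * space is held by lazily-freed mappings; vm_unmap_aliases() flushes
 * those stale aliases so that a retry (up to MAX_VMAP_RETRIES) has a
 * chance to succeed.
 */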
 613
 614static int f2fs_compress_pages(struct compress_ctx *cc)
 615{
 616	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
 617	const struct f2fs_compress_ops *cops =
 618				f2fs_cops[fi->i_compress_algorithm];
 619	unsigned int max_len, new_nr_cpages;
 620	u32 chksum = 0;
 621	int i, ret;
 622
 623	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
 624				cc->cluster_size, fi->i_compress_algorithm);
 625
 626	if (cops->init_compress_ctx) {
 627		ret = cops->init_compress_ctx(cc);
 628		if (ret)
 629			goto out;
 630	}
 631
 632	max_len = COMPRESS_HEADER_SIZE + cc->clen;
 633	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
 634	cc->valid_nr_cpages = cc->nr_cpages;
 635
 636	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
 637	if (!cc->cpages) {
 638		ret = -ENOMEM;
 639		goto destroy_compress_ctx;
 640	}
 641
 642	for (i = 0; i < cc->nr_cpages; i++) {
 643		cc->cpages[i] = f2fs_compress_alloc_page();
 644		if (!cc->cpages[i]) {
 645			ret = -ENOMEM;
 646			goto out_free_cpages;
 647		}
 648	}
 649
 650	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
 651	if (!cc->rbuf) {
 652		ret = -ENOMEM;
 653		goto out_free_cpages;
 654	}
 655
 656	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
 657	if (!cc->cbuf) {
 658		ret = -ENOMEM;
 659		goto out_vunmap_rbuf;
 660	}
 661
 662	ret = cops->compress_pages(cc);
 663	if (ret)
 664		goto out_vunmap_cbuf;
 665
 666	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
 667
 668	if (cc->clen > max_len) {
 669		ret = -EAGAIN;
 670		goto out_vunmap_cbuf;
 671	}
 672
 673	cc->cbuf->clen = cpu_to_le32(cc->clen);
 674
 675	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
 676		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
 677					cc->cbuf->cdata, cc->clen);
 678	cc->cbuf->chksum = cpu_to_le32(chksum);
 679
 680	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
 681		cc->cbuf->reserved[i] = cpu_to_le32(0);
 682
 683	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
 684
 685	/* zero out any unused part of the last page */
 686	memset(&cc->cbuf->cdata[cc->clen], 0,
 687			(new_nr_cpages * PAGE_SIZE) -
 688			(cc->clen + COMPRESS_HEADER_SIZE));
 689
 690	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 691	vm_unmap_ram(cc->rbuf, cc->cluster_size);
 692
 693	for (i = 0; i < cc->nr_cpages; i++) {
 694		if (i < new_nr_cpages)
 695			continue;
 696		f2fs_compress_free_page(cc->cpages[i]);
 697		cc->cpages[i] = NULL;
 698	}
 699
 700	if (cops->destroy_compress_ctx)
 701		cops->destroy_compress_ctx(cc);
 702
 703	cc->valid_nr_cpages = new_nr_cpages;
 704
 705	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 706							cc->clen, ret);
 707	return 0;
 708
 709out_vunmap_cbuf:
 710	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 711out_vunmap_rbuf:
 712	vm_unmap_ram(cc->rbuf, cc->cluster_size);
 713out_free_cpages:
 714	for (i = 0; i < cc->nr_cpages; i++) {
 715		if (cc->cpages[i])
 716			f2fs_compress_free_page(cc->cpages[i]);
 717	}
 718	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
 719	cc->cpages = NULL;
 720destroy_compress_ctx:
 721	if (cops->destroy_compress_ctx)
 722		cops->destroy_compress_ctx(cc);
 723out:
 724	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 725							cc->clen, ret);
 726	return ret;
 727}
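
/*
 * Worked example (editor's illustration): for a 16-page cluster whose
 * payload compressed to clen == 20000 bytes, new_nr_cpages is
 * DIV_ROUND_UP(20000 + COMPRESS_HEADER_SIZE, 4096) == 5 (assuming the
 * header fits in the remaining slack), so cpages[5..15] are returned to
 * the mempool and only five compressed pages are written out.
 */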
 728
 729static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
 730		bool pre_alloc);
 731static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
 732		bool bypass_destroy_callback, bool pre_alloc);
 733
 734void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
 735{
 736	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 737	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
 738	const struct f2fs_compress_ops *cops =
 739			f2fs_cops[fi->i_compress_algorithm];
 740	bool bypass_callback = false;
 741	int ret;
 742
 743	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
 744				dic->cluster_size, fi->i_compress_algorithm);
 745
 746	if (dic->failed) {
 747		ret = -EIO;
 748		goto out_end_io;
 749	}
 750
 751	ret = f2fs_prepare_decomp_mem(dic, false);
 752	if (ret) {
 753		bypass_callback = true;
 754		goto out_release;
 755	}
 756
 757	dic->clen = le32_to_cpu(dic->cbuf->clen);
 758	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
 759
 760	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
 761		ret = -EFSCORRUPTED;
 762		f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
 763		goto out_release;
 764	}
 765
 766	ret = cops->decompress_pages(dic);
 767
 768	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
 769		u32 provided = le32_to_cpu(dic->cbuf->chksum);
 770		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
 771
 772		if (provided != calculated) {
 773			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
 774				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
 775				printk_ratelimited(
 776					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
 777					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
 778					provided, calculated);
 779			}
 780			set_sbi_flag(sbi, SBI_NEED_FSCK);
 781		}
 782	}
 783
 784out_release:
 785	f2fs_release_decomp_mem(dic, bypass_callback, false);
 786
 787out_end_io:
 788	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
 789							dic->clen, ret);
 790	f2fs_decompress_end_io(dic, ret, in_task);
 791}
 792
 793/*
 794 * This is called when a page of a compressed cluster has been read from disk
 795 * (or failed to be read from disk).  It checks whether this page was the last
 796 * page being waited on in the cluster, and if so, it decompresses the cluster
 797 * (or in the case of a failure, cleans up without actually decompressing).
 798 */
 799void f2fs_end_read_compressed_page(struct page *page, bool failed,
 800		block_t blkaddr, bool in_task)
 801{
 802	struct decompress_io_ctx *dic =
 803			(struct decompress_io_ctx *)page_private(page);
 804	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 805
 806	dec_page_count(sbi, F2FS_RD_DATA);
 807
 808	if (failed)
 809		WRITE_ONCE(dic->failed, true);
 810	else if (blkaddr && in_task)
 811		f2fs_cache_compressed_page(sbi, page,
 812					dic->inode->i_ino, blkaddr);
 813
 814	if (atomic_dec_and_test(&dic->remaining_pages))
 815		f2fs_decompress_cluster(dic, in_task);
 816}
 817
 818static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
 819{
 820	if (cc->cluster_idx == NULL_CLUSTER)
 821		return true;
 822	return cc->cluster_idx == cluster_idx(cc, index);
 823}
 824
 825bool f2fs_cluster_is_empty(struct compress_ctx *cc)
 826{
 827	return cc->nr_rpages == 0;
 828}
 829
 830static bool f2fs_cluster_is_full(struct compress_ctx *cc)
 831{
 832	return cc->cluster_size == cc->nr_rpages;
 833}
 834
 835bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
 836{
 837	if (f2fs_cluster_is_empty(cc))
 838		return true;
 839	return is_page_in_cluster(cc, index);
 840}
 841
 842bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
 843				int index, int nr_pages, bool uptodate)
 844{
 845	unsigned long pgidx = pages[index]->index;
 846	int i = uptodate ? 0 : 1;
 847
  848	/*
  849	 * when uptodate is true, check whether all pages in the cluster
  850	 * are uptodate.
  851	 */
 852	if (uptodate && (pgidx % cc->cluster_size))
 853		return false;
 854
 855	if (nr_pages - index < cc->cluster_size)
 856		return false;
 857
 858	for (; i < cc->cluster_size; i++) {
 859		if (pages[index + i]->index != pgidx + i)
 860			return false;
 861		if (uptodate && !PageUptodate(pages[index + i]))
 862			return false;
 863	}
 864
 865	return true;
 866}
 867
 868static bool cluster_has_invalid_data(struct compress_ctx *cc)
 869{
 870	loff_t i_size = i_size_read(cc->inode);
 871	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
 872	int i;
 873
 874	for (i = 0; i < cc->cluster_size; i++) {
 875		struct page *page = cc->rpages[i];
 876
 877		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
 878
 879		/* beyond EOF */
 880		if (page->index >= nr_pages)
 881			return true;
 882	}
 883	return false;
 884}
 885
 886bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
 887{
 888	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 889	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
 890	bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
 891	int cluster_end = 0;
 892	int i;
 893	char *reason = "";
 894
 895	if (!compressed)
 896		return false;
 897
 898	/* [..., COMPR_ADDR, ...] */
 899	if (dn->ofs_in_node % cluster_size) {
 900		reason = "[*|C|*|*]";
 901		goto out;
 902	}
 903
 904	for (i = 1; i < cluster_size; i++) {
 905		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
 906							dn->ofs_in_node + i);
 907
 908		/* [COMPR_ADDR, ..., COMPR_ADDR] */
 909		if (blkaddr == COMPRESS_ADDR) {
 910			reason = "[C|*|C|*]";
 911			goto out;
 912		}
 913		if (!__is_valid_data_blkaddr(blkaddr)) {
 914			if (!cluster_end)
 915				cluster_end = i;
 916			continue;
 917		}
 918		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
 919		if (cluster_end) {
 920			reason = "[C|N|N|V]";
 921			goto out;
 922		}
 923	}
 924	return false;
 925out:
 926	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
 927			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
 928	set_sbi_flag(sbi, SBI_NEED_FSCK);
 929	return true;
 930}
 931
 932static int __f2fs_cluster_blocks(struct inode *inode,
 933				unsigned int cluster_idx, bool compr)
 934{
 935	struct dnode_of_data dn;
 936	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
 937	unsigned int start_idx = cluster_idx <<
 938				F2FS_I(inode)->i_log_cluster_size;
 939	int ret;
 940
 941	set_new_dnode(&dn, inode, NULL, NULL, 0);
 942	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
 943	if (ret) {
 944		if (ret == -ENOENT)
 945			ret = 0;
 946		goto fail;
 947	}
 948
 949	if (f2fs_sanity_check_cluster(&dn)) {
 950		ret = -EFSCORRUPTED;
 951		f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
 952		goto fail;
 953	}
 954
 955	if (dn.data_blkaddr == COMPRESS_ADDR) {
 956		int i;
 957
 958		ret = 1;
 959		for (i = 1; i < cluster_size; i++) {
 960			block_t blkaddr;
 961
 962			blkaddr = data_blkaddr(dn.inode,
 963					dn.node_page, dn.ofs_in_node + i);
 964			if (compr) {
 965				if (__is_valid_data_blkaddr(blkaddr))
 966					ret++;
 967			} else {
 968				if (blkaddr != NULL_ADDR)
 969					ret++;
 970			}
 971		}
 972
 973		f2fs_bug_on(F2FS_I_SB(inode),
 974			!compr && ret != cluster_size &&
 975			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
 976	}
 977fail:
 978	f2fs_put_dnode(&dn);
 979	return ret;
 980}
 981
 982/* return # of compressed blocks in compressed cluster */
 983static int f2fs_compressed_blocks(struct compress_ctx *cc)
 984{
 985	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
 986}
 987
 988/* return # of valid blocks in compressed cluster */
 989int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
 990{
 991	return __f2fs_cluster_blocks(inode,
 992		index >> F2FS_I(inode)->i_log_cluster_size,
 993		false);
 994}
 995
 996static bool cluster_may_compress(struct compress_ctx *cc)
 997{
 998	if (!f2fs_need_compress_data(cc->inode))
 999		return false;
1000	if (f2fs_is_atomic_file(cc->inode))
1001		return false;
1002	if (!f2fs_cluster_is_full(cc))
1003		return false;
1004	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1005		return false;
1006	return !cluster_has_invalid_data(cc);
1007}
1008
1009static void set_cluster_writeback(struct compress_ctx *cc)
1010{
1011	int i;
1012
1013	for (i = 0; i < cc->cluster_size; i++) {
1014		if (cc->rpages[i])
1015			set_page_writeback(cc->rpages[i]);
1016	}
1017}
1018
1019static void set_cluster_dirty(struct compress_ctx *cc)
1020{
1021	int i;
1022
1023	for (i = 0; i < cc->cluster_size; i++)
1024		if (cc->rpages[i])
1025			set_page_dirty(cc->rpages[i]);
1026}
1027
1028static int prepare_compress_overwrite(struct compress_ctx *cc,
1029		struct page **pagep, pgoff_t index, void **fsdata)
1030{
1031	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1032	struct address_space *mapping = cc->inode->i_mapping;
1033	struct page *page;
1034	sector_t last_block_in_bio;
1035	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1036	pgoff_t start_idx = start_idx_of_cluster(cc);
1037	int i, ret;
1038
1039retry:
1040	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1041	if (ret <= 0)
1042		return ret;
1043
1044	ret = f2fs_init_compress_ctx(cc);
1045	if (ret)
1046		return ret;
1047
1048	/* keep page reference to avoid page reclaim */
1049	for (i = 0; i < cc->cluster_size; i++) {
1050		page = f2fs_pagecache_get_page(mapping, start_idx + i,
1051							fgp_flag, GFP_NOFS);
1052		if (!page) {
1053			ret = -ENOMEM;
1054			goto unlock_pages;
1055		}
1056
1057		if (PageUptodate(page))
1058			f2fs_put_page(page, 1);
1059		else
1060			f2fs_compress_ctx_add_page(cc, page);
1061	}
1062
1063	if (!f2fs_cluster_is_empty(cc)) {
1064		struct bio *bio = NULL;
1065
1066		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1067					&last_block_in_bio, false, true);
1068		f2fs_put_rpages(cc);
1069		f2fs_destroy_compress_ctx(cc, true);
1070		if (ret)
1071			goto out;
1072		if (bio)
1073			f2fs_submit_bio(sbi, bio, DATA);
1074
1075		ret = f2fs_init_compress_ctx(cc);
1076		if (ret)
1077			goto out;
1078	}
1079
1080	for (i = 0; i < cc->cluster_size; i++) {
1081		f2fs_bug_on(sbi, cc->rpages[i]);
1082
1083		page = find_lock_page(mapping, start_idx + i);
1084		if (!page) {
1085			/* page can be truncated */
1086			goto release_and_retry;
1087		}
1088
1089		f2fs_wait_on_page_writeback(page, DATA, true, true);
1090		f2fs_compress_ctx_add_page(cc, page);
1091
1092		if (!PageUptodate(page)) {
1093release_and_retry:
1094			f2fs_put_rpages(cc);
1095			f2fs_unlock_rpages(cc, i + 1);
1096			f2fs_destroy_compress_ctx(cc, true);
1097			goto retry;
1098		}
1099	}
1100
1101	if (likely(!ret)) {
1102		*fsdata = cc->rpages;
1103		*pagep = cc->rpages[offset_in_cluster(cc, index)];
1104		return cc->cluster_size;
1105	}
1106
1107unlock_pages:
1108	f2fs_put_rpages(cc);
1109	f2fs_unlock_rpages(cc, i);
1110	f2fs_destroy_compress_ctx(cc, true);
1111out:
1112	return ret;
1113}
1114
1115int f2fs_prepare_compress_overwrite(struct inode *inode,
1116		struct page **pagep, pgoff_t index, void **fsdata)
1117{
1118	struct compress_ctx cc = {
1119		.inode = inode,
1120		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1121		.cluster_size = F2FS_I(inode)->i_cluster_size,
1122		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1123		.rpages = NULL,
1124		.nr_rpages = 0,
1125	};
1126
1127	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1128}
1129
1130bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1131					pgoff_t index, unsigned copied)
1132
1133{
1134	struct compress_ctx cc = {
1135		.inode = inode,
1136		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1137		.cluster_size = F2FS_I(inode)->i_cluster_size,
1138		.rpages = fsdata,
1139	};
1140	bool first_index = (index == cc.rpages[0]->index);
1141
1142	if (copied)
1143		set_cluster_dirty(&cc);
1144
1145	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1146	f2fs_destroy_compress_ctx(&cc, false);
1147
1148	return first_index;
1149}
1150
1151int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1152{
1153	void *fsdata = NULL;
1154	struct page *pagep;
1155	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1156	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1157							log_cluster_size;
1158	int err;
1159
1160	err = f2fs_is_compressed_cluster(inode, start_idx);
1161	if (err < 0)
1162		return err;
1163
1164	/* truncate normal cluster */
1165	if (!err)
1166		return f2fs_do_truncate_blocks(inode, from, lock);
1167
1168	/* truncate compressed cluster */
1169	err = f2fs_prepare_compress_overwrite(inode, &pagep,
1170						start_idx, &fsdata);
1171
1172	/* should not be a normal cluster */
1173	f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1174
1175	if (err <= 0)
1176		return err;
1177
1178	if (err > 0) {
1179		struct page **rpages = fsdata;
1180		int cluster_size = F2FS_I(inode)->i_cluster_size;
1181		int i;
1182
1183		for (i = cluster_size - 1; i >= 0; i--) {
1184			loff_t start = rpages[i]->index << PAGE_SHIFT;
1185
1186			if (from <= start) {
1187				zero_user_segment(rpages[i], 0, PAGE_SIZE);
1188			} else {
1189				zero_user_segment(rpages[i], from - start,
1190								PAGE_SIZE);
1191				break;
1192			}
1193		}
1194
1195		f2fs_compress_write_end(inode, fsdata, start_idx, true);
1196	}
1197	return 0;
1198}
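
/*
 * Index arithmetic (editor's illustration): with 4 KiB pages and
 * log_cluster_size == 2, from == 0x6500 gives start_idx ==
 * (0x6500 >> (12 + 2)) << 2 == 4, i.e. the first page of the cluster
 * (pages 4..7) that contains byte offset 0x6500.
 */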
1199
1200static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1201					int *submitted,
1202					struct writeback_control *wbc,
1203					enum iostat_type io_type)
1204{
1205	struct inode *inode = cc->inode;
1206	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1207	struct f2fs_inode_info *fi = F2FS_I(inode);
1208	struct f2fs_io_info fio = {
1209		.sbi = sbi,
1210		.ino = cc->inode->i_ino,
1211		.type = DATA,
1212		.op = REQ_OP_WRITE,
1213		.op_flags = wbc_to_write_flags(wbc),
1214		.old_blkaddr = NEW_ADDR,
1215		.page = NULL,
1216		.encrypted_page = NULL,
1217		.compressed_page = NULL,
1218		.submitted = false,
1219		.io_type = io_type,
1220		.io_wbc = wbc,
1221		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
1222	};
1223	struct dnode_of_data dn;
1224	struct node_info ni;
1225	struct compress_io_ctx *cic;
1226	pgoff_t start_idx = start_idx_of_cluster(cc);
1227	unsigned int last_index = cc->cluster_size - 1;
1228	loff_t psize;
1229	int i, err;
1230
 1231	/* we should bypass data pages to let the kworker jobs proceed */
1232	if (unlikely(f2fs_cp_error(sbi))) {
1233		mapping_set_error(cc->rpages[0]->mapping, -EIO);
1234		goto out_free;
1235	}
1236
1237	if (IS_NOQUOTA(inode)) {
 1238		/*
 1239		 * We need to wait for node_write to avoid block allocation during
 1240		 * checkpoint. This can only happen to quota writes, which could
 1241		 * otherwise trigger the discard race condition below.
 1242		 */
1243		f2fs_down_read(&sbi->node_write);
1244	} else if (!f2fs_trylock_op(sbi)) {
1245		goto out_free;
1246	}
1247
1248	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1249
1250	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1251	if (err)
1252		goto out_unlock_op;
1253
1254	for (i = 0; i < cc->cluster_size; i++) {
1255		if (data_blkaddr(dn.inode, dn.node_page,
1256					dn.ofs_in_node + i) == NULL_ADDR)
1257			goto out_put_dnode;
1258	}
1259
1260	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1261
1262	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1263	if (err)
1264		goto out_put_dnode;
1265
1266	fio.version = ni.version;
1267
1268	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1269	if (!cic)
1270		goto out_put_dnode;
1271
1272	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1273	cic->inode = inode;
1274	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1275	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1276	if (!cic->rpages)
1277		goto out_put_cic;
1278
1279	cic->nr_rpages = cc->cluster_size;
1280
1281	for (i = 0; i < cc->valid_nr_cpages; i++) {
1282		f2fs_set_compressed_page(cc->cpages[i], inode,
1283					cc->rpages[i + 1]->index, cic);
1284		fio.compressed_page = cc->cpages[i];
1285
1286		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1287						dn.ofs_in_node + i + 1);
1288
1289		/* wait for GCed page writeback via META_MAPPING */
1290		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1291
1292		if (fio.encrypted) {
1293			fio.page = cc->rpages[i + 1];
1294			err = f2fs_encrypt_one_page(&fio);
1295			if (err)
1296				goto out_destroy_crypt;
1297			cc->cpages[i] = fio.encrypted_page;
1298		}
1299	}
1300
1301	set_cluster_writeback(cc);
1302
1303	for (i = 0; i < cc->cluster_size; i++)
1304		cic->rpages[i] = cc->rpages[i];
1305
1306	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1307		block_t blkaddr;
1308
1309		blkaddr = f2fs_data_blkaddr(&dn);
1310		fio.page = cc->rpages[i];
1311		fio.old_blkaddr = blkaddr;
1312
1313		/* cluster header */
1314		if (i == 0) {
1315			if (blkaddr == COMPRESS_ADDR)
1316				fio.compr_blocks++;
1317			if (__is_valid_data_blkaddr(blkaddr))
1318				f2fs_invalidate_blocks(sbi, blkaddr);
1319			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1320			goto unlock_continue;
1321		}
1322
1323		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1324			fio.compr_blocks++;
1325
1326		if (i > cc->valid_nr_cpages) {
1327			if (__is_valid_data_blkaddr(blkaddr)) {
1328				f2fs_invalidate_blocks(sbi, blkaddr);
1329				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1330			}
1331			goto unlock_continue;
1332		}
1333
1334		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1335
1336		if (fio.encrypted)
1337			fio.encrypted_page = cc->cpages[i - 1];
1338		else
1339			fio.compressed_page = cc->cpages[i - 1];
1340
1341		cc->cpages[i - 1] = NULL;
1342		f2fs_outplace_write_data(&dn, &fio);
1343		(*submitted)++;
1344unlock_continue:
1345		inode_dec_dirty_pages(cc->inode);
1346		unlock_page(fio.page);
1347	}
1348
1349	if (fio.compr_blocks)
1350		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1351	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1352	add_compr_block_stat(inode, cc->valid_nr_cpages);
1353
1354	set_inode_flag(cc->inode, FI_APPEND_WRITE);
1355	if (cc->cluster_idx == 0)
1356		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1357
1358	f2fs_put_dnode(&dn);
1359	if (IS_NOQUOTA(inode))
1360		f2fs_up_read(&sbi->node_write);
1361	else
1362		f2fs_unlock_op(sbi);
1363
1364	spin_lock(&fi->i_size_lock);
1365	if (fi->last_disk_size < psize)
1366		fi->last_disk_size = psize;
1367	spin_unlock(&fi->i_size_lock);
1368
1369	f2fs_put_rpages(cc);
1370	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1371	cc->cpages = NULL;
1372	f2fs_destroy_compress_ctx(cc, false);
1373	return 0;
1374
1375out_destroy_crypt:
1376	page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1377
1378	for (--i; i >= 0; i--)
1379		fscrypt_finalize_bounce_page(&cc->cpages[i]);
1380out_put_cic:
1381	kmem_cache_free(cic_entry_slab, cic);
1382out_put_dnode:
1383	f2fs_put_dnode(&dn);
1384out_unlock_op:
1385	if (IS_NOQUOTA(inode))
1386		f2fs_up_read(&sbi->node_write);
1387	else
1388		f2fs_unlock_op(sbi);
1389out_free:
1390	for (i = 0; i < cc->valid_nr_cpages; i++) {
1391		f2fs_compress_free_page(cc->cpages[i]);
1392		cc->cpages[i] = NULL;
1393	}
1394	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1395	cc->cpages = NULL;
1396	return -EAGAIN;
1397}
1398
1399void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1400{
1401	struct f2fs_sb_info *sbi = bio->bi_private;
1402	struct compress_io_ctx *cic =
1403			(struct compress_io_ctx *)page_private(page);
1404	int i;
1405
1406	if (unlikely(bio->bi_status))
1407		mapping_set_error(cic->inode->i_mapping, -EIO);
1408
1409	f2fs_compress_free_page(page);
1410
1411	dec_page_count(sbi, F2FS_WB_DATA);
1412
1413	if (atomic_dec_return(&cic->pending_pages))
1414		return;
1415
1416	for (i = 0; i < cic->nr_rpages; i++) {
1417		WARN_ON(!cic->rpages[i]);
1418		clear_page_private_gcing(cic->rpages[i]);
1419		end_page_writeback(cic->rpages[i]);
1420	}
1421
1422	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1423	kmem_cache_free(cic_entry_slab, cic);
1424}
1425
1426static int f2fs_write_raw_pages(struct compress_ctx *cc,
1427					int *submitted,
1428					struct writeback_control *wbc,
1429					enum iostat_type io_type)
1430{
1431	struct address_space *mapping = cc->inode->i_mapping;
1432	int _submitted, compr_blocks, ret, i;
1433
1434	compr_blocks = f2fs_compressed_blocks(cc);
1435
1436	for (i = 0; i < cc->cluster_size; i++) {
1437		if (!cc->rpages[i])
1438			continue;
1439
1440		redirty_page_for_writepage(wbc, cc->rpages[i]);
1441		unlock_page(cc->rpages[i]);
1442	}
1443
1444	if (compr_blocks < 0)
1445		return compr_blocks;
1446
1447	for (i = 0; i < cc->cluster_size; i++) {
1448		if (!cc->rpages[i])
1449			continue;
1450retry_write:
1451		lock_page(cc->rpages[i]);
1452
1453		if (cc->rpages[i]->mapping != mapping) {
1454continue_unlock:
1455			unlock_page(cc->rpages[i]);
1456			continue;
1457		}
1458
1459		if (!PageDirty(cc->rpages[i]))
1460			goto continue_unlock;
1461
1462		if (!clear_page_dirty_for_io(cc->rpages[i]))
1463			goto continue_unlock;
1464
1465		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1466						NULL, NULL, wbc, io_type,
1467						compr_blocks, false);
1468		if (ret) {
1469			if (ret == AOP_WRITEPAGE_ACTIVATE) {
1470				unlock_page(cc->rpages[i]);
1471				ret = 0;
1472			} else if (ret == -EAGAIN) {
 1473				/*
 1474				 * for a quota file, just redirty the remaining
 1475				 * pages to avoid a deadlock caused by a cluster
 1476				 * update race with foreground operations.
 1477				 */
1478				if (IS_NOQUOTA(cc->inode))
1479					return 0;
1480				ret = 0;
1481				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1482				goto retry_write;
1483			}
1484			return ret;
1485		}
1486
1487		*submitted += _submitted;
1488	}
1489
1490	f2fs_balance_fs(F2FS_M_SB(mapping), true);
1491
1492	return 0;
1493}
1494
1495int f2fs_write_multi_pages(struct compress_ctx *cc,
1496					int *submitted,
1497					struct writeback_control *wbc,
1498					enum iostat_type io_type)
1499{
1500	int err;
1501
1502	*submitted = 0;
1503	if (cluster_may_compress(cc)) {
1504		err = f2fs_compress_pages(cc);
1505		if (err == -EAGAIN) {
1506			add_compr_block_stat(cc->inode, cc->cluster_size);
1507			goto write;
1508		} else if (err) {
1509			f2fs_put_rpages_wbc(cc, wbc, true, 1);
1510			goto destroy_out;
1511		}
1512
1513		err = f2fs_write_compressed_pages(cc, submitted,
1514							wbc, io_type);
1515		if (!err)
1516			return 0;
1517		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1518	}
1519write:
1520	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1521
1522	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1523	f2fs_put_rpages_wbc(cc, wbc, false, 0);
1524destroy_out:
1525	f2fs_destroy_compress_ctx(cc, false);
1526	return err;
1527}
1528
1529static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1530		bool pre_alloc)
1531{
1532	return pre_alloc ^ f2fs_low_mem_mode(sbi);
1533}
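
/*
 * Editor's note on the XOR above: in normal-memory mode
 * (f2fs_low_mem_mode() == false) decompression buffers are allocated up
 * front (pre_alloc == true), while in low-memory mode the allocation is
 * deferred until decompression actually runs (pre_alloc == false), so
 * exactly one of the two call sites does the work in either mode.
 */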
1534
1535static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1536		bool pre_alloc)
1537{
1538	const struct f2fs_compress_ops *cops =
1539		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1540	int i;
1541
1542	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1543		return 0;
1544
1545	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1546	if (!dic->tpages)
1547		return -ENOMEM;
1548
1549	for (i = 0; i < dic->cluster_size; i++) {
1550		if (dic->rpages[i]) {
1551			dic->tpages[i] = dic->rpages[i];
1552			continue;
1553		}
1554
1555		dic->tpages[i] = f2fs_compress_alloc_page();
1556		if (!dic->tpages[i])
1557			return -ENOMEM;
1558	}
1559
1560	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1561	if (!dic->rbuf)
1562		return -ENOMEM;
1563
1564	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1565	if (!dic->cbuf)
1566		return -ENOMEM;
1567
1568	if (cops->init_decompress_ctx)
1569		return cops->init_decompress_ctx(dic);
1570
1571	return 0;
1572}
1573
1574static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1575		bool bypass_destroy_callback, bool pre_alloc)
1576{
1577	const struct f2fs_compress_ops *cops =
1578		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1579
1580	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1581		return;
1582
1583	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1584		cops->destroy_decompress_ctx(dic);
1585
1586	if (dic->cbuf)
1587		vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1588
1589	if (dic->rbuf)
1590		vm_unmap_ram(dic->rbuf, dic->cluster_size);
1591}
1592
1593static void f2fs_free_dic(struct decompress_io_ctx *dic,
1594		bool bypass_destroy_callback);
1595
1596struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1597{
1598	struct decompress_io_ctx *dic;
1599	pgoff_t start_idx = start_idx_of_cluster(cc);
1600	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1601	int i, ret;
1602
1603	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1604	if (!dic)
1605		return ERR_PTR(-ENOMEM);
1606
1607	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1608	if (!dic->rpages) {
1609		kmem_cache_free(dic_entry_slab, dic);
1610		return ERR_PTR(-ENOMEM);
1611	}
1612
1613	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1614	dic->inode = cc->inode;
1615	atomic_set(&dic->remaining_pages, cc->nr_cpages);
1616	dic->cluster_idx = cc->cluster_idx;
1617	dic->cluster_size = cc->cluster_size;
1618	dic->log_cluster_size = cc->log_cluster_size;
1619	dic->nr_cpages = cc->nr_cpages;
1620	refcount_set(&dic->refcnt, 1);
1621	dic->failed = false;
1622	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1623
1624	for (i = 0; i < dic->cluster_size; i++)
1625		dic->rpages[i] = cc->rpages[i];
1626	dic->nr_rpages = cc->cluster_size;
1627
1628	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1629	if (!dic->cpages) {
1630		ret = -ENOMEM;
1631		goto out_free;
1632	}
1633
1634	for (i = 0; i < dic->nr_cpages; i++) {
1635		struct page *page;
1636
1637		page = f2fs_compress_alloc_page();
1638		if (!page) {
1639			ret = -ENOMEM;
1640			goto out_free;
1641		}
1642
1643		f2fs_set_compressed_page(page, cc->inode,
1644					start_idx + i + 1, dic);
1645		dic->cpages[i] = page;
1646	}
1647
1648	ret = f2fs_prepare_decomp_mem(dic, true);
1649	if (ret)
1650		goto out_free;
1651
1652	return dic;
1653
1654out_free:
1655	f2fs_free_dic(dic, true);
1656	return ERR_PTR(ret);
1657}
1658
1659static void f2fs_free_dic(struct decompress_io_ctx *dic,
1660		bool bypass_destroy_callback)
1661{
1662	int i;
1663
1664	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1665
1666	if (dic->tpages) {
1667		for (i = 0; i < dic->cluster_size; i++) {
1668			if (dic->rpages[i])
1669				continue;
1670			if (!dic->tpages[i])
1671				continue;
1672			f2fs_compress_free_page(dic->tpages[i]);
1673		}
1674		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1675	}
1676
1677	if (dic->cpages) {
1678		for (i = 0; i < dic->nr_cpages; i++) {
1679			if (!dic->cpages[i])
1680				continue;
1681			f2fs_compress_free_page(dic->cpages[i]);
1682		}
1683		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1684	}
1685
1686	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1687	kmem_cache_free(dic_entry_slab, dic);
1688}
1689
1690static void f2fs_late_free_dic(struct work_struct *work)
1691{
1692	struct decompress_io_ctx *dic =
1693		container_of(work, struct decompress_io_ctx, free_work);
1694
1695	f2fs_free_dic(dic, false);
1696}
1697
1698static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1699{
1700	if (refcount_dec_and_test(&dic->refcnt)) {
1701		if (in_task) {
1702			f2fs_free_dic(dic, false);
1703		} else {
1704			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1705			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1706					&dic->free_work);
1707		}
1708	}
1709}
1710
1711static void f2fs_verify_cluster(struct work_struct *work)
1712{
1713	struct decompress_io_ctx *dic =
1714		container_of(work, struct decompress_io_ctx, verity_work);
1715	int i;
1716
1717	/* Verify, update, and unlock the decompressed pages. */
1718	for (i = 0; i < dic->cluster_size; i++) {
1719		struct page *rpage = dic->rpages[i];
1720
1721		if (!rpage)
1722			continue;
1723
1724		if (fsverity_verify_page(rpage))
1725			SetPageUptodate(rpage);
1726		else
1727			ClearPageUptodate(rpage);
1728		unlock_page(rpage);
1729	}
1730
1731	f2fs_put_dic(dic, true);
1732}
1733
1734/*
1735 * This is called when a compressed cluster has been decompressed
1736 * (or failed to be read and/or decompressed).
1737 */
1738void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1739				bool in_task)
1740{
1741	int i;
1742
1743	if (!failed && dic->need_verity) {
1744		/*
1745		 * Note that to avoid deadlocks, the verity work can't be done
1746		 * on the decompression workqueue.  This is because verifying
1747		 * the data pages can involve reading metadata pages from the
1748		 * file, and these metadata pages may be compressed.
1749		 */
1750		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1751		fsverity_enqueue_verify_work(&dic->verity_work);
1752		return;
1753	}
1754
1755	/* Update and unlock the cluster's pagecache pages. */
1756	for (i = 0; i < dic->cluster_size; i++) {
1757		struct page *rpage = dic->rpages[i];
1758
1759		if (!rpage)
1760			continue;
1761
1762		if (failed)
1763			ClearPageUptodate(rpage);
1764		else
1765			SetPageUptodate(rpage);
1766		unlock_page(rpage);
1767	}
1768
1769	/*
1770	 * Release the reference to the decompress_io_ctx that was being held
1771	 * for I/O completion.
1772	 */
1773	f2fs_put_dic(dic, in_task);
1774}
1775
1776/*
1777 * Put a reference to a compressed page's decompress_io_ctx.
1778 *
1779 * This is called when the page is no longer needed and can be freed.
1780 */
1781void f2fs_put_page_dic(struct page *page, bool in_task)
1782{
1783	struct decompress_io_ctx *dic =
1784			(struct decompress_io_ctx *)page_private(page);
1785
1786	f2fs_put_dic(dic, in_task);
1787}
1788
 1789/*
 1790 * check whether cluster blocks are contiguous, and add an extent cache
 1791 * entry only if cluster blocks are logically and physically contiguous.
 1792 */
1793unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
1794{
1795	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
1796	int i = compressed ? 1 : 0;
1797	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1798						dn->ofs_in_node + i);
1799
1800	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1801		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1802						dn->ofs_in_node + i);
1803
1804		if (!__is_valid_data_blkaddr(blkaddr))
1805			break;
1806		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1807			return 0;
1808	}
1809
1810	return compressed ? i - 1 : i;
1811}
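
/*
 * Worked example (editor's illustration): a compressed 4-page cluster
 * laid out as [COMPR_ADDR, 1000, 1001, 1002] is physically contiguous
 * and the function returns 3; if the last block were 1005 instead, the
 * run is broken and 0 is returned, so no extent cache entry is added.
 */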
1812
1813const struct address_space_operations f2fs_compress_aops = {
1814	.release_folio = f2fs_release_folio,
1815	.invalidate_folio = f2fs_invalidate_folio,
1816};
1817
1818struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1819{
1820	return sbi->compress_inode->i_mapping;
1821}
1822
1823void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
1824{
1825	if (!sbi->compress_inode)
1826		return;
1827	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
1828}
1829
1830void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1831						nid_t ino, block_t blkaddr)
1832{
1833	struct page *cpage;
1834	int ret;
1835
1836	if (!test_opt(sbi, COMPRESS_CACHE))
1837		return;
1838
1839	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1840		return;
1841
1842	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1843		return;
1844
1845	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1846	if (cpage) {
1847		f2fs_put_page(cpage, 0);
1848		return;
1849	}
1850
1851	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1852	if (!cpage)
1853		return;
1854
1855	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1856						blkaddr, GFP_NOFS);
1857	if (ret) {
1858		f2fs_put_page(cpage, 0);
1859		return;
1860	}
1861
1862	set_page_private_data(cpage, ino);
1863
1864	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1865		goto out;
1866
1867	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1868	SetPageUptodate(cpage);
1869out:
1870	f2fs_put_page(cpage, 1);
1871}
1872
1873bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1874								block_t blkaddr)
1875{
1876	struct page *cpage;
 1877	bool hit = false;
1878
1879	if (!test_opt(sbi, COMPRESS_CACHE))
1880		return false;
1881
1882	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1883				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1884	if (cpage) {
1885		if (PageUptodate(cpage)) {
1886			atomic_inc(&sbi->compress_page_hit);
1887			memcpy(page_address(page),
1888				page_address(cpage), PAGE_SIZE);
 1889			hit = true;
1890		}
1891		f2fs_put_page(cpage, 1);
1892	}
1893
 1894	return hit;
1895}
1896
1897void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1898{
1899	struct address_space *mapping = COMPRESS_MAPPING(sbi);
1900	struct folio_batch fbatch;
1901	pgoff_t index = 0;
1902	pgoff_t end = MAX_BLKADDR(sbi);
1903
1904	if (!mapping->nrpages)
1905		return;
1906
1907	folio_batch_init(&fbatch);
1908
1909	do {
1910		unsigned int nr, i;
1911
1912		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
1913		if (!nr)
1914			break;
1915
1916		for (i = 0; i < nr; i++) {
1917			struct folio *folio = fbatch.folios[i];
1918
1919			folio_lock(folio);
1920			if (folio->mapping != mapping) {
1921				folio_unlock(folio);
1922				continue;
1923			}
1924
1925			if (ino != get_page_private_data(&folio->page)) {
1926				folio_unlock(folio);
1927				continue;
1928			}
1929
1930			generic_error_remove_page(mapping, &folio->page);
1931			folio_unlock(folio);
1932		}
1933		folio_batch_release(&fbatch);
1934		cond_resched();
1935	} while (index < end);
1936}
1937
1938int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
1939{
1940	struct inode *inode;
1941
1942	if (!test_opt(sbi, COMPRESS_CACHE))
1943		return 0;
1944
1945	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
1946	if (IS_ERR(inode))
1947		return PTR_ERR(inode);
1948	sbi->compress_inode = inode;
1949
1950	sbi->compress_percent = COMPRESS_PERCENT;
1951	sbi->compress_watermark = COMPRESS_WATERMARK;
1952
1953	atomic_set(&sbi->compress_page_hit, 0);
1954
1955	return 0;
1956}
1957
1958void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
1959{
1960	if (!sbi->compress_inode)
1961		return;
1962	iput(sbi->compress_inode);
1963	sbi->compress_inode = NULL;
1964}
1965
1966int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1967{
1968	dev_t dev = sbi->sb->s_bdev->bd_dev;
1969	char slab_name[32];
1970
1971	if (!f2fs_sb_has_compression(sbi))
1972		return 0;
1973
1974	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1975
1976	sbi->page_array_slab_size = sizeof(struct page *) <<
1977					F2FS_OPTION(sbi).compress_log_size;
1978
1979	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1980					sbi->page_array_slab_size);
1981	return sbi->page_array_slab ? 0 : -ENOMEM;
1982}
1983
1984void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1985{
1986	kmem_cache_destroy(sbi->page_array_slab);
1987}
1988
1989int __init f2fs_init_compress_cache(void)
1990{
1991	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1992					sizeof(struct compress_io_ctx));
1993	if (!cic_entry_slab)
1994		return -ENOMEM;
1995	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1996					sizeof(struct decompress_io_ctx));
1997	if (!dic_entry_slab)
1998		goto free_cic;
1999	return 0;
2000free_cic:
2001	kmem_cache_destroy(cic_entry_slab);
2002	return -ENOMEM;
2003}
2004
2005void f2fs_destroy_compress_cache(void)
2006{
2007	kmem_cache_destroy(dic_entry_slab);
2008	kmem_cache_destroy(cic_entry_slab);
2009}