// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
 * it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
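 *
 * In short (summarizing the rules above): a page with @PG_private set is
 * already budgeted; when its budget is eventually released, @PG_checked
 * picks the budget type - a "new page" budget if it is set (hole or beyond
 * the file size), an "existing page" budget otherwise.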
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
 * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
 * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> read_folio"). In case of readahead, the @I_SYNC flag
 * is not set as well. However, UBIFS disables readahead.
 */

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

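	/* Length of the (possibly compressed) data, excluding the node header */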
	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

	if (IS_ENCRYPTED(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto dump;
	}

	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data). Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
	return -EINVAL;
}

static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(c, !PageChecked(page));
	ubifs_assert(c, !PagePrivate(page));

	addr = kmap(page);

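	/*
	 * Translate the page index into the first UBIFS block number covered
	 * by this page; @beyond is the first block past the end of the file.
	 * With 4KiB pages there is one UBIFS block per page; larger pages
	 * hold several blocks.
	 */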
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		SetPageChecked(page);
		memset(addr, 0, PAGE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget };

	ubifs_release_budget(c, &req);
}

static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At the slow path we have to budget before locking the page, because
	 * budgeting may force write-back, which would wait on locked pages and
	 * deadlock if we had the page locked. At this point we do not know
	 * anything about the page, so assume that this is a new page which is
	 * written to a hole. This corresponds to the largest budget. Later the
	 * budget will be amended if this is not true.
	 */
	if (appending)
		/* We are appending data, budget for inode change */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 * o first time the budget was allocated by the task which
		 *   made the page dirty and set the PG_private flag;
		 * o and then we budgeted for it for the second time at the
		 *   very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * We are changing a page which already exists on the media.
		 * This means that changing the page does not make the amount
		 * of indexing information larger, and this part of the budget
		 * which we have already acquired may be released.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' is optimized from the fast-path part of
		 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
		 * if data is appended.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so we may free the
			 * budget we allocated.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending.
 *
 * Returns: %0 in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty and we are not appending, which
			 * means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty. However, it is already
			 * dirty, so we do not need any budget. We may return,
			 * but @ui->ui_mutex has to be left locked because we
			 * should prevent write-back from flushing the inode
			 * and freeing the budget. The lock will be released in
			 * 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * we need to budget the inode change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page corresponds to a hole and does not exist
			 * on the media. So changing it makes the amount of
			 * indexing information larger, and we have to budget
			 * for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * Not a hole, the change will not add any new
			 * indexing information, budget for page change.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending. This
				 * needs a budget.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
 *
 * There are many budgeting cases:
 * o a new page is appended - we have to budget for a new page and for
 *   changing the inode; however, if the inode is already dirty, there is
 *   no need to budget for it;
 * o an existing clean page is changed - we have to budget for it; if the page
 *   does not exist on the media (a hole), we have to budget for a new
 *   page; otherwise, we may budget for changing an existing page; the
 *   difference between these cases is that changing an existing page does
 *   not introduce anything new to the FS indexing information, so it does
 *   not grow, and a smaller budget is acquired in this case;
 * o an existing dirty page is changed - no need to budget at all, because
 *   the page budget was acquired earlier, when the page was marked dirty.
 *
 * UBIFS budgeting sub-system may force write-back if it thinks there is no
 * space to reserve. This imposes some locking restrictions and makes it
 * impossible to take into account the above cases, and makes it impossible to
 * optimize budgeting.
 *
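 * To summarize the fast path below: grab the page, issue a budget request
 * with the @fast flag set (which never forces write-back), and on -ENOSPC
 * undo everything and fall back to 'write_begin_slow()'.
 *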
 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
 * there is plenty of flash space and the budget will be acquired quickly,
 * without forcing write-back. The slow path does not make this assumption.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_SHIFT;
	int err, appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
			/*
			 * We change the whole page so no need to load it. But
			 * we do not know whether this page exists on the media
			 * or not, so we assume the latter because it requires
			 * larger budget. The assumption is that it is better
			 * to budget a bit more than to read the page from the
			 * media. Thus, we are setting the @PG_checked flag
			 * here.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(c, err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Budgeting failed which means it would have to force
		 * write-back but didn't, because we set the @fast flag in the
		 * request. Write-back cannot be done now, while we have the
		 * page locked, because it would deadlock. Unlock and free
		 * everything and fall-back to slow-path.
		 */
		if (appending) {
			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		put_page(page);

		return write_begin_slow(mapping, pos, len, pagep);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back. We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise. This is an optimization (slightly hacky though).
	 */
	*pagep = page;
	return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}

static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * VFS copied less data to the page than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument. If the page was not up-to-date, and @len was
		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
		 * not load it from the media (for optimization reasons). This
		 * means that part of the page contains garbage. So read the
		 * page now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force VFS to repeat the whole operation, or the
		 * error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
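		/*
		 * The page is becoming dirty for the first time: set
		 * @PG_private to mark it as budgeted (the attached value is a
		 * dummy, only the flag matters) and account it in the dirty
		 * page counter.
		 */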
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * Note, we do not set @I_DIRTY_PAGES (which means that the
		 * inode has dirty pages), this has been done in
		 * '__set_page_dirty_nobuffers()'.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * Returns: %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || page->index > end_index) {
		hole = 1;
		memset(addr, 0, PAGE_SIZE);
		goto out_hole;
	}

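	/* First UBIFS block covered by this page */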
	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;

			if (IS_ENCRYPTED(inode)) {
				err = ubifs_decrypt(inode, dn, &dlen, page_block);
				if (err)
					goto out_err;
			}

			err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			nn += 1;
			continue;
		} else {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and the
		 * blocks for the first page we are looking for are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate bulk-read buffer depending on how many data
			 * nodes we are going to read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(c, bu->buf_len > 0);
			ubifs_assert(c, bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = pagecache_get_page(mapping, page_offset,
				 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
				 ra_gfp_mask);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		put_page(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB.
 *
 * Returns: %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
	 * so don't bother if we cannot lock the mutex.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, try to use pre-allocated bulk-read information, which
	 * is protected by @c->bu_mutex.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}

static int ubifs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;

	if (ubifs_bulk_read(page))
		return 0;
	do_readpage(page);
	folio_unlock(folio);
	return 0;
}

static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
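	/*
	 * Write @len bytes of the page through the journal, one UBIFS block
	 * at a time; the last block may be shorter than UBIFS_BLOCK_SIZE.
	 */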
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(c, PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	detach_page_private(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}

/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation where we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which is long and unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains current inode size,
 * and then keeps writing pages back.
 *
 * Some locking issues explanation. 'ubifs_writepage()' first is called with
 * the page locked, and it locks @ui_mutex. However, write-back does not take
 * inode @i_mutex, which means other VFS operations may be run on this inode at
 * the same time. And the problematic one is truncation to smaller size, from
 * where we have to call 'truncate_setsize()', which first changes
 * @inode->i_size, then drops the truncated pages. And while dropping the
 * pages, it takes the page lock. This means that 'do_truncation()' cannot call
 * 'truncate_setsize()' with @ui_mutex locked, because it would deadlock with
 * 'ubifs_writepage()'. This means that @inode->i_size is changed while
 * @ui_mutex is unlocked.
 *
 * XXX(truncate): with the new truncate sequence this is not true anymore,
 * and the calls to truncate_setsize can be moved around freely. They should
 * be moved to the very end of the truncate sequence.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * inode size. How do we do this if @inode->i_size may become smaller while we
 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_isize "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates it under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 * situation when the inode is truncated while we are in the middle of
 * 'do_writepage()', so we do write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would be blocked
 * on the page lock and it would not write the truncated inode node to the
 * journal before we have finished.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int err, len = i_size & (PAGE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(c, PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_redirty;
			/*
			 * The inode has been written, but the write-buffer has
			 * not been synchronized, so in case of an unclean
			 * reboot we may end up with some pages beyond inode
			 * size, but they would be in the journal (because
			 * commit flushes write buffers) and recovery would deal
			 * with this.
			 */
		}
		return do_writepage(page, PAGE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_redirty;
	}

	return do_writepage(page, len);
out_redirty:
	/*
	 * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
	 * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
	 * there is no need to do space budget for dirty inode.
	 */
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	return err;
}

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode_set_atime_to_ts(inode, attr->ia_atime);
	if (attr->ia_valid & ATTR_MTIME)
		inode_set_mtime_to_ts(inode, attr->ia_mtime);
	if (attr->ia_valid & ATTR_CTIME)
		inode_set_ctime_to_ts(inode, attr->ia_ctime);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size.
 *
 * Returns: %0 in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(c, PagePrivate(page));

				clear_page_dirty_for_io(page);
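				/*
				 * With more than one UBIFS block per page,
				 * write the whole tail of the page, not just
				 * the tail of the last block.
				 */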
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size & (PAGE_SIZE - 1);
				err = do_writepage(page, offset);
				put_page(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				unlock_page(page);
				put_page(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size.
 *
 * Returns: %0 in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}

int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *attr)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}

static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(c, folio_test_private(folio));
	if (offset || length < folio_size(folio))
		/* Partial folio remains dirty */
		return;

	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
}

int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer. Flush
	 * them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 *
 * Returns: %1 if time update is needed, %0 if not
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec64 *now)
{
	struct timespec64 ctime = inode_get_ctime(inode);
	struct timespec64 mtime = inode_get_mtime(inode);

	if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
		return 1;
	return 0;
}

/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @flags: time updating control flag determines updating
 *	    which time fields of @inode
 *
 * This function updates time of the inode.
 *
 * Returns: %0 for success or a negative error code otherwise.
 */
int ubifs_update_time(struct inode *inode, int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int err, release;

	if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
		generic_update_time(inode, flags);
		return 0;
	}

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	inode_update_timestamps(inode, flags);
	release = ui->dirty;
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
	mutex_unlock(&ui->ui_mutex);
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if they are not
 * equivalent to the current time.
 *
 * Returns: %0 in case of success and a negative error code in
 * case of failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec64 now = current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}

static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	int err = update_mctime(file_inode(iocb->ki_filp));

	if (err)
		return err;

	return generic_file_write_iter(iocb, from);
}

static bool ubifs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	bool ret;
	struct ubifs_info *c = mapping->host->i_sb->s_fs_info;

	ret = filemap_dirty_folio(mapping, folio);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(c, ret == false);
	return ret;
}

static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (folio_test_writeback(folio))
		return false;

	/*
	 * The folio is private but not dirty, weird? There is one condition
	 * which makes it happen: ubifs_writepage() skipped the folio because
	 * its index is beyond isize (for example, the file was truncated by
	 * another process, A); then the folio is invalidated by an fadvise64()
	 * syscall before being truncated by process A.
	 */
	ubifs_assert(c, folio_test_private(folio));
	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
	return true;
}

/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec64 now = current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far so we may budget for changing the
	 * page. Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back which would cause deadlock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so we
	 * assume that it is not and budget for a new page. We could look at
	 * the @PG_private flag and figure this out, but we may race with write
	 * back and the page state may change by the time we lock it, so this
	 * would need additional care. We do not bother with this at the
	 * moment, although it might be a good idea to do. Instead, we allocate
	 * budget for a new page and amend it later on if the page was in fact
	 * dirty.
	 *
	 * The budgeting-related logic of this function is similar to what we
	 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
	 * for more comments.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* Page got truncated out from underneath us */
		goto sigbus;
	}

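	/*
	 * The page turned out to be dirty already, so drop the duplicate
	 * "new page" budget we allocated above; otherwise amend the budget
	 * and mark the page as budgeted and dirty.
	 */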
	if (PagePrivate(page))
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			ubifs_convert_page_budget(c);
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;

sigbus:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err;

	err = generic_file_mmap(file, vma);
	if (err)
		return err;
	vma->vm_ops = &ubifs_file_vm_ops;

	if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
		file_accessed(file);

	return 0;
}

static const char *ubifs_get_link(struct dentry *dentry,
				  struct inode *inode,
				  struct delayed_call *done)
{
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (!IS_ENCRYPTED(inode))
		return ui->data;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}

static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
				 const struct path *path, struct kstat *stat,
				 u32 request_mask, unsigned int query_flags)
{
	ubifs_getattr(idmap, path, stat, request_mask, query_flags);

	if (IS_ENCRYPTED(d_inode(path->dentry)))
		return fscrypt_symlink_getattr(path, stat);
	return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
	.read_folio = ubifs_read_folio,
	.writepage = ubifs_writepage,
	.write_begin = ubifs_write_begin,
	.write_end = ubifs_write_end,
	.invalidate_folio = ubifs_invalidate_folio,
	.dirty_folio = ubifs_dirty_folio,
	.migrate_folio = filemap_migrate_folio,
	.release_folio = ubifs_release_folio,
};

const struct inode_operations ubifs_file_inode_operations = {
	.setattr = ubifs_setattr,
	.getattr = ubifs_getattr,
	.listxattr = ubifs_listxattr,
	.update_time = ubifs_update_time,
	.fileattr_get = ubifs_fileattr_get,
	.fileattr_set = ubifs_fileattr_set,
};

const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link = ubifs_get_link,
	.setattr = ubifs_setattr,
	.getattr = ubifs_symlink_getattr,
	.listxattr = ubifs_listxattr,
	.update_time = ubifs_update_time,
};

const struct file_operations ubifs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = ubifs_write_iter,
	.mmap = ubifs_file_mmap,
	.fsync = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ubifs_compat_ioctl,
#endif
};
99static int do_readpage(struct folio *folio)
100{
101 void *addr;
102 int err = 0, i;
103 unsigned int block, beyond;
104 struct ubifs_data_node *dn = NULL;
105 struct inode *inode = folio->mapping->host;
106 struct ubifs_info *c = inode->i_sb->s_fs_info;
107 loff_t i_size = i_size_read(inode);
108
109 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
110 inode->i_ino, folio->index, i_size, folio->flags);
111 ubifs_assert(c, !folio_test_checked(folio));
112 ubifs_assert(c, !folio->private);
113
114 addr = kmap_local_folio(folio, 0);
115
116 block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
117 beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
118 if (block >= beyond) {
119 /* Reading beyond inode */
120 folio_set_checked(folio);
121 addr = folio_zero_tail(folio, 0, addr);
122 goto out;
123 }
124
125 dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
126 if (!dn) {
127 err = -ENOMEM;
128 goto out;
129 }
130
131 i = 0;
132 while (1) {
133 int ret;
134
135 if (block >= beyond) {
136 /* Reading beyond inode */
137 err = -ENOENT;
138 memset(addr, 0, UBIFS_BLOCK_SIZE);
139 } else {
140 ret = read_block(inode, addr, block, dn);
141 if (ret) {
142 err = ret;
143 if (err != -ENOENT)
144 break;
145 } else if (block + 1 == beyond) {
146 int dlen = le32_to_cpu(dn->size);
147 int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);
148
149 if (ilen && ilen < dlen)
150 memset(addr + ilen, 0, dlen - ilen);
151 }
152 }
153 if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
154 break;
155 block += 1;
156 addr += UBIFS_BLOCK_SIZE;
157 if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
158 kunmap_local(addr - UBIFS_BLOCK_SIZE);
159 addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
160 }
161 }
162
163 if (err) {
165 if (err == -ENOENT) {
166 /* Not found, so it must be a hole */
167 folio_set_checked(folio);
168 dbg_gen("hole");
169 err = 0;
170 } else {
171 ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
172 folio->index, inode->i_ino, err);
173 }
174 }
175
176out:
177 kfree(dn);
178 if (!err)
179 folio_mark_uptodate(folio);
180 flush_dcache_folio(folio);
181 kunmap_local(addr);
182 return err;
183}
184
185/**
186 * release_new_page_budget - release budget of a new page.
187 * @c: UBIFS file-system description object
188 *
189 * This is a helper function which releases the budget corresponding to one
190 * new page of data.
191 */
192static void release_new_page_budget(struct ubifs_info *c)
193{
194 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
195
196 ubifs_release_budget(c, &req);
197}
198
199/**
200 * release_existing_page_budget - release budget of an existing page.
201 * @c: UBIFS file-system description object
202 *
203 * This is a helper function which releases the budget corresponding to
204 * changing one page of data which already exists on the flash media.
205 */
206static void release_existing_page_budget(struct ubifs_info *c)
207{
208 struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget };
209
210 ubifs_release_budget(c, &req);
211}
212
213static int write_begin_slow(struct address_space *mapping,
214 loff_t pos, unsigned len, struct folio **foliop)
215{
216 struct inode *inode = mapping->host;
217 struct ubifs_info *c = inode->i_sb->s_fs_info;
218 pgoff_t index = pos >> PAGE_SHIFT;
219 struct ubifs_budget_req req = { .new_page = 1 };
220 int err, appending = !!(pos + len > inode->i_size);
221 struct folio *folio;
222
223 dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
224 inode->i_ino, pos, len, inode->i_size);
225
226 /*
227 * At the slow path we have to budget before locking the folio, because
228 * budgeting may force write-back, which would wait on locked folios and
229 * deadlock if we had the folio locked. At this point we do not know
230 * anything about the folio, so assume that this is a new folio which is
231 * written to a hole. This corresponds to the largest budget. Later the
232 * budget will be amended if this is not true.
233 */
234 if (appending)
235 /* We are appending data, budget for inode change */
236 req.dirtied_ino = 1;
237
238 err = ubifs_budget_space(c, &req);
239 if (unlikely(err))
240 return err;
241
242 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
243 mapping_gfp_mask(mapping));
244 if (IS_ERR(folio)) {
245 ubifs_release_budget(c, &req);
246 return PTR_ERR(folio);
247 }
248
249 if (!folio_test_uptodate(folio)) {
250 if (pos == folio_pos(folio) && len >= folio_size(folio))
251 folio_set_checked(folio);
252 else {
253 err = do_readpage(folio);
254 if (err) {
255 folio_unlock(folio);
256 folio_put(folio);
257 ubifs_release_budget(c, &req);
258 return err;
259 }
260 }
261 }
262
263 if (folio->private)
264 /*
265 * The folio is dirty, which means it was budgeted twice:
266 * o first time the budget was allocated by the task which
267 * made the folio dirty and set the private field;
268 * o and then we budgeted for it for the second time at the
269 * very beginning of this function.
270 *
271 * So what we have to do is to release the folio budget we
272 * allocated.
273 */
274 release_new_page_budget(c);
275 else if (!folio_test_checked(folio))
276 /*
277 * We are changing a folio which already exists on the media.
278 * This means that changing the folio does not make the amount
279 * of indexing information larger, and this part of the budget
280 * which we have already acquired may be released.
281 */
282 ubifs_convert_page_budget(c);
283
284 if (appending) {
285 struct ubifs_inode *ui = ubifs_inode(inode);
286
287 /*
288 * 'ubifs_write_end()' is optimized from the fast-path part of
289 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
290 * if data is appended.
291 */
292 mutex_lock(&ui->ui_mutex);
293 if (ui->dirty)
294 /*
295 * The inode is dirty already, so we may free the
296 * budget we allocated.
297 */
298 ubifs_release_dirty_inode_budget(c, ui);
299 }
300
301 *foliop = folio;
302 return 0;
303}
304
305/**
306 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
307 * @c: UBIFS file-system description object
308 * @folio: folio to allocate budget for
309 * @ui: UBIFS inode object the page belongs to
310 * @appending: non-zero if the page is appended
311 *
312 * This is a helper function for 'ubifs_write_begin()' which allocates budget
313 * for the operation. The budget is allocated differently depending on whether
314 * this is appending, whether the page is dirty or not, and so on. This
315 * function leaves the @ui->ui_mutex locked in case of appending.
316 *
317 * Returns: %0 in case of success and %-ENOSPC in case of failure.
318 */
319static int allocate_budget(struct ubifs_info *c, struct folio *folio,
320 struct ubifs_inode *ui, int appending)
321{
322 struct ubifs_budget_req req = { .fast = 1 };
323
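 /*
 * In short: a dirty folio needs no folio budget - it was budgeted when
 * it was dirtied; a clean folio needs @new_page if it is a hole
 * (@PG_checked is set) or @dirtied_page if it exists on the media; and
 * appending additionally needs @dirtied_ino, unless the inode is
 * already dirty.
 */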
324 if (folio->private) {
325 if (!appending)
326 /*
327 * The folio is dirty and we are not appending, which
328 * means no budget is needed at all.
329 */
330 return 0;
331
332 mutex_lock(&ui->ui_mutex);
333 if (ui->dirty)
334 /*
335 * The page is dirty and we are appending, so the inode
336 * has to be marked as dirty. However, it is already
337 * dirty, so we do not need any budget. We may return,
338 * but @ui->ui_mutex has to be left locked because we
339 * should prevent write-back from flushing the inode
340 * and freeing the budget. The lock will be released in
341 * 'ubifs_write_end()'.
342 */
343 return 0;
344
345 /*
346 * The page is dirty, we are appending, the inode is clean, so
347 * we need to budget the inode change.
348 */
349 req.dirtied_ino = 1;
350 } else {
351 if (folio_test_checked(folio))
352 /*
353 * The page corresponds to a hole and does not
354 * exist on the media. So changing it makes
355 * the amount of indexing information
356 * larger, and we have to budget for a new
357 * page.
358 */
359 req.new_page = 1;
360 else
361 /*
362 * Not a hole, the change will not add any new
363 * indexing information, budget for page
364 * change.
365 */
366 req.dirtied_page = 1;
367
368 if (appending) {
369 mutex_lock(&ui->ui_mutex);
370 if (!ui->dirty)
371 /*
372 * The inode is clean but we will have to mark
373 * it as dirty because we are appending. This
374 * needs a budget.
375 */
376 req.dirtied_ino = 1;
377 }
378 }
379
380 return ubifs_budget_space(c, &req);
381}
382
383/*
384 * This function is called when a page of data is going to be written. Since
385 * the page of data will not necessarily go to the flash straight away, UBIFS
386 * has to reserve space on the media for it, which is done by means of
387 * budgeting.
388 *
389 * This is the hot-path of the file-system and we are trying to optimize it as
390 * much as possible. For this reason it is split into two parts - slow and fast.
391 *
392 * There are many budgeting cases:
393 * o a new page is appended - we have to budget for a new page and for
394 * changing the inode; however, if the inode is already dirty, there is
395 * no need to budget for it;
396 * o an existing clean page is changed - we have to budget for it; if the page
397 * does not exist on the media (a hole), we have to budget for a new
398 * page; otherwise, we may budget for changing an existing page; the
399 * difference between these cases is that changing an existing page does
400 * not introduce anything new to the FS indexing information, so it does
401 * not grow, and a smaller budget is acquired in this case;
402 * o an existing dirty page is changed - no need to budget at all, because
403 * the page budget was acquired earlier, when the page was marked
404 * dirty.
405 *
406 * The UBIFS budgeting sub-system may force write-back if it thinks there is no
407 * space to reserve. This imposes some locking restrictions, which make it
408 * impossible to take the above cases into account and, consequently, to
409 * optimize budgeting.
410 *
411 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
412 * there is plenty of flash space and the budget will be acquired quickly,
413 * without forcing write-back. The slow path does not make this assumption.
414 */
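 /*
 * In pseudo-code, the fast/slow split below looks roughly like this (a
 * simplified sketch, not the exact logic):
 *
 *     folio = __filemap_get_folio(...);               // returned locked
 *     err = allocate_budget(c, folio, ui, appending); // @req.fast is set
 *     if (err == -ENOSPC) {
 *             unlock and drop the folio;
 *             return write_begin_slow(...);   // budget first, then lock
 *     }
 *     return 0;           // budgeted without forcing write-back
 */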
415static int ubifs_write_begin(struct file *file, struct address_space *mapping,
416 loff_t pos, unsigned len,
417 struct folio **foliop, void **fsdata)
418{
419 struct inode *inode = mapping->host;
420 struct ubifs_info *c = inode->i_sb->s_fs_info;
421 struct ubifs_inode *ui = ubifs_inode(inode);
422 pgoff_t index = pos >> PAGE_SHIFT;
423 int err, appending = !!(pos + len > inode->i_size);
424 int skipped_read = 0;
425 struct folio *folio;
426
427 ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
428 ubifs_assert(c, !c->ro_media && !c->ro_mount);
429
430 if (unlikely(c->ro_error))
431 return -EROFS;
432
433 /* Try out the fast-path part first */
434 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
435 mapping_gfp_mask(mapping));
436 if (IS_ERR(folio))
437 return PTR_ERR(folio);
438
439 if (!folio_test_uptodate(folio)) {
440 /* The page is not loaded from the flash */
441 if (pos == folio_pos(folio) && len >= folio_size(folio)) {
442 /*
443 * We change the whole page so no need to load it. But we
444 * do not know whether this page exists on the media or
445 * not, so we assume the latter because it requires a
446 * larger budget. The assumption is that it is better
447 * to budget a bit more than to read the page from the
448 * media. Thus, we are setting the @PG_checked flag
449 * here.
450 */
451 folio_set_checked(folio);
452 skipped_read = 1;
453 } else {
454 err = do_readpage(folio);
455 if (err) {
456 folio_unlock(folio);
457 folio_put(folio);
458 return err;
459 }
460 }
461 }
462
463 err = allocate_budget(c, folio, ui, appending);
464 if (unlikely(err)) {
465 ubifs_assert(c, err == -ENOSPC);
466 /*
467 * If we skipped reading the page because we were going to
468 * write all of it, then it is not up to date.
469 */
470 if (skipped_read)
471 folio_clear_checked(folio);
472 /*
473 * Budgeting failed which means it would have to force
474 * write-back but didn't, because we set the @fast flag in the
475 * request. Write-back cannot be done now, while we have the
476 * page locked, because it would deadlock. Unlock and free
477 * everything and fall-back to slow-path.
478 */
479 if (appending) {
480 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
481 mutex_unlock(&ui->ui_mutex);
482 }
483 folio_unlock(folio);
484 folio_put(folio);
485
486 return write_begin_slow(mapping, pos, len, foliop);
487 }
488
489 /*
490 * Whee, we acquired budgeting quickly - without involving
491 * garbage-collection, committing or forcing write-back. We return
492 * with @ui->ui_mutex locked if we are appending pages, and unlocked
493 * otherwise. This is an optimization (slightly hacky though).
494 */
495 *foliop = folio;
496 return 0;
497}
498
499/**
500 * cancel_budget - cancel budget.
501 * @c: UBIFS file-system description object
502 * @folio: folio to cancel budget for
503 * @ui: UBIFS inode object the page belongs to
504 * @appending: non-zero if the page is appended
505 *
506 * This is a helper function for a page write operation. It unlocks the
507 * @ui->ui_mutex in case of appending.
508 */
509static void cancel_budget(struct ubifs_info *c, struct folio *folio,
510 struct ubifs_inode *ui, int appending)
511{
512 if (appending) {
513 if (!ui->dirty)
514 ubifs_release_dirty_inode_budget(c, ui);
515 mutex_unlock(&ui->ui_mutex);
516 }
517 if (!folio->private) {
518 if (folio_test_checked(folio))
519 release_new_page_budget(c);
520 else
521 release_existing_page_budget(c);
522 }
523}
524
525static int ubifs_write_end(struct file *file, struct address_space *mapping,
526 loff_t pos, unsigned len, unsigned copied,
527 struct folio *folio, void *fsdata)
528{
529 struct inode *inode = mapping->host;
530 struct ubifs_inode *ui = ubifs_inode(inode);
531 struct ubifs_info *c = inode->i_sb->s_fs_info;
532 loff_t end_pos = pos + len;
533 int appending = !!(end_pos > inode->i_size);
534
535 dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
536 inode->i_ino, pos, folio->index, len, copied, inode->i_size);
537
538 if (unlikely(copied < len && !folio_test_uptodate(folio))) {
539 /*
540 * VFS copied less data to the folio than it intended and
541 * declared in its '->write_begin()' call via the @len
542 * argument. If the folio was not up-to-date, the
543 * 'ubifs_write_begin()' function did not load it from
544 * the media (for optimization reasons). This means that
545 * part of the folio contains garbage. So read the
546 * folio now.
547 */
548 dbg_gen("copied %d instead of %d, read page and repeat",
549 copied, len);
550 cancel_budget(c, folio, ui, appending);
551 folio_clear_checked(folio);
552
553 /*
554 * Return 0 to force VFS to repeat the whole operation, or the
555 * error code if 'do_readpage()' fails.
556 */
557 copied = do_readpage(folio);
558 goto out;
559 }
560
561 if (len == folio_size(folio))
562 folio_mark_uptodate(folio);
563
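 /*
 * First time this folio is dirtied: attach the private marker which
 * records that the folio carries a budget, and account it in
 * @c->dirty_pg_cnt. The budget is released when the folio is written
 * back, invalidated or released.
 */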
564 if (!folio->private) {
565 folio_attach_private(folio, (void *)1);
566 atomic_long_inc(&c->dirty_pg_cnt);
567 filemap_dirty_folio(mapping, folio);
568 }
569
570 if (appending) {
571 i_size_write(inode, end_pos);
572 ui->ui_size = end_pos;
573 /*
574 * We do not set @I_DIRTY_PAGES (which means that
575 * the inode has dirty pages), this was done in
576 * filemap_dirty_folio().
577 */
578 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
579 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
580 mutex_unlock(&ui->ui_mutex);
581 }
582
583out:
584 folio_unlock(folio);
585 folio_put(folio);
586 return copied;
587}
588
589/**
590 * populate_page - copy data nodes into a page for bulk-read.
591 * @c: UBIFS file-system description object
592 * @folio: folio to which the data nodes are copied
593 * @bu: bulk-read information
594 * @n: next zbranch slot
595 *
596 * Returns: %0 on success and a negative error code on failure.
597 */
598static int populate_page(struct ubifs_info *c, struct folio *folio,
599 struct bu_info *bu, int *n)
600{
601 int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
602 struct inode *inode = folio->mapping->host;
603 loff_t i_size = i_size_read(inode);
604 unsigned int page_block;
605 void *addr, *zaddr;
606 pgoff_t end_index;
607
608 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
609 inode->i_ino, folio->index, i_size, folio->flags);
610
611 addr = zaddr = kmap_local_folio(folio, 0);
612
613 end_index = (i_size - 1) >> PAGE_SHIFT;
614 if (!i_size || folio->index > end_index) {
615 hole = 1;
616 addr = folio_zero_tail(folio, 0, addr);
617 goto out_hole;
618 }
619
620 page_block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
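 /*
 * @bu->zbranch[] describes consecutively located data nodes, ordered by
 * key and hence by block number. Walk them and fill the folio block by
 * block, zero-filling the blocks for which no data node exists (holes).
 */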
621 while (1) {
622 int err, len, out_len, dlen;
623
624 if (nn >= bu->cnt) {
625 hole = 1;
626 memset(addr, 0, UBIFS_BLOCK_SIZE);
627 } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
628 struct ubifs_data_node *dn;
629
630 dn = bu->buf + (bu->zbranch[nn].offs - offs);
631
632 ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
633 ubifs_inode(inode)->creat_sqnum);
634
635 len = le32_to_cpu(dn->size);
636 if (len <= 0 || len > UBIFS_BLOCK_SIZE)
637 goto out_err;
638
639 dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
640 out_len = UBIFS_BLOCK_SIZE;
641
642 if (IS_ENCRYPTED(inode)) {
643 err = ubifs_decrypt(inode, dn, &dlen, page_block);
644 if (err)
645 goto out_err;
646 }
647
648 err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
649 le16_to_cpu(dn->compr_type));
650 if (err || len != out_len)
651 goto out_err;
652
653 if (len < UBIFS_BLOCK_SIZE)
654 memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
655
656 nn += 1;
657 read = (i << UBIFS_BLOCK_SHIFT) + len;
658 } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
659 nn += 1;
660 continue;
661 } else {
662 hole = 1;
663 memset(addr, 0, UBIFS_BLOCK_SIZE);
664 }
665 if (++i >= UBIFS_BLOCKS_PER_PAGE)
666 break;
667 addr += UBIFS_BLOCK_SIZE;
668 page_block += 1;
669 if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
670 kunmap_local(addr - UBIFS_BLOCK_SIZE);
671 addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
672 }
673 }
674
675 if (end_index == folio->index) {
676 int len = i_size & (PAGE_SIZE - 1);
677
678 if (len && len < read)
679 memset(zaddr + len, 0, read - len);
680 }
681
682out_hole:
683 if (hole) {
684 folio_set_checked(folio);
685 dbg_gen("hole");
686 }
687
688 folio_mark_uptodate(folio);
689 flush_dcache_folio(folio);
690 kunmap_local(addr);
691 *n = nn;
692 return 0;
693
694out_err:
695 flush_dcache_folio(folio);
696 kunmap_local(addr);
697 ubifs_err(c, "bad data node (block %u, inode %lu)",
698 page_block, inode->i_ino);
699 return -EINVAL;
700}
701
702/**
703 * ubifs_do_bulk_read - do bulk-read.
704 * @c: UBIFS file-system description object
705 * @bu: bulk-read information
706 * @folio1: first folio to read
707 *
708 * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
709 */
710static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
711 struct folio *folio1)
712{
713 pgoff_t offset = folio1->index, end_index;
714 struct address_space *mapping = folio1->mapping;
715 struct inode *inode = mapping->host;
716 struct ubifs_inode *ui = ubifs_inode(inode);
717 int err, page_idx, page_cnt, ret = 0, n = 0;
718 int allocate = bu->buf ? 0 : 1;
719 loff_t isize;
720 gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
721
722 err = ubifs_tnc_get_bu_keys(c, bu);
723 if (err)
724 goto out_warn;
725
726 if (bu->eof) {
727 /* Turn off bulk-read at the end of the file */
728 ui->read_in_a_row = 1;
729 ui->bulk_read = 0;
730 }
731
732 page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
733 if (!page_cnt) {
734 /*
735 * This happens when there are multiple blocks per page and the
736 * blocks for the first page we are looking for are not
737 * together. If all the pages were like this, bulk-read would
738 * reduce performance, so we turn it off for a while.
739 */
740 goto out_bu_off;
741 }
742
743 if (bu->cnt) {
744 if (allocate) {
745 /*
746 * Allocate bulk-read buffer depending on how many data
747 * nodes we are going to read.
748 */
749 bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
750 bu->zbranch[bu->cnt - 1].len -
751 bu->zbranch[0].offs;
752 ubifs_assert(c, bu->buf_len > 0);
753 ubifs_assert(c, bu->buf_len <= c->leb_size);
754 bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
755 if (!bu->buf)
756 goto out_bu_off;
757 }
758
759 err = ubifs_tnc_bulk_read(c, bu);
760 if (err)
761 goto out_warn;
762 }
763
764 err = populate_page(c, folio1, bu, &n);
765 if (err)
766 goto out_warn;
767
768 folio_unlock(folio1);
769 ret = 1;
770
771 isize = i_size_read(inode);
772 if (isize == 0)
773 goto out_free;
774 end_index = ((isize - 1) >> PAGE_SHIFT);
775
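 /*
 * Populate the folios following @folio1 from the same bulk-read buffer.
 * This part is opportunistic read-ahead - the folios are looked up with
 * FGP_NOWAIT, so we never block on folio locks here.
 */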
776 for (page_idx = 1; page_idx < page_cnt; page_idx++) {
777 pgoff_t page_offset = offset + page_idx;
778 struct folio *folio;
779
780 if (page_offset > end_index)
781 break;
782 folio = __filemap_get_folio(mapping, page_offset,
783 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
784 ra_gfp_mask);
785 if (IS_ERR(folio))
786 break;
787 if (!folio_test_uptodate(folio))
788 err = populate_page(c, folio, bu, &n);
789 folio_unlock(folio);
790 folio_put(folio);
791 if (err)
792 break;
793 }
794
795 ui->last_page_read = offset + page_idx - 1;
796
797out_free:
798 if (allocate)
799 kfree(bu->buf);
800 return ret;
801
802out_warn:
803 ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
804 goto out_free;
805
806out_bu_off:
807 ui->read_in_a_row = ui->bulk_read = 0;
808 goto out_free;
809}
810
811/**
812 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
813 * @folio: folio from which to start bulk-read.
814 *
815 * Some flash media are capable of reading sequentially at faster rates. UBIFS
816 * bulk-read facility is designed to take advantage of that, by reading in one
817 * go consecutive data nodes that are also located consecutively in the same
818 * LEB.
819 *
820 * Returns: %1 if a bulk-read is done and %0 otherwise.
821 */
822static int ubifs_bulk_read(struct folio *folio)
823{
824 struct inode *inode = folio->mapping->host;
825 struct ubifs_info *c = inode->i_sb->s_fs_info;
826 struct ubifs_inode *ui = ubifs_inode(inode);
827 pgoff_t index = folio->index, last_page_read = ui->last_page_read;
828 struct bu_info *bu;
829 int err = 0, allocated = 0;
830
831 ui->last_page_read = index;
832 if (!c->bulk_read)
833 return 0;
834
835 /*
836 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
837 * so don't bother if we cannot lock the mutex.
838 */
839 if (!mutex_trylock(&ui->ui_mutex))
840 return 0;
841
842 if (index != last_page_read + 1) {
843 /* Turn off bulk-read if we stop reading sequentially */
844 ui->read_in_a_row = 1;
845 if (ui->bulk_read)
846 ui->bulk_read = 0;
847 goto out_unlock;
848 }
849
850 if (!ui->bulk_read) {
851 ui->read_in_a_row += 1;
852 if (ui->read_in_a_row < 3)
853 goto out_unlock;
854 /* Three reads in a row, so switch on bulk-read */
855 ui->bulk_read = 1;
856 }
857
858 /*
859 * If possible, try to use pre-allocated bulk-read information, which
860 * is protected by @c->bu_mutex.
861 */
862 if (mutex_trylock(&c->bu_mutex))
863 bu = &c->bu;
864 else {
865 bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
866 if (!bu)
867 goto out_unlock;
868
869 bu->buf = NULL;
870 allocated = 1;
871 }
872
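 /*
 * Seed the bulk-read descriptor: start at the first data block of this
 * folio. @buf_len is only an upper limit - if @bu->buf is not
 * pre-allocated, 'ubifs_do_bulk_read()' allocates a buffer of exactly
 * the needed size.
 */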
873 bu->buf_len = c->max_bu_buf_len;
874 data_key_init(c, &bu->key, inode->i_ino,
875 folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
876 err = ubifs_do_bulk_read(c, bu, folio);
877
878 if (!allocated)
879 mutex_unlock(&c->bu_mutex);
880 else
881 kfree(bu);
882
883out_unlock:
884 mutex_unlock(&ui->ui_mutex);
885 return err;
886}
887
888static int ubifs_read_folio(struct file *file, struct folio *folio)
889{
890 if (ubifs_bulk_read(folio))
891 return 0;
892 do_readpage(folio);
893 folio_unlock(folio);
894 return 0;
895}
896
897static int do_writepage(struct folio *folio, size_t len)
898{
899 int err = 0, blen;
900 unsigned int block;
901 void *addr;
902 size_t offset = 0;
903 union ubifs_key key;
904 struct inode *inode = folio->mapping->host;
905 struct ubifs_info *c = inode->i_sb->s_fs_info;
906
907#ifdef UBIFS_DEBUG
908 struct ubifs_inode *ui = ubifs_inode(inode);
909 spin_lock(&ui->ui_lock);
910 ubifs_assert(c, folio->index <= ui->synced_i_size >> PAGE_SHIFT);
911 spin_unlock(&ui->ui_lock);
912#endif
913
914 folio_start_writeback(folio);
915
916 addr = kmap_local_folio(folio, offset);
917 block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
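 /*
 * Write the folio out through the journal one UBIFS block at a time;
 * only the last block may be shorter than UBIFS_BLOCK_SIZE.
 */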
918 for (;;) {
919 blen = min_t(size_t, len, UBIFS_BLOCK_SIZE);
920 data_key_init(c, &key, inode->i_ino, block);
921 err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
922 if (err)
923 break;
924 len -= blen;
925 if (!len)
926 break;
927 block += 1;
928 addr += blen;
929 if (folio_test_highmem(folio) && !offset_in_page(addr)) {
930 kunmap_local(addr - blen);
931 offset += PAGE_SIZE;
932 addr = kmap_local_folio(folio, offset);
933 }
934 }
935 kunmap_local(addr);
936 if (err) {
937 mapping_set_error(folio->mapping, err);
938 ubifs_err(c, "cannot write folio %lu of inode %lu, error %d",
939 folio->index, inode->i_ino, err);
940 ubifs_ro_mode(c, err);
941 }
942
943 ubifs_assert(c, folio->private != NULL);
944 if (folio_test_checked(folio))
945 release_new_page_budget(c);
946 else
947 release_existing_page_budget(c);
948
949 atomic_long_dec(&c->dirty_pg_cnt);
950 folio_detach_private(folio);
951 folio_clear_checked(folio);
952
953 folio_unlock(folio);
954 folio_end_writeback(folio);
955 return err;
956}
957
958/*
959 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
960 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
961 * situation when we have an inode with size 0, then a megabyte of data is
962 * appended to the inode, then write-back starts and flushes some amount of the
963 * dirty pages, the journal becomes full, commit happens and finishes, and then
964 * an unclean reboot happens. When the file system is mounted next time, the
965 * inode size would still be 0, but there would be many pages which are beyond
966 * the inode size; they would be indexed and consume flash space. Because the
967 * journal has been committed, the replay would not be able to detect this
968 * situation and correct the inode size. This means UBIFS would have to scan
969 * the whole index and correct all inode sizes, which is slow and unacceptable.
970 *
971 * To prevent situations like this, UBIFS writes pages back only if they are
972 * within the last synchronized inode size, i.e. the size which has been
973 * written to the flash media last time. Otherwise, UBIFS forces inode
974 * write-back, thus making sure the on-flash inode contains current inode size,
975 * and then keeps writing pages back.
976 *
977 * An explanation of some locking issues. 'ubifs_writepage()' is first called
978 * with the page locked, and it locks @ui_mutex. However, write-back does not
979 * take the inode @i_mutex, so other VFS operations may run on this inode at the
980 * same time. The problematic one is truncation to a smaller size, where
981 * we have to call 'truncate_setsize()', which first changes @inode->i_size,
982 * then drops the truncated pages. And while dropping the pages, it takes the
983 * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()'
984 * with @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'.
985 * This means that @inode->i_size is changed while @ui_mutex is unlocked.
986 *
987 * XXX(truncate): with the new truncate sequence this is not true anymore,
988 * and the calls to truncate_setsize can be moved around freely. They should
989 * be moved to the very end of the truncate sequence.
990 *
991 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
992 * inode size. How do we do this if @inode->i_size may become smaller while we
993 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
994 * @ui->ui_size "shadow" field which UBIFS uses instead of @inode->i_size
995 * internally and updates it under @ui_mutex.
996 *
997 * Q: why do we not worry that if we race with truncation, we may end up with a
998 * situation when the inode is truncated while we are in the middle of
999 * 'do_writepage()', so that we write beyond the inode size?
1000 * A: If we are in the middle of 'do_writepage()', truncation would be blocked
1001 * on the page lock and would not write the truncated inode node to the
1002 * journal before we have finished.
1003 */
1004static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
1005 void *data)
1006{
1007 struct inode *inode = folio->mapping->host;
1008 struct ubifs_info *c = inode->i_sb->s_fs_info;
1009 struct ubifs_inode *ui = ubifs_inode(inode);
1010 loff_t i_size = i_size_read(inode), synced_i_size;
1011 int err, len = folio_size(folio);
1012
1013 dbg_gen("ino %lu, pg %lu, pg flags %#lx",
1014 inode->i_ino, folio->index, folio->flags);
1015 ubifs_assert(c, folio->private != NULL);
1016
1017 /* Is the folio fully outside @i_size? (truncate in progress) */
1018 if (folio_pos(folio) >= i_size) {
1019 err = 0;
1020 goto out_unlock;
1021 }
1022
1023 spin_lock(&ui->ui_lock);
1024 synced_i_size = ui->synced_i_size;
1025 spin_unlock(&ui->ui_lock);
1026
1027 /* Is the folio fully inside i_size? */
1028 if (folio_pos(folio) + len <= i_size) {
1029 if (folio_pos(folio) + len > synced_i_size) {
1030 err = inode->i_sb->s_op->write_inode(inode, NULL);
1031 if (err)
1032 goto out_redirty;
1033 /*
1034 * The inode has been written, but the write-buffer has
1035 * not been synchronized, so in case of an unclean
1036 * reboot we may end up with some pages beyond inode
1037 * size, but they would be in the journal (because
1038 * commit flushes write buffers) and recovery would deal
1039 * with this.
1040 */
1041 }
1042 return do_writepage(folio, len);
1043 }
1044
1045 /*
1046 * The folio straddles @i_size. It must be zeroed out on each and every
1047 * writepage invocation because it may be mmapped. "A file is mapped
1048 * in multiples of the page size. For a file that is not a multiple of
1049 * the page size, the remaining memory is zeroed when mapped, and
1050 * writes to that region are not written out to the file."
1051 */
1052 len = i_size - folio_pos(folio);
1053 folio_zero_segment(folio, len, folio_size(folio));
1054
1055 if (i_size > synced_i_size) {
1056 err = inode->i_sb->s_op->write_inode(inode, NULL);
1057 if (err)
1058 goto out_redirty;
1059 }
1060
1061 return do_writepage(folio, len);
1062out_redirty:
1063 /*
1064 * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
1065 * it passes the I_DIRTY_PAGES flag when calling __mark_inode_dirty(), so
1066 * there is no need to budget space for the dirty inode.
1067 */
1068 folio_redirty_for_writepage(wbc, folio);
1069out_unlock:
1070 folio_unlock(folio);
1071 return err;
1072}
1073
1074static int ubifs_writepages(struct address_space *mapping,
1075 struct writeback_control *wbc)
1076{
1077 return write_cache_pages(mapping, wbc, ubifs_writepage, NULL);
1078}
1079
1080/**
1081 * do_attr_changes - change inode attributes.
1082 * @inode: inode to change attributes for
1083 * @attr: describes attributes to change
1084 */
1085static void do_attr_changes(struct inode *inode, const struct iattr *attr)
1086{
1087 if (attr->ia_valid & ATTR_UID)
1088 inode->i_uid = attr->ia_uid;
1089 if (attr->ia_valid & ATTR_GID)
1090 inode->i_gid = attr->ia_gid;
1091 if (attr->ia_valid & ATTR_ATIME)
1092 inode_set_atime_to_ts(inode, attr->ia_atime);
1093 if (attr->ia_valid & ATTR_MTIME)
1094 inode_set_mtime_to_ts(inode, attr->ia_mtime);
1095 if (attr->ia_valid & ATTR_CTIME)
1096 inode_set_ctime_to_ts(inode, attr->ia_ctime);
1097 if (attr->ia_valid & ATTR_MODE) {
1098 umode_t mode = attr->ia_mode;
1099
1100 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
1101 mode &= ~S_ISGID;
1102 inode->i_mode = mode;
1103 }
1104}
1105
1106/**
1107 * do_truncation - truncate an inode.
1108 * @c: UBIFS file-system description object
1109 * @inode: inode to truncate
1110 * @attr: inode attribute changes description
1111 *
1112 * This function implements VFS '->setattr()' call when the inode is truncated
1113 * to a smaller size.
1114 *
1115 * Returns: %0 in case of success and a negative error code
1116 * in case of failure.
1117 */
1118static int do_truncation(struct ubifs_info *c, struct inode *inode,
1119 const struct iattr *attr)
1120{
1121 int err;
1122 struct ubifs_budget_req req;
1123 loff_t old_size = inode->i_size, new_size = attr->ia_size;
1124 int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
1125 struct ubifs_inode *ui = ubifs_inode(inode);
1126
1127 dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
1128 memset(&req, 0, sizeof(struct ubifs_budget_req));
1129
1130 /*
1131 * If this is truncation to a smaller size, and we do not truncate on a
1132 * block boundary, budget for changing one data block, because the last
1133 * block will be re-written.
1134 */
1135 if (new_size & (UBIFS_BLOCK_SIZE - 1))
1136 req.dirtied_page = 1;
1137
1138 req.dirtied_ino = 1;
1139 /* A funny way to budget for truncation node */
1140 req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
1141 err = ubifs_budget_space(c, &req);
1142 if (err) {
1143 /*
1144 * Treat truncations to zero as deletion and always allow them,
1145 * just like we do for '->unlink()'.
1146 */
1147 if (new_size || err != -ENOSPC)
1148 return err;
1149 budgeted = 0;
1150 }
1151
1152 truncate_setsize(inode, new_size);
1153
1154 if (offset) {
1155 pgoff_t index = new_size >> PAGE_SHIFT;
1156 struct folio *folio;
1157
1158 folio = filemap_lock_folio(inode->i_mapping, index);
1159 if (!IS_ERR(folio)) {
1160 if (folio_test_dirty(folio)) {
1161 /*
1162 * 'ubifs_jnl_truncate()' will try to truncate
1163 * the last data node, but it contains
1164 * out-of-date data because the page is dirty.
1165 * Write the page now, so that
1166 * 'ubifs_jnl_truncate()' will see an already
1167 * truncated (and up to date) data node.
1168 */
1169 ubifs_assert(c, folio->private != NULL);
1170
1171 folio_clear_dirty_for_io(folio);
1172 if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
1173 offset = offset_in_folio(folio,
1174 new_size);
1175 err = do_writepage(folio, offset);
1176 folio_put(folio);
1177 if (err)
1178 goto out_budg;
1179 /*
1180 * We could now tell 'ubifs_jnl_truncate()' not
1181 * to read the last block.
1182 */
1183 } else {
1184 /*
1185 * We could 'kmap()' the page and pass the data
1186 * to 'ubifs_jnl_truncate()' to save it from
1187 * having to read it.
1188 */
1189 folio_unlock(folio);
1190 folio_put(folio);
1191 }
1192 }
1193 }
1194
1195 mutex_lock(&ui->ui_mutex);
1196 ui->ui_size = inode->i_size;
1197 /* Truncation changes inode [mc]time */
1198 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1199 /* Other attributes may be changed at the same time as well */
1200 do_attr_changes(inode, attr);
1201 err = ubifs_jnl_truncate(c, inode, old_size, new_size);
1202 mutex_unlock(&ui->ui_mutex);
1203
1204out_budg:
1205 if (budgeted)
1206 ubifs_release_budget(c, &req);
1207 else {
1208 c->bi.nospace = c->bi.nospace_rp = 0;
1209 smp_wmb();
1210 }
1211 return err;
1212}
1213
1214/**
1215 * do_setattr - change inode attributes.
1216 * @c: UBIFS file-system description object
1217 * @inode: inode to change attributes for
1218 * @attr: inode attribute changes description
1219 *
1220 * This function implements VFS '->setattr()' call for all cases except
1221 * truncations to smaller size.
1222 *
1223 * Returns: %0 in case of success and a negative
1224 * error code in case of failure.
1225 */
1226static int do_setattr(struct ubifs_info *c, struct inode *inode,
1227 const struct iattr *attr)
1228{
1229 int err, release;
1230 loff_t new_size = attr->ia_size;
1231 struct ubifs_inode *ui = ubifs_inode(inode);
1232 struct ubifs_budget_req req = { .dirtied_ino = 1,
1233 .dirtied_ino_d = ALIGN(ui->data_len, 8) };
1234
1235 err = ubifs_budget_space(c, &req);
1236 if (err)
1237 return err;
1238
1239 if (attr->ia_valid & ATTR_SIZE) {
1240 dbg_gen("size %lld -> %lld", inode->i_size, new_size);
1241 truncate_setsize(inode, new_size);
1242 }
1243
1244 mutex_lock(&ui->ui_mutex);
1245 if (attr->ia_valid & ATTR_SIZE) {
1246 /* Truncation changes inode [mc]time */
1247 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1248 /* 'truncate_setsize()' changed @i_size, update @ui_size */
1249 ui->ui_size = inode->i_size;
1250 }
1251
1252 do_attr_changes(inode, attr);
1253
1254 release = ui->dirty;
1255 if (attr->ia_valid & ATTR_SIZE)
1256 /*
1257 * Inode length changed, so we have to make sure
1258 * @I_DIRTY_DATASYNC is set.
1259 */
1260 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1261 else
1262 mark_inode_dirty_sync(inode);
1263 mutex_unlock(&ui->ui_mutex);
1264
1265 if (release)
1266 ubifs_release_budget(c, &req);
1267 if (IS_SYNC(inode))
1268 err = inode->i_sb->s_op->write_inode(inode, NULL);
1269 return err;
1270}
1271
1272int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1273 struct iattr *attr)
1274{
1275 int err;
1276 struct inode *inode = d_inode(dentry);
1277 struct ubifs_info *c = inode->i_sb->s_fs_info;
1278
1279 dbg_gen("ino %lu, mode %#x, ia_valid %#x",
1280 inode->i_ino, inode->i_mode, attr->ia_valid);
1281 err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
1282 if (err)
1283 return err;
1284
1285 err = dbg_check_synced_i_size(c, inode);
1286 if (err)
1287 return err;
1288
1289 err = fscrypt_prepare_setattr(dentry, attr);
1290 if (err)
1291 return err;
1292
1293 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
1294 /* Truncation to a smaller size */
1295 err = do_truncation(c, inode, attr);
1296 else
1297 err = do_setattr(c, inode, attr);
1298
1299 return err;
1300}
1301
1302static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
1303 size_t length)
1304{
1305 struct inode *inode = folio->mapping->host;
1306 struct ubifs_info *c = inode->i_sb->s_fs_info;
1307
1308 ubifs_assert(c, folio_test_private(folio));
1309 if (offset || length < folio_size(folio))
1310 /* Partial folio remains dirty */
1311 return;
1312
1313 if (folio_test_checked(folio))
1314 release_new_page_budget(c);
1315 else
1316 release_existing_page_budget(c);
1317
1318 atomic_long_dec(&c->dirty_pg_cnt);
1319 folio_detach_private(folio);
1320 folio_clear_checked(folio);
1321}
1322
1323int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1324{
1325 struct inode *inode = file->f_mapping->host;
1326 struct ubifs_info *c = inode->i_sb->s_fs_info;
1327 int err;
1328
1329 dbg_gen("syncing inode %lu", inode->i_ino);
1330
1331 if (c->ro_mount)
1332 /*
1333 * For some really strange reason VFS does not filter out
1334 * 'fsync()' for R/O mounted file-systems, as of 2.6.39.
1335 */
1336 return 0;
1337
1338 err = file_write_and_wait_range(file, start, end);
1339 if (err)
1340 return err;
1341 inode_lock(inode);
1342
1343 /* Synchronize the inode unless this is a 'datasync()' call. */
1344 if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
1345 err = inode->i_sb->s_op->write_inode(inode, NULL);
1346 if (err)
1347 goto out;
1348 }
1349
1350 /*
1351 * Nodes related to this inode may still sit in a write-buffer. Flush
1352 * them.
1353 */
1354 err = ubifs_sync_wbufs_by_inode(c, inode);
1355out:
1356 inode_unlock(inode);
1357 return err;
1358}
1359
1360/**
1361 * mctime_update_needed - check if mtime or ctime update is needed.
1362 * @inode: the inode to do the check for
1363 * @now: current time
1364 *
1365 * This helper function checks if the inode mtime/ctime should be updated or
1366 * not. If the current values of the time-stamps are within the UBIFS inode time
1367 * granularity, they are not updated. This is an optimization.
1368 *
1369 * Returns: %1 if time update is needed, %0 if not
1370 */
1371static inline int mctime_update_needed(const struct inode *inode,
1372 const struct timespec64 *now)
1373{
1374 struct timespec64 ctime = inode_get_ctime(inode);
1375 struct timespec64 mtime = inode_get_mtime(inode);
1376
1377 if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
1378 return 1;
1379 return 0;
1380}
1381
1382/**
1383 * ubifs_update_time - update time of inode.
1384 * @inode: inode to update
1385 * @flags: time updating control flag determines updating
1386 * which time fields of @inode
1387 *
1388 * This function updates time of the inode.
1389 *
1390 * Returns: %0 for success or a negative error code otherwise.
1391 */
1392int ubifs_update_time(struct inode *inode, int flags)
1393{
1394 struct ubifs_inode *ui = ubifs_inode(inode);
1395 struct ubifs_info *c = inode->i_sb->s_fs_info;
1396 struct ubifs_budget_req req = { .dirtied_ino = 1,
1397 .dirtied_ino_d = ALIGN(ui->data_len, 8) };
1398 int err, release;
1399
1400 if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
1401 generic_update_time(inode, flags);
1402 return 0;
1403 }
1404
1405 err = ubifs_budget_space(c, &req);
1406 if (err)
1407 return err;
1408
1409 mutex_lock(&ui->ui_mutex);
1410 inode_update_timestamps(inode, flags);
1411 release = ui->dirty;
1412 __mark_inode_dirty(inode, I_DIRTY_SYNC);
1413 mutex_unlock(&ui->ui_mutex);
1414 if (release)
1415 ubifs_release_budget(c, &req);
1416 return 0;
1417}
1418
1419/**
1420 * update_mctime - update mtime and ctime of an inode.
1421 * @inode: inode to update
1422 *
1423 * This function updates the mtime and ctime of the inode if they are not
1424 * equal to the current time.
1425 *
1426 * Returns: %0 in case of success and a negative error code in
1427 * case of failure.
1428 */
1429static int update_mctime(struct inode *inode)
1430{
1431 struct timespec64 now = current_time(inode);
1432 struct ubifs_inode *ui = ubifs_inode(inode);
1433 struct ubifs_info *c = inode->i_sb->s_fs_info;
1434
1435 if (mctime_update_needed(inode, &now)) {
1436 int err, release;
1437 struct ubifs_budget_req req = { .dirtied_ino = 1,
1438 .dirtied_ino_d = ALIGN(ui->data_len, 8) };
1439
1440 err = ubifs_budget_space(c, &req);
1441 if (err)
1442 return err;
1443
1444 mutex_lock(&ui->ui_mutex);
1445 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1446 release = ui->dirty;
1447 mark_inode_dirty_sync(inode);
1448 mutex_unlock(&ui->ui_mutex);
1449 if (release)
1450 ubifs_release_budget(c, &req);
1451 }
1452
1453 return 0;
1454}
1455
1456static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
1457{
1458 int err = update_mctime(file_inode(iocb->ki_filp));
1459 if (err)
1460 return err;
1461
1462 return generic_file_write_iter(iocb, from);
1463}
1464
1465static bool ubifs_dirty_folio(struct address_space *mapping,
1466 struct folio *folio)
1467{
1468 bool ret;
1469 struct ubifs_info *c = mapping->host->i_sb->s_fs_info;
1470
1471 ret = filemap_dirty_folio(mapping, folio);
1472 /*
1473 * An attempt to dirty a page without budgeting for it - should not
1474 * happen.
1475 */
1476 ubifs_assert(c, ret == false);
1477 return ret;
1478}
1479
1480static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
1481{
1482 struct inode *inode = folio->mapping->host;
1483 struct ubifs_info *c = inode->i_sb->s_fs_info;
1484
1485 if (folio_test_writeback(folio))
1486 return false;
1487
1488 /*
1489 * Page is private but not dirty - weird? There is one sequence of
1490 * events which makes this happen: 'ubifs_writepage()' skipped the
1491 * page because its index is beyond i_size (for example, the file was
1492 * truncated by another process A), and then the page was invalidated
1493 * by an fadvise64() syscall before being truncated by process A.
1494 */
1495 ubifs_assert(c, folio_test_private(folio));
1496 if (folio_test_checked(folio))
1497 release_new_page_budget(c);
1498 else
1499 release_existing_page_budget(c);
1500
1501 atomic_long_dec(&c->dirty_pg_cnt);
1502 folio_detach_private(folio);
1503 folio_clear_checked(folio);
1504 return true;
1505}
1506
1507/*
1508 * mmap()d file has taken write protection fault and is being made writable.
1509 * UBIFS must ensure page is budgeted for.
1510 */
1511static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
1512{
1513 struct folio *folio = page_folio(vmf->page);
1514 struct inode *inode = file_inode(vmf->vma->vm_file);
1515 struct ubifs_info *c = inode->i_sb->s_fs_info;
1516 struct timespec64 now = current_time(inode);
1517 struct ubifs_budget_req req = { .new_page = 1 };
1518 int err, update_time;
1519
1520 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, folio->index,
1521 i_size_read(inode));
1522 ubifs_assert(c, !c->ro_media && !c->ro_mount);
1523
1524 if (unlikely(c->ro_error))
1525 return VM_FAULT_SIGBUS; /* -EROFS */
1526
1527 /*
1528 * We have not locked @folio so far so we may budget for changing the
1529 * folio. Note, we cannot do this after we locked the folio, because
1530 * budgeting may cause write-back which would cause deadlock.
1531 *
1532 * At the moment we do not know whether the folio is dirty or not, so we
1533 * assume that it is not and budget for a new folio. We could look at
1534 * the @PG_private flag and figure this out, but we may race with write
1535 * back and the folio state may change by the time we lock it, so this
1536 * would need additional care. We do not bother with this at the
1537 * moment, although it might be a good idea to do. Instead, we allocate
1538 * budget for a new folio and amend it later on if the folio was in fact
1539 * dirty.
1540 *
1541 * The budgeting-related logic of this function is similar to what we
1542 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
1543 * for more comments.
1544 */
1545 update_time = mctime_update_needed(inode, &now);
1546 if (update_time)
1547 /*
1548 * We have to change inode time stamp which requires extra
1549 * budgeting.
1550 */
1551 req.dirtied_ino = 1;
1552
1553 err = ubifs_budget_space(c, &req);
1554 if (unlikely(err)) {
1555 if (err == -ENOSPC)
1556 ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
1557 inode->i_ino);
1558 return VM_FAULT_SIGBUS;
1559 }
1560
1561 folio_lock(folio);
1562 if (unlikely(folio->mapping != inode->i_mapping ||
1563 folio_pos(folio) >= i_size_read(inode))) {
1564 /* Folio got truncated out from underneath us */
1565 goto sigbus;
1566 }
1567
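 /*
 * The folio is locked now, so its state cannot change under us. Amend
 * the pessimistic "new page" budget taken above if the folio turns out
 * to be dirty already or to exist on the media.
 */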
1568 if (folio->private)
1569 release_new_page_budget(c);
1570 else {
1571 if (!folio_test_checked(folio))
1572 ubifs_convert_page_budget(c);
1573 folio_attach_private(folio, (void *)1);
1574 atomic_long_inc(&c->dirty_pg_cnt);
1575 filemap_dirty_folio(folio->mapping, folio);
1576 }
1577
1578 if (update_time) {
1579 int release;
1580 struct ubifs_inode *ui = ubifs_inode(inode);
1581
1582 mutex_lock(&ui->ui_mutex);
1583 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1584 release = ui->dirty;
1585 mark_inode_dirty_sync(inode);
1586 mutex_unlock(&ui->ui_mutex);
1587 if (release)
1588 ubifs_release_dirty_inode_budget(c, ui);
1589 }
1590
1591 folio_wait_stable(folio);
1592 return VM_FAULT_LOCKED;
1593
1594sigbus:
1595 folio_unlock(folio);
1596 ubifs_release_budget(c, &req);
1597 return VM_FAULT_SIGBUS;
1598}
1599
1600static const struct vm_operations_struct ubifs_file_vm_ops = {
1601 .fault = filemap_fault,
1602 .map_pages = filemap_map_pages,
1603 .page_mkwrite = ubifs_vm_page_mkwrite,
1604};
1605
1606static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1607{
1608 int err;
1609
1610 err = generic_file_mmap(file, vma);
1611 if (err)
1612 return err;
1613 vma->vm_ops = &ubifs_file_vm_ops;
1614
1615 if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
1616 file_accessed(file);
1617
1618 return 0;
1619}
1620
1621static const char *ubifs_get_link(struct dentry *dentry,
1622 struct inode *inode,
1623 struct delayed_call *done)
1624{
1625 struct ubifs_inode *ui = ubifs_inode(inode);
1626
1627 if (!IS_ENCRYPTED(inode))
1628 return ui->data;
1629
1630 if (!dentry)
1631 return ERR_PTR(-ECHILD);
1632
1633 return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
1634}
1635
1636static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
1637 const struct path *path, struct kstat *stat,
1638 u32 request_mask, unsigned int query_flags)
1639{
1640 ubifs_getattr(idmap, path, stat, request_mask, query_flags);
1641
1642 if (IS_ENCRYPTED(d_inode(path->dentry)))
1643 return fscrypt_symlink_getattr(path, stat);
1644 return 0;
1645}
1646
1647const struct address_space_operations ubifs_file_address_operations = {
1648 .read_folio = ubifs_read_folio,
1649 .writepages = ubifs_writepages,
1650 .write_begin = ubifs_write_begin,
1651 .write_end = ubifs_write_end,
1652 .invalidate_folio = ubifs_invalidate_folio,
1653 .dirty_folio = ubifs_dirty_folio,
1654 .migrate_folio = filemap_migrate_folio,
1655 .release_folio = ubifs_release_folio,
1656};
1657
1658const struct inode_operations ubifs_file_inode_operations = {
1659 .setattr = ubifs_setattr,
1660 .getattr = ubifs_getattr,
1661 .listxattr = ubifs_listxattr,
1662 .update_time = ubifs_update_time,
1663 .fileattr_get = ubifs_fileattr_get,
1664 .fileattr_set = ubifs_fileattr_set,
1665};
1666
1667const struct inode_operations ubifs_symlink_inode_operations = {
1668 .get_link = ubifs_get_link,
1669 .setattr = ubifs_setattr,
1670 .getattr = ubifs_symlink_getattr,
1671 .listxattr = ubifs_listxattr,
1672 .update_time = ubifs_update_time,
1673};
1674
1675const struct file_operations ubifs_file_operations = {
1676 .llseek = generic_file_llseek,
1677 .read_iter = generic_file_read_iter,
1678 .write_iter = ubifs_write_iter,
1679 .mmap = ubifs_file_mmap,
1680 .fsync = ubifs_fsync,
1681 .unlocked_ioctl = ubifs_ioctl,
1682 .splice_read = filemap_splice_read,
1683 .splice_write = iter_file_splice_write,
1684 .open = fscrypt_file_open,
1685#ifdef CONFIG_COMPAT
1686 .compat_ioctl = ubifs_compat_ioctl,
1687#endif
1688};