// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

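/*
 * Metadata buffer helpers: erofs_unmap_metabuf() drops the local kmap of a
 * cached metadata page, and erofs_put_metabuf() additionally releases the
 * page reference held by the buffer.
 */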
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits to be compatible with the
 * anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

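/*
 * Point the metadata buffer at its backing inode: the fscache pseudo inode
 * in fscache mode, or the block device inode otherwise.
 */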
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}

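/*
 * Map a logical offset of a flat-layout (uncompressed) inode to its on-disk
 * location, handling the optional tail-packed inline block.
 */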
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

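/*
 * Translate the logical extent at map->m_la into a physical extent, either
 * through the flat-mode helper above or by parsing chunk-based block maps
 * and chunk indexes.
 */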
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

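/*
 * Resolve the (device id, physical address) pair in *map to the backing
 * block device / DAX device / fscache cookie of a multi-device image.
 */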
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_handle ?
					      dif->bdev_handle->bdev : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

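/*
 * iomap_begin callback: map [offset, offset + length) to an iomap extent,
 * reporting holes, inline (metadata-packed) data and mapped extents.
 */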
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
					 erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

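/*
 * iomap_end callback: release the metadata buffer that erofs_iomap_begin()
 * pinned for IOMAP_INLINE extents (stashed in iomap->private).
 */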
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

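/*
 * FIEMAP support: compressed inodes are reported through the zmap iomap ops
 * (when CONFIG_EROFS_FS_ZIP is enabled); everything else goes through the
 * plain iomap ops above.
 */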
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
				       unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault = erofs_dax_fault,
	.huge_fault = erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek = generic_file_llseek,
	.read_iter = erofs_file_read_iter,
	.mmap = erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read = filemap_splice_read,
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

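/*
 * bio completion handler for raw reads: mark each page uptodate (or set a
 * page error) and unlock it.
 */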
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

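/*
 * Read a metadata block through the block device's page cache and return
 * the locked, uptodate page (or an ERR_PTR on failure).
 */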
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

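/*
 * Read one uncompressed page, either by copying inline (metadata-packed)
 * data or by adding the page to a raw read bio.  Returns the bio to be
 * continued by the caller, NULL once the page has been completed inline,
 * or an ERR_PTR() on failure.
 */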
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for the readpage case, bio is also NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = bio_alloc(GFP_NOIO, nblocks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ;
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all the page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
	return 0;
}

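/*
 * get_block_t helper for bmap: report the physical block number of a mapped
 * uncompressed block (note that iblock is treated as a 512-byte unit here).
 */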
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};